Source-code examples of the Python class Constant()
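All of the snippets below pass lasagne.init.Constant instances as parameter
initializers. For orientation, a minimal sketch (assuming Lasagne and Theano
are installed): a Lasagne initializer is called with a shape and returns a
NumPy array of that shape filled accordingly.

from lasagne import init

w = init.Constant(0.)((3, 2))   # 3x2 array of zeros (dtype theano.config.floatX)
s = init.Constant(0.5)(())      # 0-d (scalar) array; the shape-() pattern appears below
print(w.shape, s)               # (3, 2) 0.5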

layers.py (project: drmad, author: bigaidream-projects)
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 num_leading_axes=1, **kwargs):
        super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        if num_leading_axes >= len(self.input_shape):
            raise ValueError(
                    "Got num_leading_axes=%d for a %d-dimensional input, "
                    "leaving no trailing axes for the dot product." %
                    (num_leading_axes, len(self.input_shape)))
        elif num_leading_axes < -len(self.input_shape):
            raise ValueError(
                    "Got num_leading_axes=%d for a %d-dimensional input, "
                    "requesting more trailing axes than there are input "
                    "dimensions." % (num_leading_axes, len(self.input_shape)))
        self.num_leading_axes = num_leading_axes

        if any(s is None for s in self.input_shape[num_leading_axes:]):
            raise ValueError(
                    "A DenseLayer requires a fixed input shape (except for "
                    "the leading axes). Got %r for num_leading_axes=%d." %
                    (self.input_shape, self.num_leading_axes))
        num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b",
                                    regularizable=False)

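        # optional per-weight L1/L2 regularization strengths, stored as extra parameters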
        if args.regL1 is True:
            self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                     (num_inputs, num_units), name="L1")
        if args.regL2 is True:
            self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                     (num_inputs, num_units), name="L2")
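
For comparison, the stock lasagne.layers.DenseLayer accepts the same W and b
initializer arguments that DenseLayerWithReg extends. A minimal usage sketch
(the layer sizes here are hypothetical, chosen only for illustration):

from lasagne import init, nonlinearities
from lasagne.layers import InputLayer, DenseLayer

l_in = InputLayer((None, 100))                    # batches of 100-dimensional inputs
l_hid = DenseLayer(l_in, num_units=50,
                   W=init.GlorotUniform(),        # same defaults as the layer above
                   b=init.Constant(0.),           # biases start at zero
                   nonlinearity=nonlinearities.rectify)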
MyLayers.py (project: CIKM2017, author: MovieFIB)
def __init__(
        self, incoming, num_units,
        W=init.Constant(0.1),
        b=init.Constant(0.),
        nonlinearity=nonlinearities.rectify,
        **kwargs
    ):
        super(Tensor4LinearLayer, self).__init__(incoming, **kwargs)
        num_inputs = self.input_shape[-1]
        self.num_units = num_units
        self.W = self.add_param(
            W, (num_inputs, num_units),
            name="W"
        )
        if b is not None:
            self.b = self.add_param(
                b,
                (
                    self.input_shape[1],
                    self.input_shape[2], self.num_units
                )
            )
        else:
            self.b = None
        if nonlinearity is not None:
            self.nonlinearity = nonlinearity
        else:
            self.nonlinearity = nonlinearities.identity
MyLayers.py (project: CIKM2017, author: MovieFIB)
def __init__(
        self, incoming, num_units,
        W=init.Constant(0.1),
        b=init.Constant(0.),
        nonlinearity=nonlinearities.rectify,
        **kwargs
    ):
        super(Tensor3LinearLayer, self).__init__(incoming, **kwargs)
        num_inputs = self.input_shape[-1]
        self.num_units = num_units
        self.W = self.add_param(
            W, (num_inputs, num_units),
            name="W"
        )
        if b is not None:
            self.b = self.add_param(
                b,
                (
                    self.input_shape[1], self.num_units
                )
            )
        else:
            self.b = None
        if nonlinearity is not None:
            self.nonlinearity = nonlinearity
        else:
            self.nonlinearity = nonlinearities.identity
layers.py (project: opt-mmd, author: dougalsutherland)
def __init__(self, incoming, num_centers,
                 locs=init.Normal(std=1), log_sigma=init.Constant(0.),
                 **kwargs):
        super(RBFLayer, self).__init__(incoming, **kwargs)
        self.num_centers = num_centers

        assert len(self.input_shape) == 2
        in_dim = self.input_shape[1]
        self.locs = self.add_param(locs, (num_centers, in_dim), name='locs',
                                   regularizable=False)
        self.log_sigma = self.add_param(log_sigma, (), name='log_sigma')
layers.py (project: opt-mmd, author: dougalsutherland)
def __init__(self, incoming, num_freqs,
                 freqs=init.Normal(std=1), log_sigma=init.Constant(0.),
                 **kwargs):
        super(SmoothedCFLayer, self).__init__(incoming, **kwargs)
        self.num_freqs = num_freqs

        assert len(self.input_shape) == 2
        in_dim = self.input_shape[1]
        self.freqs = self.add_param(freqs, (num_freqs, in_dim), name='freqs')
        self.log_sigma = self.add_param(log_sigma, (), name='log_sigma')
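
Both opt-mmd layers register log_sigma with shape (), i.e. as a single scalar
shared variable, and store the log of the bandwidth so that the recovered
sigma stays positive. A standalone check of that convention (a sketch using
only Lasagne and NumPy):

import numpy as np
from lasagne import init

log_sigma = init.Constant(0.)(())   # 0-d array holding log(sigma) = 0.0
sigma = np.exp(log_sigma)           # bandwidth recovered as exp(log_sigma) = 1.0
print(log_sigma.shape, sigma)       # () 1.0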
layers.py (project: LSTM-and-maxlayer-for-SNV-based-phenotype-prediction, author: widmi)
def __init__(self, incoming, p=0.5, rescale=True, **kwargs):
        super(InputDropoutLayer, self).__init__(incoming, **kwargs)
        self.p = p
        self.rescale = rescale
        constant_dim = 1
        self.constant_dim = constant_dim
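        # mask shape omits axis constant_dim (axis 1); the same dropout pattern can then be broadcast along it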
        self.dropoutshape = self.input_shape[:constant_dim] + self.input_shape[constant_dim+1:]
        self.dropoutlayer = lasagne.layers.DropoutLayer(incoming=self.dropoutshape, p=p, rescale=rescale, **kwargs)

        # add parameters to this layer
        self.params.update(self.dropoutlayer.params)

        self.dropoutmask = self.add_param(init.Constant(1), 
                                          self.dropoutshape, 'dropoutmask',
                                          trainable=False, regularizable=False)
chef.py (project: baal, author: braingineer)
def make_b(self, size, name):
        P = create_param(Constant(0.), (size, ), name=name)
        self.parameters[name] = P
        return P

    ######################
    ### layer management
    ######################
DropLSTMLayer.py (project: kaggle-quora-solution-8th, author: qqgeogor)
def __init__(self, incoming, num_units,
                 ingate=Gate(),
                 forgetgate=Gate(),
                 cell=Gate(W_cell=None, nonlinearity=nonlinearities.tanh),
                 outgate=Gate(),
                 nonlinearity=nonlinearities.tanh,
                 cell_init=init.Constant(0.),
                 hid_init=init.Constant(0.),
                 backwards=False,
                 learn_init=False,
                 peepholes=True,
                 gradient_steps=-1,
                 grad_clipping=0,
                 unroll_scan=False,
                 precompute_input=True,
                 mask_input=None,
                 only_return_final=False,
                 inter_drop=0.05,
                 **kwargs):
        super(DropLSTMLayer, self).__init__(incoming, num_units,
                                            ingate, forgetgate, cell, outgate,
                                            nonlinearity, cell_init, hid_init,
                                            backwards, learn_init, peepholes,
                                            gradient_steps, grad_clipping, unroll_scan,
                                            precompute_input, mask_input, only_return_final, **kwargs)
        self.inter_retain_prob = 1 - inter_drop
        self._srng = RandomStreams(
            lasagne.random.get_rng().randint(1, 2147462579))
density_layers.py (project: VAESSL, author: lovecambi)
def __init__(self, incoming, axes='auto', epsilon=1e-6, alpha=1e-2,
                 beta=init.Constant(0), gamma=init.Constant(1),
                 mean=init.Constant(0), std=init.Constant(1), **kwargs):
        super(MyBatchNormLayer, self).__init__(incoming, **kwargs)

        if axes == 'auto':
            # default: normalize over all but the second axis
            axes = (0,) + tuple(range(2, len(self.input_shape)))
        elif isinstance(axes, int):
            axes = (axes,)
        self.axes = axes

        self.epsilon = epsilon
        self.alpha = alpha

        # create parameters, ignoring all dimensions in axes
        shape = [size for axis, size in enumerate(self.input_shape)
                 if axis not in self.axes]
        if any(size is None for size in shape):
            raise ValueError("BatchNormLayer needs specified input sizes for "
                             "all axes not normalized over.")
        if beta is None:
            self.beta = None
        else:
            self.beta = self.add_param(beta, shape, 'beta',
                                       trainable=True, regularizable=False)
        if gamma is None:
            self.gamma = None
        else:
            self.gamma = self.add_param(gamma, shape, 'gamma',
                                        trainable=True, regularizable=True)
        self.mean = self.add_param(mean, shape, 'mean',
                                   trainable=False, regularizable=False)
        self.std = self.add_param(std, shape, 'std',
                                  trainable=False, regularizable=False)
layers.py (project: aed-by-cnn, author: tweihaha)
def __init__(self, incoming, filter_size,
                 init_std=5., W_logstd=None,
                 stride=1, pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0, **kwargs):
        super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
        # convolution = conv1d_gpucorrmm_mc0
        # convolution = conv.conv1d_mc0
        # convolution = T.nnet.conv2d
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution

        # if self.filter_size[0] % 2 == 0:
        #     raise NotImplementedError(
        #         'GaussianConv1dLayer requires odd filter size.')

        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        if W_logstd is None:
            init_std = np.asarray(init_std, dtype=floatX)
            W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        self.W_logstd = self.add_param(W_logstd,
                                       (self.num_input_channels,),
                                       name="W_logstd",
                                       regularizable=False)
        self.W = self.make_gaussian_filter()
layers.py (project: aed-by-cnn, author: tweihaha)
def __init__(self, incoming, filter_size, init_std=5.,
                 stride=1, pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0, **kwargs):
        super(FixedGaussianScan1DLayer, self).__init__(incoming, **kwargs)
        # convolution = conv1d_gpucorrmm_mc0
        # convolution = conv.conv1d_mc0
        # convolution = T.nnet.conv2d
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution

        # if self.filter_size[0] % 2 == 0:
        #     raise NotImplementedError(
        #         'GaussianConv1dLayer requires odd filter size.')

        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        init_std = np.asarray(init_std, dtype=floatX)
        W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        self.W_logstd = self.add_param(W_logstd,
                                       (self.num_input_channels,),
                                       name="W_logstd",
                                       regularizable=False,
                                       trainable=False)
        self.W = self.make_gaussian_filter()
network.py (project: rllab, author: rll)
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
tgate.py (project: time_lstm, author: DarryO)
def __init__(self, W_t=init.Normal(0.1), W_x=init.Normal(0.1),
            b=init.Constant(0.),
            nonlinearity_inside=nonlinearities.tanh,
            nonlinearity_outside=nonlinearities.sigmoid):
        self.W_t = W_t
        self.W_x = W_x
        self.b = b
        self.nonlinearity_inside = nonlinearity_inside
        self.nonlinearity_outside = nonlinearity_outside
plstm.py (project: time_lstm, author: DarryO)
def __init__(self,
                 Period=init.Uniform((10, 100)),
                 Shift=init.Uniform((0., 1000.)),
                 On_End=init.Constant(0.05)):
        self.Period = Period
        self.Shift = Shift
        self.On_End = On_End
network.py (project: maml_rl, author: cbfinn)
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
memory.py (project: MEM_DGM, author: zhenxuan00)
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes

        if u_shp[-1] != z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal;"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
analysis_memory.py (project: MEM_DGM, author: zhenxuan00)
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(NormalizedAttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
analysis_memory.py (project: MEM_DGM, author: zhenxuan00)
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(AttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
analysis_memory.py (project: MEM_DGM, author: zhenxuan00)
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes

        if u_shp[-1] != z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal;"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
BNLayer.py (project: 2WayNet, author: aviveise)
def __init__(self, incoming,
                 gamma=init.Uniform([0.95, 1.05]),
                 beta=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 epsilon=0.001,
                 **kwargs):
        super(BatchNormalizationLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_units = int(numpy.prod(self.input_shape[1:]))
        self.gamma = self.add_param(gamma, (self.num_units,), name="BatchNormalizationLayer:gamma", regularizable=True,
                                    gamma=True, trainable=True)
        self.beta = self.add_param(beta, (self.num_units,), name="BatchNormalizationLayer:beta", regularizable=False)
        self.epsilon = epsilon

        self.mean_inference = theano.shared(
            numpy.zeros((1, self.num_units), dtype=theano.config.floatX),
            borrow=True,
            broadcastable=(True, False))
        self.mean_inference.name = "shared:mean"

        self.variance_inference = theano.shared(
            numpy.zeros((1, self.num_units), dtype=theano.config.floatX),
            borrow=True,
            broadcastable=(True, False))
        self.variance_inference.name = "shared:variance"

