Python examples of the Constant() class
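For context before the examples: `Constant(c)` from `lasagne.init` builds an initializer whose `sample(shape)` returns an array filled with `c`; layers invoke it when `add_param` receives an initializer rather than concrete values. A minimal sketch, assuming Lasagne (and its Theano dependency) is installed:

from lasagne import init

# Constant(c) builds an initializer; sample(shape) returns an array of c's.
b_init = init.Constant(0.)
print(b_init.sample((4,)))                  # [ 0.  0.  0.  0.]
print(init.Constant(0.05).sample((2, 3)))   # 2x3 array filled with 0.05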

conv.py — project: NeuroNLP, author: XuezheMax
def __init__(self, incoming, num_filters, filter_size, stride=1,
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=True,
                 convolution=conv.conv1d_mc0, **kwargs):
        if isinstance(incoming, tuple):
            input_shape = incoming
        else:
            input_shape = incoming.output_shape

        # Retrieve the supplied name, if it exists; otherwise use ''
        if 'name' in kwargs:
            basename = kwargs['name'] + '.'
            # Create a separate version of kwargs for the contained layers
            # which does not include 'name'
            layer_kwargs = dict((key, arg) for key, arg in kwargs.items() if key != 'name')
        else:
            basename = ''
            layer_kwargs = kwargs
        self.conv1d = Conv1DLayer(InputLayer((None,) + input_shape[2:]), num_filters, filter_size, stride, pad,
                                  untie_biases, W, b, nonlinearity, flip_filters, convolution, name=basename + "conv1d",
                                  **layer_kwargs)
        self.W = self.conv1d.W
        self.b = self.conv1d.b
        super(ConvTimeStep1DLayer, self).__init__(incoming, **kwargs)
crf.py — project: NeuroNLP, author: XuezheMax
def __init__(self, incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.

        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(ChainCRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels + 1
        self.pad_label_index = num_labels

        num_inputs = self.input_shape[2]
        self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels), name="W")

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels, self.num_labels), name="b", regularizable=False)
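
For intuition, a hedged NumPy sketch (all names below are illustrative, not from the source) of the role the (num_inputs, num_labels, num_labels) tensor W plays in a chain CRF layer like this: each input feature vector is contracted against W to produce a matrix of transition scores between consecutive labels, with b acting as a shared transition bias.

import numpy as np

num_inputs, num_labels = 5, 4          # hypothetical sizes
x = np.random.randn(num_inputs)        # features for one time step
W = np.random.randn(num_inputs, num_labels, num_labels)
b = np.zeros((num_labels, num_labels))

# scores[i, j]: score of transitioning from label i to label j at this step
scores = np.einsum('d,dij->ij', x, W) + b
print(scores.shape)                    # (4, 4)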
crf.py — project: NeuroNLP, author: XuezheMax
def __init__(self, incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
                 b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(TreeAffineCRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        dim_inputs = self.input_shape[2]

        # add parameters
        self.W_h = self.add_param(W_h, (dim_inputs, self.num_labels), name='W_h')

        self.W_c = self.add_param(W_c, (dim_inputs, self.num_labels), name='W_c')

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
crf.py — project: NeuroNLP, author: XuezheMax
def __init__(self, incoming, num_labels, mask_input=None, U=init.GlorotUniform(), W_h=init.GlorotUniform(),
                 W_c=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(TreeBiAffineCRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        dim_inputs = self.input_shape[2]

        # add parameters
        self.U = self.add_param(U, (dim_inputs, dim_inputs, self.num_labels), name='U')
        self.W_h = None if W_h is None else self.add_param(W_h, (dim_inputs, self.num_labels), name='W_h')
        self.W_c = None if W_c is None else self.add_param(W_c, (dim_inputs, self.num_labels), name='W_c')

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
highway.py — project: NeuroNLP, author: XuezheMax
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        if b_h is None:
            self.b_h = None
        else:
            self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)

        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        if b_t is None:
            self.b_t = None
        else:
            self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
answer_pointer_layer.py — project: MachineComprehension, author: sa-j
def __init__(self, incoming, num_units, max_steps, peepholes=False, mask_input=None, **kwargs):
        """
        initialization
        :param incoming: bidirectional mLSTM for passane
        :param num_units:
        :param max_steps: max num steps to generate answer words, can be tensor scalar variable
        :param peepholes:
        :param mask_input: passage's length mask
        :param kwargs:
        """
        super(AnsPointerLayer, self).__init__(incoming, num_units, peepholes=peepholes,
                                              precompute_input=False, mask_input=mask_input,
                                              only_return_final=False, **kwargs)
        self.max_steps = max_steps
        # initializes attention weights
        input_shape = self.input_shapes[0]
        num_inputs = np.prod(input_shape[2:])
        self.V_pointer = self.add_param(init.Normal(0.1), (num_inputs, num_units), 'V_pointer')
        # doesn't need transpose
        self.v_pointer = self.add_param(init.Normal(0.1), (num_units, 1), 'v_pointer')
        self.W_a_pointer = self.add_param(init.Normal(0.1), (num_units, num_units), 'W_a_pointer')
        self.b_a_pointer = self.add_param(init.Constant(0.), (1, num_units), 'b_a_pointer')
        self.c_pointer = self.add_param(init.Constant(0.), (1, 1), 'c_pointer')
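
A hedged NumPy sketch of the pointer attention these parameters usually implement (a Match-LSTM/Pointer-Net style score; the actual forward pass lives in this layer's get_output_for, which is not shown here, and all variable names below are illustrative):

import numpy as np

seq_len, num_inputs, num_units = 6, 8, 5       # hypothetical sizes
H = np.random.randn(seq_len, num_inputs)       # passage encodings
h_a = np.random.randn(num_units)               # answer-LSTM state
V = np.random.randn(num_inputs, num_units)     # plays the role of V_pointer
W_a = np.random.randn(num_units, num_units)    # W_a_pointer
b_a = np.zeros(num_units)                      # b_a_pointer
v = np.random.randn(num_units, 1)              # v_pointer
c = 0.0                                        # c_pointer

G = np.tanh(H.dot(V) + h_a.dot(W_a) + b_a)     # (seq_len, num_units)
scores = G.dot(v) + c                          # (seq_len, 1)
probs = np.exp(scores) / np.exp(scores).sum()  # softmax over passage positions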
custom_layers.py — project: MachineComprehension, author: sa-j
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(CustomDense, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        num_inputs = self.input_shape[-1]

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b",
                                    regularizable=False)
custom_layers.py — project: MachineComprehension, author: sa-j
def __init__(self, incoming, num_units, max_steps, peepholes=False, mask_input=None, **kwargs):
        """
        initialization
        :param incoming: bidirectional mLSTM for passane
        :param num_units:
        :param max_steps: max num steps to generate answer words, can be tensor scalar variable
        :param peepholes:
        :param mask_input: passage's length mask
        :param kwargs:
        """
        super(AnsPointerLayer, self).__init__(incoming, num_units, peepholes=peepholes,
                                              precompute_input=False, mask_input=mask_input,
                                              only_return_final=False, **kwargs)
        self.max_steps = max_steps
        # initializes attention weights
        input_shape = self.input_shapes[0]
        num_inputs = np.prod(input_shape[2:])
        self.V_pointer = self.add_param(init.Normal(0.1), (num_inputs, num_units), 'V_pointer')
        # doesn't need transpose
        self.v_pointer = self.add_param(init.Normal(0.1), (num_units, 1), 'v_pointer')
        self.W_a_pointer = self.add_param(init.Normal(0.1), (num_units, num_units), 'W_a_pointer')
        self.b_a_pointer = self.add_param(init.Constant(0.), (num_units, ), 'b_a_pointer')
        c_pointer = theano.shared(np.array([0.], dtype='float32'), name='c_pointer', broadcastable=(True, ))
        self.c_pointer = self.add_param(c_pointer, (1,), 'c_pointer')
parser.py — project: LasagneNLP, author: XuezheMax
def __init__(self, incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
                 b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(DepParserLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        num_inputs = self.input_shape[2]

        # add parameters
        self.W_h = self.add_param(W_h, (num_inputs, self.num_labels), name='W_h')

        self.W_c = self.add_param(W_c, (num_inputs, self.num_labels), name='W_c')

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
crf.py — project: LasagneNLP, author: XuezheMax
def __init__(self, incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.

        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(CRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels + 1
        self.pad_label_index = num_labels

        num_inputs = self.input_shape[2]
        self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels), name="W")

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels, self.num_labels), name="b", regularizable=False)
graph.py — project: LasagneNLP, author: XuezheMax
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        self.vertex_shape = incoming_vertex.output_shape
        self.edge_shape = incoming_edge.output_shape

        self.input_shape = incoming_vertex.output_shape
        incomings = [incoming_vertex, incoming_edge]
        self.vertex_incoming_index = 0
        self.edge_incoming_index = 1
        super(GraphConvLayer, self).__init__(incomings, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
highway.py — project: LasagneNLP, author: XuezheMax
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        if b_h is None:
            self.b_h = None
        else:
            self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)

        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        if b_t is None:
            self.b_t = None
        else:
            self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
memory.py — project: MEM_DGM, author: thu-ml
def __init__(self, incoming, n_slots, d_slots, C=init.GlorotUniform(), M=init.Normal(),
                 b=init.Constant(0.), nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(MemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        self.M = self.add_param(M, (n_slots, d_slots), name="M") # memory slots
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
layers.py — project: gogh-figure, author: joelmoniz
def __init__(self, incoming, num_styles=None, epsilon=1e-4,
                 beta=Constant(0), gamma=Constant(1), **kwargs):
        super(InstanceNormLayer, self).__init__(incoming, **kwargs)

        self.axes = (2, 3)
        self.epsilon = epsilon

        if num_styles is None:
            shape = (self.input_shape[1],)
        else:
            shape = (num_styles, self.input_shape[1])

        if beta is None:
            self.beta = None
        else:
            self.beta = self.add_param(beta, shape, 'beta',
                                       trainable=True, regularizable=False)
        if gamma is None:
            self.gamma = None
        else:
            self.gamma = self.add_param(gamma, shape, 'gamma',
                                        trainable=True, regularizable=True)
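
A hedged NumPy sketch of what an instance-norm layer like this computes: normalization over the spatial axes (2, 3) of an NCHW tensor, per sample and per channel, followed by the learned affine transform (shapes here are illustrative):

import numpy as np

x = np.random.randn(2, 4, 8, 8)              # (batch, channels, H, W)
mean = x.mean(axis=(2, 3), keepdims=True)
var = x.var(axis=(2, 3), keepdims=True)
x_hat = (x - mean) / np.sqrt(var + 1e-4)     # per-instance normalization
gamma = np.ones((1, 4, 1, 1))                # broadcastable gamma/beta
beta = np.zeros((1, 4, 1, 1))
out = gamma * x_hat + beta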
tgate.py — project: time_lstm, author: DarryO
def __init__(self, W_in=init.Normal(0.1), W_hid=init.Normal(0.1),
                 W_cell=init.Normal(0.1), W_to=init.Normal(0.1),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.sigmoid):
        self.W_in = W_in
        self.W_hid = W_hid
        self.W_to = W_to
        # Don't store a cell weight vector when W_cell is None
        if W_cell is not None:
            self.W_cell = W_cell
        self.b = b
        # For the nonlinearity, if None is supplied, use identity
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
merge_dense.py — project: crayimage, author: yandexdataschool
def __init__(self, incomings, num_units, nonlinearity=nonlinearities.sigmoid,
               W=init.Uniform(), b=init.Constant(0.0), **kwargs):
    super(MergeDense, self).__init__(incomings=incomings, **kwargs)

    self.num_units = num_units

    self.input_shapes = [inc.output_shape for inc in incomings]

    self.weights = [
      self.get_weights(W, shape=input_shape, name='W%d' % i)
      for i, input_shape in enumerate(self.input_shapes)
    ]

    self.b = self.add_param(b, (self.num_units,), name="b", regularizable=False)

    self.nonlinearity = nonlinearity
memory.py — project: MEM_DGM, author: zhenxuan00
def __init__(self, incoming, n_slots, d_slots, C=init.GlorotUniform(), M=init.Normal(),
                 b=init.Constant(0.), nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(MemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        self.M = self.add_param(M, (n_slots, d_slots), name="M") # memory slots
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
LocallyDenseLayer.py — project: 2WayNet, author: aviveise
def __init__(self, incoming, num_units,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, name=None, **kwargs):
        """
        An extention of a regular dense layer, enables the sharing of weight between two tied hidden layers. In order
        to tie two layers, the first should be initialized with an initialization function for the weights, the other
        should get the weight matrix of the first at input
        :param incoming: the input layer of this layer
        :param num_units: output size
        :param W: weight initialization, can be a initialization function or a given matrix
        :param b: bias initialization
        :param nonlinearity: non linearity function
        :param name: string
        :param kwargs:
        """
        super(TiedDenseLayer, self).__init__(incoming, num_units, W, b, nonlinearity, name=name)

        if not isinstance(W, lasagne.init.Initializer):
            self.params[self.W].remove('trainable')
            self.params[self.W].remove('regularizable')

        if self.b is not None and not isinstance(b, lasagne.init.Initializer):
            self.params[self.b].remove('trainable')
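
A hedged usage sketch (layer variable names are illustrative, and it assumes TiedDenseLayer above is in scope): tie a second layer to the first by passing the first layer's W, a shared variable rather than an initializer, which this constructor then marks as non-trainable:

from lasagne.layers import InputLayer

l_in = InputLayer((None, 100))
l1 = TiedDenseLayer(l_in, num_units=100)        # fresh GlorotUniform weights
l2 = TiedDenseLayer(l1, num_units=100, W=l1.W)  # reuses l1's (100, 100) weight matrix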
layers.py — project: melanoma-transfer, author: learningtitans
def conv_params(num_filters, filter_size=(3, 3), stride=(1, 1), border_mode='same',
                nonlinearity=rectify, W=init.Orthogonal(gain=1.0),
                b=init.Constant(0.05), untie_biases=False, **kwargs):
    args = {
        'num_filters': num_filters,
        'filter_size': filter_size,
        'stride': stride,
        'pad': border_mode,         # The new version has 'pad' instead of 'border_mode'
        'nonlinearity': nonlinearity,
        'W': W,
        'b': b,
        'untie_biases': untie_biases,
    }
    args.update(kwargs)
    return args
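
A hedged usage sketch (variable names are illustrative): the returned dict unpacks directly into a Lasagne convolution layer, assuming conv_params above is in scope:

from lasagne.layers import InputLayer, Conv2DLayer

l_in = InputLayer((None, 3, 32, 32))
l_conv = Conv2DLayer(l_in, **conv_params(32, filter_size=(5, 5)))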
layers.py — project: melanoma-transfer, author: learningtitans
def dense_params(num_units, nonlinearity=rectify, **kwargs):
    args = {
        'num_units': num_units,
        'nonlinearity': nonlinearity,
        'W': init.Orthogonal(1.0),
        'b': init.Constant(0.05),
    }
    args.update(kwargs)
    return args
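
And likewise for dense layers, continuing the sketch above (l_conv comes from the previous example):

from lasagne.layers import DenseLayer

l_dense = DenseLayer(l_conv, **dense_params(256))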
network.py — project: third_person_im, author: bstadie
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
custom_layers.py — project: MachineComprehension, author: sa-j
def __init__(self, incoming, num_units, ingate=Gate(), forgetgate=Gate(),
                 cell=Gate(W_cell=None, nonlinearity=nonlinearities.tanh), outgate=Gate(),
                 nonlinearity=nonlinearities.tanh, cell_init=init.Constant(0.), hid_init=init.Constant(0.),
                 backwards=False, learn_init=False, peepholes=True, gradient_steps=-1, grad_clipping=0,
                 unroll_scan=False, precompute_input=True, mask_input=None, **kwargs):
        super(CustomLSTMEncoder, self).__init__(incoming, num_units, ingate, forgetgate, cell, outgate, nonlinearity,
                                                cell_init, hid_init, backwards, learn_init, peepholes, gradient_steps,
                                                grad_clipping, unroll_scan, precompute_input, mask_input, False,
                                                **kwargs)
began_network.py — project: began, author: davidtellez
def get_b(network, layer_name):
    if (network is not None) and (layer_name in network):
        b = network[layer_name].b
    else:
        b = Constant(0.)  # default value in Lasagne
    return b
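
A brief hedged usage sketch (`network` here is a hypothetical name-to-layer dict, the mapping this function expects, or None):

network = None              # no pretrained network available
b = get_b(network, 'conv1') # falls back to Constant(0.), the Lasagne default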
network.py — project: rllabplusplus, author: shaneshixiang
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
batch_norm_layer.py — project: experiments, author: tencia
def __init__(self, incoming, axes='auto', epsilon=1e-4, alpha=0.1,
                 mode='low_mem', beta=init.Constant(0), gamma=init.Constant(1),
                 mean=init.Constant(0), inv_std=init.Constant(1), **kwargs):
        super(BatchNormLayer, self).__init__(incoming, **kwargs)

        if axes == 'auto':
            # default: normalize over all but the second axis
            axes = (0,) + tuple(range(2, len(self.input_shape)))
        elif isinstance(axes, int):
            axes = (axes,)
        self.axes = axes

        self.epsilon = epsilon
        self.alpha = alpha
        self.mode = mode

        # create parameters, ignoring all dimensions in axes
        shape = [size for axis, size in enumerate(self.input_shape)
                 if axis not in self.axes]
        if any(size is None for size in shape):
            raise ValueError("BatchNormLayer needs specified input sizes for "
                             "all axes not normalized over.")
        if beta is None:
            self.beta = None
        else:
            self.beta = self.add_param(beta, shape, 'beta',
                                       trainable=True, regularizable=False)
        if gamma is None:
            self.gamma = None
        else:
            self.gamma = self.add_param(gamma, shape, 'gamma',
                                        trainable=True, regularizable=True)
        self.mean = self.add_param(mean, shape, 'mean',
                                   trainable=False, regularizable=False)
        self.inv_std = self.add_param(inv_std, shape, 'inv_std',
                                      trainable=False, regularizable=False)
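
A hedged NumPy sketch of the inference-time computation these four parameter sets support: normalize with the stored mean/inv_std, then apply gamma and beta over all non-normalized axes (shapes here are illustrative, with axes=(0,)):

import numpy as np

x = np.random.randn(16, 10)                    # (batch, features)
mean = x.mean(axis=0)                          # stands in for the running mean
inv_std = 1.0 / np.sqrt(x.var(axis=0) + 1e-4)  # stands in for the running inv_std
gamma, beta = np.ones(10), np.zeros(10)
out = gamma * (x - mean) * inv_std + beta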
memory.py — project: MEM_DGM, author: thu-ml
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes

        if u_shp[-1] != z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal: "
                             "u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
analysis_memory.py — project: MEM_DGM, author: thu-ml
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(NormalizedAttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
analysis_memory.py — project: MEM_DGM, author: thu-ml
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(AttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
analysis_memory.py — project: MEM_DGM, author: thu-ml
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes

        if u_shp[-1] != z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal: "
                             "u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
DMN.py — project: DynamicMemoryNetworks, author: swstarlab
def __init__(self, W_in=Normal(0.1), W_hid=Normal(0.1),
                 b=Constant(0.), nonlinearity=nonlin.sigmoid):
        self.W_in  = W_in
        self.W_hid = W_hid
        self.b     = b
        if nonlinearity is None:
            self.nonlinearity = nonlin.identity
        else:
            self.nonlinearity = nonlinearity
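
A hedged NumPy sketch of how a gate's pieces are typically combined by the consuming recurrent layer (the Gate class itself only stores them; all names and sizes below are illustrative):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

num_in, num_hid = 6, 4                          # hypothetical sizes
x = np.random.randn(num_in)                     # current input
h = np.random.randn(num_hid)                    # previous hidden state
W_in = np.random.randn(num_in, num_hid) * 0.1   # ~ Normal(0.1)
W_hid = np.random.randn(num_hid, num_hid) * 0.1
b = np.zeros(num_hid)                           # Constant(0.)

gate = sigmoid(x.dot(W_in) + h.dot(W_hid) + b)  # gate activations in (0, 1)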

