Python code examples for the GlorotUniform() class
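GlorotUniform is Lasagne's Xavier/Glorot uniform initializer: weights are drawn from a uniform distribution whose half-range is gain * sqrt(6 / (fan_in + fan_out)). The snippets below either pass it lazily as a layer's W argument or sample from it eagerly. A minimal sketch of the API these examples rely on:

import numpy as np
from lasagne.init import GlorotUniform

# As an initializer object, Lasagne layers call it with the parameter
# shape once that shape is known.
init = GlorotUniform()                 # gain=1.0 by default
W = init.sample((200, 100))            # eagerly draw a concrete matrix
assert W.shape == (200, 100)

# gain='relu' scales the range by sqrt(2); several snippets below use
# this for layers followed by rectifiers.
W_relu = GlorotUniform('relu').sample((200, 100))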

conv.py (project: NeuroNLP, author: XuezheMax)
def __init__(self, incoming, num_filters, filter_size, stride=1,
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=True,
                 convolution=conv.conv1d_mc0, **kwargs):
        if isinstance(incoming, tuple):
            input_shape = incoming
        else:
            input_shape = incoming.output_shape

        # Retrieve the supplied name, if it exists; otherwise use ''
        if 'name' in kwargs:
            basename = kwargs['name'] + '.'
            # Create a separate version of kwargs for the contained layers
            # which does not include 'name'
            layer_kwargs = dict((key, arg) for key, arg in kwargs.items() if key != 'name')
        else:
            basename = ''
            layer_kwargs = kwargs
        self.conv1d = Conv1DLayer(InputLayer((None,) + input_shape[2:]), num_filters, filter_size, stride, pad,
                                  untie_biases, W, b, nonlinearity, flip_filters, convolution, name=basename + "conv1d",
                                  **layer_kwargs)
        self.W = self.conv1d.W
        self.b = self.conv1d.b
        super(ConvTimeStep1DLayer, self).__init__(incoming, **kwargs)
crf.py (project: NeuroNLP, author: XuezheMax)
def __init__(self, incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.

        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(ChainCRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels + 1
        self.pad_label_index = num_labels

        num_inputs = self.input_shape[2]
        self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels), name="W")

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels, self.num_labels), name="b", regularizable=False)
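The parameter shapes hint at the scoring function even though the forward pass is not part of this excerpt: W of shape (num_inputs, num_labels, num_labels) lets each input vector parameterize a full label-pair transition matrix, b adds an input-independent transition bias, and the extra label (num_labels + 1, with pad_label_index = num_labels) handles padded positions. One plausible reading, assuming the usual linear-chain CRF factorization:

    psi_t(y_{t-1}, y_t) = x_t^T W[:, y_{t-1}, y_t] + b[y_{t-1}, y_t]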
crf.py (project: NeuroNLP, author: XuezheMax)
def __init__(self, incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
                 b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(TreeAffineCRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        dim_inputs = self.input_shape[2]

        # add parameters
        self.W_h = self.add_param(W_h, (dim_inputs, self.num_labels), name='W_h')

        self.W_c = self.add_param(W_c, (dim_inputs, self.num_labels), name='W_c')

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
crf.py (project: NeuroNLP, author: XuezheMax)
def __init__(self, incoming, num_labels, mask_input=None, U=init.GlorotUniform(), W_h=init.GlorotUniform(),
                 W_c=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(TreeBiAffineCRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        dim_inputs = self.input_shape[2]

        # add parameters
        self.U = self.add_param(U, (dim_inputs, dim_inputs, self.num_labels), name='U')
        self.W_h = None if W_h is None else self.add_param(W_h, (dim_inputs, self.num_labels), name='W_h')
        self.W_c = None if W_c is None else self.add_param(W_c, (dim_inputs, self.num_labels), name='W_c')

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
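The shapes of U (dim_inputs, dim_inputs, num_labels), W_h, W_c, and b match a biaffine scorer in the style of Dozat and Manning's dependency parser. Assuming that formulation (the forward pass is not shown here), the score of label l for a head vector h and child vector c would be:

    s_l(h, c) = h^T U[:, :, l] c + h^T W_h[:, l] + c^T W_c[:, l] + b[l]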
graph.py (project: NeuroNLP, author: XuezheMax)
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        self.vertex_shape = incoming_vertex.output_shape
        self.edge_shape = incoming_edge.output_shape

        self.input_shape = incoming_vertex.output_shape
        incomings = [incoming_vertex, incoming_edge]
        self.vertex_incoming_index = 0
        self.edge_incoming_index = 1
        super(GraphConvLayer, self).__init__(incomings, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
custom_layers.py (project: MachineComprehension, author: sa-j)
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(CustomDense, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        num_inputs = self.input_shape[-1]

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b",
                                    regularizable=False)
parser.py (project: LasagneNLP, author: XuezheMax)
def __init__(self, incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
                 b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(DepParserLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        num_inputs = self.input_shape[2]

        # add parameters
        self.W_h = self.add_param(W_h, (num_inputs, self.num_labels), name='W_h')

        self.W_c = self.add_param(W_c, (num_inputs, self.num_labels), name='W_c')

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
crf.py (project: LasagneNLP, author: XuezheMax)
def __init__(self, incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.

        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(CRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels + 1
        self.pad_label_index = num_labels

        num_inputs = self.input_shape[2]
        self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels), name="W")

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels, self.num_labels), name="b", regularizable=False)
graph.py (project: LasagneNLP, author: XuezheMax)
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        self.vertex_shape = incoming_vertex.output_shape
        self.edge_shape = incoming_edge.output_shape

        self.input_shape = incoming_vertex.output_shape
        incomings = [incoming_vertex, incoming_edge]
        self.vertex_incoming_index = 0
        self.edge_incoming_index = 1
        super(GraphConvLayer, self).__init__(incomings, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
highway.py (project: LasagneNLP, author: XuezheMax)
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        if b_h is None:
            self.b_h = None
        else:
            self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)

        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        if b_t is None:
            self.b_t = None
        else:
            self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
memory.py (project: MEM_DGM, author: thu-ml)
def __init__(self, incoming, n_slots, d_slots, C=init.GlorotUniform(), M=init.Normal(),
                 b=init.Constant(0.), nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(MemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        self.M = self.add_param(M, (n_slots, d_slots), name="M") # memory slots
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
sde_autoencoder.py (project: ip-avsr, author: lzuwei)
def build_encoder_layers(input_size, encode_size, sigma=0.5):
    """
    builds an autoencoder with gaussian noise layer
    :param input_size: input size
    :param encode_size: encoded size
    :param sigma: gaussian noise standard deviation
    :return: Weights of encoder layer, denoising autoencoder layer
    """
    W = theano.shared(GlorotUniform().sample(shape=(input_size, encode_size)))

    layers = [
        (InputLayer, {'shape': (None, input_size)}),
        (GaussianNoiseLayer, {'name': 'corrupt', 'sigma': sigma}),
        (DenseLayer, {'name': 'encoder', 'num_units': encode_size, 'nonlinearity': sigmoid, 'W': W}),
        (DenseLayer, {'name': 'decoder', 'num_units': input_size, 'nonlinearity': linear, 'W': W.T}),
    ]
    return W, layers
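The (LayerClass, kwargs) tuples above follow the nolearn-style layer specification, and passing W.T to the decoder ties its weights to the encoder's. A hedged usage sketch, assuming nolearn is available (the training setup is illustrative, not from the source project):

from nolearn.lasagne import NeuralNet
from lasagne.updates import adam
from lasagne.objectives import squared_error

W, layers = build_encoder_layers(input_size=1200, encode_size=50)
ae = NeuralNet(
    layers=layers,
    update=adam,
    objective_loss_function=squared_error,
    regression=True,      # reconstruct real-valued inputs
    max_epochs=10,
)
# ae.fit(X, X)  # denoising autoencoder: the target is the clean input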
memory.py (project: MEM_DGM, author: zhenxuan00)
def __init__(self, incoming, n_slots, d_slots, C=init.GlorotUniform(), M=init.Normal(),
                 b=init.Constant(0.), nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(MemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        self.M = self.add_param(M, (n_slots, d_slots), name="M") # memory slots
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
LocallyDenseLayer.py (project: 2WayNet, author: aviveise)
def __init__(self, incoming, num_units,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, name=None, **kwargs):
        """
        An extention of a regular dense layer, enables the sharing of weight between two tied hidden layers. In order
        to tie two layers, the first should be initialized with an initialization function for the weights, the other
        should get the weight matrix of the first at input
        :param incoming: the input layer of this layer
        :param num_units: output size
        :param W: weight initialization, can be a initialization function or a given matrix
        :param b: bias initialization
        :param nonlinearity: non linearity function
        :param name: string
        :param kwargs:
        """
        super(TiedDenseLayer, self).__init__(incoming, num_units, W, b, nonlinearity, name=name)

        if not isinstance(W, lasagne.init.Initializer):
            self.params[self.W].remove('trainable')
            self.params[self.W].remove('regularizable')

        # self.b may be a Theano variable; compare against None rather than
        # relying on its truth value
        if self.b is not None and not isinstance(b, lasagne.init.Initializer):
            self.params[self.b].remove('trainable')
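Per the docstring, tying works by building the first layer from an initializer and handing its transposed weight matrix to the second; since the matrix is then not an Initializer, __init__ strips the trainable and regularizable tags so only the first layer's copy is updated. A hedged sketch (TiedDenseLayer as defined above; shapes are illustrative):

import lasagne.init as init
from lasagne.layers import InputLayer

l_in = InputLayer((None, 784))
enc = TiedDenseLayer(l_in, num_units=128, W=init.GlorotUniform(), name='enc')
# The decoder reuses the encoder's weights transposed, so encoder and
# decoder share a single underlying parameter matrix.
dec = TiedDenseLayer(enc, num_units=784, W=enc.W.T, name='dec')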
dssm.py (project: recom-system, author: tizot)
def build_multi_dssm(input_var=None, num_samples=None, num_entries=6, num_ngrams=42**3, num_hid1=300, num_hid2=300, num_out=128):
    """Builds a DSSM structure in a Lasagne/Theano way.

    The built DSSM is the neural network that computes the projection of only one paper.
    The input ``input_var`` should have two dimensions: (``num_samples * num_entries``, ``num_ngrams``).
    The output is then computed in a batch way: one paper at a time, but all papers from the same sample in the dataset are grouped
    (cited papers, citing papers and ``num_entries - 2`` irrelevant papers).

    Args:
        input_var (:class:`theano.tensor.TensorType` or None): symbolic input variable of the DSSM
        num_samples (int): the number of samples in the batch input dataset (number of rows)
        num_entries (int): the number of compared papers in the DSSM structure
        num_ngrams (int): the size of the vocabulary
        num_hid1 (int): the number of units in the first hidden layer
        num_hid2 (int): the number of units in the second hidden layer
        num_out (int): the number of units in the output layer

    Returns:
        :class:`lasagne.layers.Layer`: the output layer of the DSSM
    """

    assert (num_entries > 2)

    # Initialise input layer
    if num_samples is None:
        num_rows = None
    else:
        num_rows = num_samples * num_entries

    l_in = layers.InputLayer(shape=(num_rows, num_ngrams), input_var=input_var)

    # Initialise the hidden and output layers of the DSSM
    l_hid1 = layers.DenseLayer(l_in, num_units=num_hid1, nonlinearity=nonlinearities.tanh, W=init.GlorotUniform())
    l_hid2 = layers.DenseLayer(l_hid1, num_units=num_hid2, nonlinearity=nonlinearities.tanh, W=init.GlorotUniform())
    l_out = layers.DenseLayer(l_hid2, num_units=num_out, nonlinearity=nonlinearities.tanh, W=init.GlorotUniform())

    l_out = layers.ExpressionLayer(l_out, lambda X: X / X.norm(2), output_shape='auto')

    return l_out
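A hedged usage sketch wiring the DSSM to a symbolic input and compiling a projection function (standard Lasagne/Theano workflow; variable names are illustrative). Note that X.norm(2) in the ExpressionLayer above appears to take the norm of the whole output tensor rather than a per-row norm, so rows are scaled jointly:

import theano
import theano.tensor as T
from lasagne import layers

input_var = T.matrix('ngram_counts')
l_out = build_multi_dssm(input_var=input_var, num_samples=None, num_entries=6)
projection = layers.get_output(l_out, deterministic=True)
project_fn = theano.function([input_var], projection)
# project_fn expects an array of shape (num_samples * num_entries, num_ngrams)
# and returns one num_out-dimensional projection per paper.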
network.py (project: third_person_im, author: bstadie)
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
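The parameter names map onto the standard GRU recurrence. Assuming the conventional formulation (the step function is not part of this excerpt), with sigma the gate nonlinearity and phi the hidden nonlinearity:

    r_t = sigma(x_t W_xr + h_{t-1} W_hr + b_r)
    u_t = sigma(x_t W_xu + h_{t-1} W_hu + b_u)
    c_t = phi(x_t W_xc + (r_t * h_{t-1}) W_hc + b_c)
    h_t = (1 - u_t) * h_{t-1} + u_t * c_t

The same GRULayer definition recurs below in the rllabplusplus, rllab, and maml_rl snippets.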
began_network.py (project: began, author: davidtellez)
def get_W(network, layer_name):
    if (network is not None) and (layer_name in network):
        W = network[layer_name].W
    else:
        W = GlorotUniform()  # default value in Lasagne
    return W
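Both return types are valid for a Lasagne layer's W argument: a shared variable reuses the pretrained weights, while the GlorotUniform fallback samples fresh ones. A hedged sketch (pretrained_net and the layer name are hypothetical):

from lasagne.layers import InputLayer, DenseLayer

l_in = InputLayer((None, 256))
# pretrained_net: assumed to be a dict mapping layer names to layers
l_dense = DenseLayer(l_in, num_units=128, W=get_W(pretrained_net, 'dense1'))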
network.py (project: rllabplusplus, author: shaneshixiang)
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
analysis_memory.py (project: MEM_DGM, author: thu-ml)
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(NormalizedAttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
analysis_memory.py (project: MEM_DGM, author: thu-ml)
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(AttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
layers.py (project: drmad, author: bigaidream-projects)
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 num_leading_axes=1, **kwargs):
        super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        if num_leading_axes >= len(self.input_shape):
            raise ValueError(
                    "Got num_leading_axes=%d for a %d-dimensional input, "
                    "leaving no trailing axes for the dot product." %
                    (num_leading_axes, len(self.input_shape)))
        elif num_leading_axes < -len(self.input_shape):
            raise ValueError(
                    "Got num_leading_axes=%d for a %d-dimensional input, "
                    "requesting more trailing axes than there are input "
                    "dimensions." % (num_leading_axes, len(self.input_shape)))
        self.num_leading_axes = num_leading_axes

        if any(s is None for s in self.input_shape[num_leading_axes:]):
            raise ValueError(
                    "A DenseLayer requires a fixed input shape (except for "
                    "the leading axes). Got %r for num_leading_axes=%d." %
                    (self.input_shape, self.num_leading_axes))
        num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b",
                                    regularizable=False)

        if args.regL1 is True:
            self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                     (num_inputs, num_units), name="L1")
        if args.regL2 is True:
            self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                     (num_inputs, num_units), name="L2")
bidnn.py (project: BiDNN, author: v-v)
def __create_toplogy__(self, input_var_first=None, input_var_second=None):
        # define network topology
        if (self.conf.rep % 2 != 0):
            raise ValueError("Representation size should be divisible by two as it's formed by combining two crossmodal translations", self.conf.rep)

        # input layers
        l_in_first  = InputLayer(shape=(self.conf.batch_size, self.conf.mod1size), input_var=input_var_first)
        l_in_second = InputLayer(shape=(self.conf.batch_size, self.conf.mod2size), input_var=input_var_second)

        # first -> second
        l_hidden1_first   = DenseLayer(l_in_first, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())         # enc1
        l_hidden2_first   = DenseLayer(l_hidden1_first, num_units=self.conf.rep//2, nonlinearity=self.conf.act, W=GlorotUniform()) # enc2
        l_hidden2_first_d = DropoutLayer(l_hidden2_first, p=self.conf.dropout)
        l_hidden3_first   = DenseLayer(l_hidden2_first_d, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())    # dec1
        l_out_first       = DenseLayer(l_hidden3_first, num_units=self.conf.mod2size, nonlinearity=self.conf.act, W=GlorotUniform()) # dec2

        if self.conf.untied:
            # FREE
            l_hidden1_second   = DenseLayer(l_in_second, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())         # enc1
            l_hidden2_second   = DenseLayer(l_hidden1_second, num_units=self.conf.rep//2, nonlinearity=self.conf.act, W=GlorotUniform()) # enc2
            l_hidden2_second_d = DropoutLayer(l_hidden2_second, p=self.conf.dropout)
            l_hidden3_second   = DenseLayer(l_hidden2_second_d, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())    # dec1
            l_out_second       = DenseLayer(l_hidden3_second, num_units=self.conf.mod1size, nonlinearity=self.conf.act, W=GlorotUniform()) # dec2
        else:
            # TIED middle
            l_hidden1_second   = DenseLayer(l_in_second, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())             # enc1
            l_hidden2_second   = DenseLayer(l_hidden1_second, num_units=self.conf.rep//2, nonlinearity=self.conf.act, W=l_hidden3_first.W.T) # enc2
            l_hidden2_second_d = DropoutLayer(l_hidden2_second, p=self.conf.dropout)
            l_hidden3_second   = DenseLayer(l_hidden2_second_d, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=l_hidden2_first.W.T) # dec1
            l_out_second       = DenseLayer(l_hidden3_second, num_units=self.conf.mod1size, nonlinearity=self.conf.act, W=GlorotUniform())  # dec2

        l_out = concat([l_out_first, l_out_second])

        return l_out, l_hidden2_first, l_hidden2_second
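In the tied branch above, the second network's enc2 and dec1 layers reuse transposed views of the first network's dec1 and enc2 weights (W=l_hidden3_first.W.T and W=l_hidden2_first.W.T), so both crossmodal translations share one set of middle-layer parameters and gradients from either path update the same shared variables.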
base.py (project: gelato, author: ferrine)
def smart_init(shape):
    if len(shape) > 1:
        return init.GlorotUniform()(shape)
    else:
        return init.Normal()(shape)
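Because Lasagne accepts any callable mapping a shape to an array as a parameter specification, smart_init can be passed directly wherever an initializer is expected: matrices get Glorot initialization, vectors (e.g. biases) get Normal. A minimal sketch:

from lasagne.layers import InputLayer, DenseLayer

l_in = InputLayer((None, 100))
# W has shape (100, 50) -> GlorotUniform; b has shape (50,) -> Normal
l_hid = DenseLayer(l_in, num_units=50, W=smart_init, b=smart_init)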
sde_autoencoder.py (project: ip-avsr, author: lzuwei)
def build_encoder(input_var=None, encoder_units=None):
    layer_input = las.layers.InputLayer(shape=(None, 1200),
                                        input_var=input_var)
    layer_encoder = las.layers.DenseLayer(
        layer_input, num_units=encoder_units,
        nonlinearity=las.nonlinearities.sigmoid,
        W=las.init.GlorotUniform())
    layer_decoder = las.layers.DenseLayer(layer_encoder, num_units=1200,
                                          nonlinearity=None,
                                          W=layer_encoder.W.T)
    return layer_decoder
sde_autoencoder.py (project: ip-avsr, author: lzuwei)
def build_bottleneck_layer(input_size, encode_size, sigma=0.3):
    W = theano.shared(GlorotUniform().sample(shape=(input_size, encode_size)))

    layers = [
        (InputLayer, {'shape': (None, input_size)}),
        (GaussianNoiseLayer, {'name': 'corrupt', 'sigma': sigma}),
        (DenseLayer, {'name': 'encoder', 'num_units': encode_size, 'nonlinearity': linear, 'W': W}),
        (DenseLayer, {'name': 'decoder', 'num_units': input_size, 'nonlinearity': linear, 'W': W.T}),
    ]
    return W, layers
FaceAlignment.py (project: DeepAlignmentNetwork, author: MarekKowalski)
def createCNN(self):
        net = {}
        net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)       
        print("Input shape: {0}".format(net['input'].output_shape))

        #STAGE 1
        net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
        net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
        net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

        net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

        net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)

        net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)

        net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
        net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))

        net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
        net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

        for i in range(1, self.nStages):
            self.addDANStage(i + 1, net)

        net['output'] = net['s' + str(self.nStages) + '_landmarks']

        return net
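GlorotUniform('relu') passes gain='relu' to the initializer, which Lasagne maps to a gain of sqrt(2) (equivalent to GlorotUniform(gain=np.sqrt(2))); this compensates for the variance halving introduced by the rectifiers that follow each batch-normalized convolution and dense layer here.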
FaceAlignmentTraining.py (project: DeepAlignmentNetwork, author: MarekKowalski)
def createCNN(self):
        net = {}
        net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)       
        print("Input shape: {0}".format(net['input'].output_shape))

        #STAGE 1
        net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
        net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
        net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

        net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

        net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)

        net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)

        net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
        net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))

        net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
        net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

        for i in range(1, self.nStages):
            self.addDANStage(i + 1, net)

        net['output'] = net['s' + str(self.nStages) + '_landmarks']

        return net
network.py (project: rllab, author: rll)
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
network.py (project: maml_rl, author: cbfinn)
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
analysis_memory.py (project: MEM_DGM, author: zhenxuan00)
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(NormalizedAttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)

