Example source code for the Python Lambda() class

test_losses.py (project: segmentation_DLMI, author: imatge-upc)
def test_GWD(self):
        # Compute the expected categorical cross-entropy by hand
        indices = self.mock_y > 0
        selected_log = -np.log(self.mock_x_softmax[indices])
        self.loss = 0  # np.sum(selected_log) / np.sum(self.mock_y)
        # Create keras model with this activation and compile it
        model = Sequential()
        activation_layer = Lambda(lambda x: x,
                                  input_shape=self.data_shape[1:],
                                  output_shape=self.data_shape[1:]
                                  )
        model.add(activation_layer)
        model.compile('sgd', loss=gwd)

        # Evaluate the loss on mock data
        loss = model.evaluate(self.mock_y, self.mock_y, batch_size=1, verbose=0)
        # Assertions
        print('Expected loss: {}'.format(self.loss))
        print('Actual loss: {}'.format(loss))
        self.assertTrue(np.allclose(loss, self.loss),
                        msg='Categorical cross-entropy loss 3D does not produce the expected results')
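The identity Lambda above is a handy trick for testing a custom loss in isolation: with a pass-through layer, model.evaluate(x, y) simply returns loss(y, x). A minimal self-contained sketch of the same pattern with a built-in loss (assuming the tensorflow.keras API; the original test plugs in the project's gwd loss instead):

import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Lambda

# An identity Lambda turns model.evaluate() into a pure loss evaluator:
# the "prediction" is the input itself, so evaluate(x, y) returns loss(y, x).
model = Sequential([Lambda(lambda t: t, input_shape=(4,))])
model.compile('sgd', loss='categorical_crossentropy')

y_true = np.eye(4, dtype='float32')               # one-hot targets
y_pred = np.full((4, 4), 0.25, dtype='float32')   # uniform "predictions"
loss = model.evaluate(y_pred, y_true, verbose=0)
print(np.isclose(loss, -np.log(0.25)))            # True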
layers.py (project: deeppavlov, author: deepmipt)
def masked_softmax(tensor, mask, expand=2, axis=1):
    """Masked soft-max using Lambda and merge-multiplication.

    Args:
        tensor: tensor containing scores
        mask: mask for tensor where 1 - means values at this position and 0 - means void, padded, etc..
        expand: axis along which to repeat mask
        axis: axis along which to compute soft-max

    Returns:
        masked soft-max values
    """

    mask = tf.expand_dims(mask, axis=expand)
    exponentiate = Lambda(lambda x: K.exp(x - K.max(x, axis=axis, keepdims=True)))(tensor)
    masked = tf.multiply(exponentiate, mask)
    div = tf.expand_dims(tf.reduce_sum(masked, axis=axis), axis=axis)
    predicted = tf.divide(masked, div)
    return predicted
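To see the masking in action, the same arithmetic can be checked in plain TensorFlow 2 on a padded score tensor (toy shapes, not the project's):

import numpy as np
import tensorflow as tf

scores = tf.constant([[[1.0], [2.0], [3.0]]])   # (batch=1, len=3, 1)
mask = tf.constant([[1.0, 1.0, 0.0]])           # last position is padding

# Same arithmetic as masked_softmax with expand=2, axis=1:
e = tf.exp(scores - tf.reduce_max(scores, axis=1, keepdims=True))
masked = e * tf.expand_dims(mask, axis=2)
probs = masked / tf.reduce_sum(masked, axis=1, keepdims=True)

print(probs.numpy().squeeze())                  # padded slot gets probability 0
print(np.isclose(probs.numpy().sum(), 1.0))     # still normalizes to 1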
layers.py (project: deeppavlov, author: deepmipt)
def answer_start_pred(context_encoding, question_attention_vector, context_mask, W, dropout_rate):
    """Answer start prediction layer."""

    answer_start = Lambda(lambda arg: concatenate([arg[0], arg[1], arg[2]]))([
        context_encoding,
        question_attention_vector,
        multiply([context_encoding, question_attention_vector])])

    answer_start = TimeDistributed(Dense(W, activation='relu'))(answer_start)
    answer_start = Dropout(rate=dropout_rate)(answer_start)
    answer_start = TimeDistributed(Dense(1))(answer_start)

    # apply masking
    answer_start = Lambda(lambda q: masked_softmax(q[0], q[1]))([answer_start, context_mask])
    answer_start = Lambda(lambda q: flatten(q))(answer_start)
    return answer_start
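Note the pattern in the masking step: masked_softmax is built from raw TensorFlow ops, so it has to be wrapped in a Lambda before it can sit inside the Keras graph. A minimal illustration of the same wrapping (hypothetical toy shapes):

import tensorflow as tf
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model

# Raw tf ops must be wrapped in a Lambda before they can join a Keras graph.
scores = Input(shape=(5, 1))
mask = Input(shape=(5,))
out = Lambda(lambda q: q[0] * tf.expand_dims(q[1], axis=2))([scores, mask])
toy = Model([scores, mask], out)   # builds cleanly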
model.py (project: deeppavlov, author: deepmipt)
def create_attention_layer(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="attention_generator")
        return model
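The kcon constant implements a timestep-selection trick: after permuting to (batch, hidden, seq_len), dotting with a one-hot (seq_len, 1) column extracts the last timestep. A standalone check of the trick (toy sizes, seq_len standing in for self.max_sequence_length):

import numpy as np
from tensorflow.keras import backend as K

seq_len = 4
val = np.concatenate((np.zeros((seq_len - 1, 1)), np.ones((1, 1))), axis=0)
kcon = K.constant(val, dtype='float32')

x = K.constant(np.arange(24, dtype='float32').reshape(2, 3, seq_len))
last = K.dot(x, kcon)   # (2, 3, 1): only the last column survives
print(np.allclose(K.eval(last)[..., 0], K.eval(x)[:, :, -1]))   # True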
model.py (project: deeppavlov, author: deepmipt)
def create_attention_layer_f(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="att_generator_forw")
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_attention_layer_b(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.ones((1,1)), np.zeros((self.max_sequence_length-1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="att_generator_back")
        return model
model.py (project: deeppavlov, author: deepmipt)
def bilstm_woatt_model(self):
        """Define a model with bi-LSTM layers and without attention."""

        input_a = Input(shape=(self.max_sequence_length, self.embedding_dim,))
        input_b = Input(shape=(self.max_sequence_length, self.embedding_dim,))
        lstm_layer = self.create_lstm_layer_last(self.max_sequence_length)
        lstm_last_a = lstm_layer(input_a)
        lstm_last_b = lstm_layer(input_b)

        dist = Lambda(self.cosine_dist, output_shape=self.cosine_dist_output_shape,
                      name="similarity_network")([lstm_last_a, lstm_last_b])

        dense = Dense(1, activation='sigmoid', name='similarity_score',
                      kernel_regularizer=None,
                      bias_regularizer=None,
                      activity_regularizer=None)(dist)

        model = Model([input_a, input_b], dense)

        return model
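cosine_dist and cosine_dist_output_shape are not part of this excerpt; a typical pair consistent with their usage here (an assumption, not the project's exact code) would be:

from tensorflow.keras import backend as K

def cosine_dist(inputs):
    # Cosine similarity between two (batch, hidden) tensors.
    a, b = inputs
    a = K.l2_normalize(a, axis=-1)
    b = K.l2_normalize(b, axis=-1)
    return K.sum(a * b, axis=-1, keepdims=True)

def cosine_dist_output_shape(shapes):
    return (shapes[0][0], 1)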
rank_losses.py (project: MatchZoo, author: faneshion)
def rank_crossentropy_loss(kwargs=None):
    neg_num = 1
    if isinstance(kwargs, dict) and 'neg_num' in kwargs:
        neg_num = kwargs['neg_num']
    def _cross_entropy_loss(y_true, y_pred):
        y_pos_logits = Lambda(lambda a: a[::(neg_num + 1), :], output_shape=(1,))(y_pred)
        y_pos_labels = Lambda(lambda a: a[::(neg_num + 1), :], output_shape=(1,))(y_true)
        logits_list, labels_list = [y_pos_logits], [y_pos_labels]
        for i in range(neg_num):
            y_neg_logits = Lambda(lambda a: a[(i + 1)::(neg_num + 1), :], output_shape=(1,))(y_pred)
            y_neg_labels = Lambda(lambda a: a[(i + 1)::(neg_num + 1), :], output_shape=(1,))(y_true)
            logits_list.append(y_neg_logits)
            labels_list.append(y_neg_labels)
        logits = tf.concat(logits_list, axis=1)
        labels = tf.concat(labels_list, axis=1)
        return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    return _cross_entropy_loss
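The loss assumes the batch is laid out as interleaved groups of one positive example followed by neg_num negatives, which is what the strided a[::(neg_num+1)] slices pick apart. A usage sketch in eager TF 2 (toy scores; in the original graph-mode MatchZoo code the closure is simply passed to model.compile):

import tensorflow as tf

loss_fn = rank_crossentropy_loss({'neg_num': 2})

# Two groups, each: 1 positive followed by 2 negatives.
y_true = tf.constant([[1.], [0.], [0.], [1.], [0.], [0.]])
y_pred = tf.constant([[2.], [0.5], [0.1], [1.5], [1.0], [0.2]])
print(float(loss_fn(y_true, y_pred)))   # small, since positives score highest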
model.py (project: keras-molecules, author: maxhodak)
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
        h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation='relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std=epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
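The sampling closure is the reparameterization trick: z = mu + exp(z_log_var / 2) * eps with eps drawn from N(0, 1), which keeps the draw differentiable with respect to the mean and log-variance. The same transform in plain numpy:

import numpy as np

rng = np.random.default_rng(0)
z_mean = np.array([0.0, 2.0])
z_log_var = np.array([0.0, np.log(4.0)])
eps = rng.standard_normal(2)
z = z_mean + np.exp(z_log_var / 2) * eps   # component 1 ~ N(2, 4), i.e. std 2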
inception_resnet_v2.py (project: keras-inception-resnetV2, author: kentsommer)
def block17(input, scale=1.0, activation_fn='relu'):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    shortcut = input

    tower_conv = conv2d_bn(input, 192, 1, 1, activ_fn=activation_fn)

    tower_conv1_0 = conv2d_bn(input, 128, 1, 1, activ_fn=activation_fn)
    tower_conv1_1 = conv2d_bn(tower_conv1_0, 160, 1, 7, activ_fn=activation_fn)
    tower_conv1_2 = conv2d_bn(tower_conv1_1, 192, 7, 1, activ_fn=activation_fn)

    mixed = merge([tower_conv, tower_conv1_2], mode='concat', concat_axis=channel_axis)

    up = conv2d_bn(mixed, 1088, 1, 1, activ_fn=False, normalize=False)

    up = Lambda(do_scale, output_shape=K.int_shape(up)[1:], arguments={'scale':scale})(up)

    net = merge([shortcut, up], mode='sum')

    if activation_fn:
        net = Activation(activation_fn)(net)
    return net
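do_scale is defined elsewhere in the file; judging by the arguments={'scale': scale} call, it is presumably just a scaled identity applied to the residual branch (an assumption):

def do_scale(x, scale):
    # Scale the residual branch before summing it with the shortcut.
    return x * scale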
inception_resnet_v2.py (project: keras-inception-resnetV2, author: kentsommer)
def block8(input, scale=1.0, activation_fn='relu'):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    shortcut = input

    tower_conv = conv2d_bn(input, 192, 1, 1, activ_fn=activation_fn)

    tower_conv1_0 = conv2d_bn(input, 192, 1, 1, activ_fn=activation_fn)
    tower_conv1_1 = conv2d_bn(tower_conv1_0, 224, 1, 3, activ_fn=activation_fn)
    tower_conv1_2 = conv2d_bn(tower_conv1_1, 256, 3, 1, activ_fn=activation_fn)

    mixed = merge([tower_conv, tower_conv1_2], mode='concat', concat_axis=channel_axis)

    up = conv2d_bn(mixed, 2080, 1, 1, activ_fn=False, normalize=False)

    up = Lambda(do_scale, output_shape=K.int_shape(up)[1:], arguments={'scale':scale})(up)

    net = merge([shortcut, up], mode='sum')

    if activation_fn:
        net = Activation(activation_fn)(net)
    return net
BMM_attention_model.py (project: BMM_attentional_CNN, author: dvatterott)
def crosschannelnormalization(alpha = 1e-4, k=2, beta=0.75, n=5,**kwargs):
    """
    This is the function used for cross channel normalization in the original
    Alexnet
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0,2,3,1))
                                              , (0,half))
        extra_channels = K.permute_dimensions(extra_channels, (0,3,1,2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:,i:i+ch,:,:]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
gated_block.py (project: eva, author: israelg99)
def __call__(self, model):
        if self.crop_right:
            model = Lambda(lambda x: x[:, :, :K.int_shape(x)[2]-1, :])(model)

        if self.v is not None:
            model = Merge(mode='sum')([model, self.v])

        if self.h is not None:
            hV = Dense(output_dim=2*self.filters)(self.h)
            hV = Reshape((1, 1, 2*self.filters))(hV)
            model = Lambda(lambda x: x[0]+x[1])([model,hV])

        model_f = Lambda(lambda x: x[:,:,:,:self.filters])(model)
        model_g = Lambda(lambda x: x[:,:,:,self.filters:])(model)

        model_f = Lambda(lambda x: K.tanh(x))(model_f)
        model_g = Lambda(lambda x: K.sigmoid(x))(model_g)

        res = Merge(mode='mul')([model_f, model_g])
        return res
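The closing Lambdas and the multiplicative Merge implement the gated activation from the PixelCNN family: the feature map is split in half along the channel axis and recombined as tanh(f) * sigmoid(g). The same gate as a single backend function (a sketch):

from tensorflow.keras import backend as K

def gated_activation(x, filters):
    # Split channels into a feature half and a gate half, then gate.
    f = x[..., :filters]
    g = x[..., filters:]
    return K.tanh(f) * K.sigmoid(g)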
test_keras.py (project: wtte-rnn, author: ragulpr)
def model_masking(discrete_time, init_alpha, max_beta):
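    # mask_value, n_timesteps, n_features and lr are module-level constants
    # defined elsewhere in the test file.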
    model = Sequential()

    model.add(Masking(mask_value=mask_value,
                      input_shape=(n_timesteps, n_features)))
    model.add(TimeDistributed(Dense(2)))
    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete', reduce_loss=False).loss_function
    else:
        loss = wtte.loss(kind='continuous', reduce_loss=False).loss_function

    model.compile(loss=loss, optimizer=RMSprop(
        lr=lr), sample_weight_mode='temporal')
    return model
train_bts.py (project: Msc_Multi_label_ZeroShot, author: thomasSve)
def define_network(vector_size, loss):
    base_model = InceptionV3(weights='imagenet', include_top=True)

    for layer in base_model.layers: # Freeze layers in pretrained model
        layer.trainable = False

    # Fully-connected layers on top of the frozen pretrained model
    x = Dense(4096, activation='relu', name='fc1')(base_model.layers[-2].output)
    x = Dense(8096, activation='relu', name='fc2')(x)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation='relu', name='fc3')(x)
    predictions = Dense(vector_size, activation='relu')(x)
    l2 = Lambda(lambda x: K.l2_normalize(x, axis=1))(predictions)
    model = Model(inputs=base_model.inputs, outputs=l2)

    optimizer = 'adam'
    if loss == 'euclidean':
        model.compile(optimizer=optimizer, loss=euclidean_distance)
    else:
        model.compile(optimizer=optimizer, loss=loss)

    return model
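euclidean_distance is imported from elsewhere in the project; a plausible definition consistent with the L2-normalized embeddings above (an assumption, not the project's exact code):

from tensorflow.keras import backend as K

def euclidean_distance(y_true, y_pred):
    # Mean L2 distance between target and predicted embedding vectors.
    return K.mean(K.sqrt(K.sum(K.square(y_true - y_pred), axis=-1) + K.epsilon()))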
variational_autoencoder.py (project: pydl, author: rafaeltg)
def _create_layers(self, input_layer):

        """ Create the encoding and the decoding layers of the variational autoencoder.
        :return: self
        """

        n_inputs = K.int_shape(input_layer)[1]

        # Encode layers
        encode_layer = Dense(units=self.n_hidden,
                             activation=self.enc_activation)(input_layer)

        z_mean = Dense(name='z_mean', units=self.n_latent)(encode_layer)
        z_log_var = Dense(name='z_log_var', units=self.n_latent)(encode_layer)

        z = Lambda(self._sampling, output_shape=(self.n_latent,))([z_mean, z_log_var])

        # Decode layers
        self._decode_layer = Dense(units=self.n_hidden,
                                   activation=self.dec_activation)(z)

        self._decode_layer = Dense(units=n_inputs, activation='linear')(self._decode_layer)
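self._sampling is not shown in this excerpt; it is presumably the standard reparameterization sampler, along the lines of (an assumption, sketched here as a method body):

from tensorflow.keras import backend as K

def _sampling(self, args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=K.shape(z_mean))
    return z_mean + K.exp(z_log_var / 2) * epsilon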
dnn.py (project: Hotpot, author: Liang-Qiu)
def prep_model(inputs, N, s0pad, s1pad, c):
    # Word-level projection before averaging
    inputs[0] = TimeDistributed(Dense(N, activation='relu'))(inputs[0])
    inputs[0] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[0])
    inputs[1] = TimeDistributed(Dense(N, activation='relu'))(inputs[1])
    inputs[1] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[1])
    merged = concatenate([inputs[0], inputs[1]])

    # Deep
    for i in range(c['deep']):
        merged = Dense(c['nndim'], activation=c['nnact'])(merged)
        merged = Dropout(c['nndropout'])(merged)
        merged = BatchNormalization()(merged)

    is_duplicate = Dense(1, activation='sigmoid')(merged)
    return [is_duplicate], N
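Lambda(lambda x: K.max(x, axis=1)) is max-pooling over the time axis: each of the N features keeps its strongest activation across all timesteps. A quick check on a toy tensor:

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.array([[[1., 5.], [3., 2.], [0., 4.]]]))   # (1, time=3, feat=2)
print(K.eval(K.max(x, axis=1)))                              # [[3. 5.]]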
gran.py (project: Hotpot, author: Liang-Qiu)
def prep_model(inputs, N, s0pad, s1pad, c, granlevels=1):
    # LSTM
    lstm = LSTM(N, return_sequences=True, implementation=2, 
                   kernel_regularizer=l2(c['l2reg']), recurrent_regularizer=l2(c['l2reg']),
                   bias_regularizer=l2(c['l2reg']))
    x1 = inputs[0]
    x2 = inputs[1]
    h1 = lstm(x1)
    h2 = lstm(x2)

    W_x = Dense(N, kernel_initializer='glorot_uniform', use_bias=True, 
                   kernel_regularizer=l2(c['l2reg']))
    W_h = Dense(N, kernel_initializer='orthogonal', use_bias=True,
                   kernel_regularizer=l2(c['l2reg']))
    sigmoid = Activation('sigmoid')
    a1 = multiply([x1, sigmoid( add([W_x(x1), W_h(h1)]) )])
    a2 = multiply([x2, sigmoid( add([W_x(x2), W_h(h2)]) )])

    # Averaging
    avg = Lambda(function=lambda x: K.mean(x, axis=1),
                 output_shape=lambda shape: (shape[0], ) + shape[2:])
    gran1 = avg(a1)
    gran2 = avg(a2)

    return [gran1, gran2], N
pg.py (project: rl, author: Shmuma)
def make_model(state_shape, n_actions):
    in_t = Input(shape=(HISTORY_STEPS,) + state_shape, name='input')
    action_t = Input(shape=(1,), dtype='int32', name='action')
    advantage_t = Input(shape=(1,), name='advantage')

    fl_t = Flatten(name='flat')(in_t)
    l1_t = Dense(SIMPLE_L1_SIZE, activation='relu', name='l1')(fl_t)
    l2_t = Dense(SIMPLE_L2_SIZE, activation='relu', name='l2')(l1_t)
    policy_t = Dense(n_actions, name='policy', activation='softmax')(l2_t)

    def loss_func(args):
        p_t, act_t, adv_t = args
        oh_t = K.one_hot(act_t, n_actions)
        oh_t = K.squeeze(oh_t, 1)
        p_oh_t = K.log(1e-6 + K.sum(oh_t * p_t, axis=-1, keepdims=True))
        res_t = adv_t * p_oh_t
        return -res_t

    loss_t = Lambda(loss_func, output_shape=(1,), name='loss')([policy_t, action_t, advantage_t])

    return Model(input=[in_t, action_t, advantage_t], output=[policy_t, loss_t])
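Because the REINFORCE objective -advantage * log pi(a|s) is emitted as a model output, training can minimize that output directly with a passthrough loss and a dummy target for the policy head. A compile-time sketch of that common wiring (assuming the module-level constants used by make_model are defined; not necessarily how this project compiles it):

model = make_model(state_shape=(4,), n_actions=2)
model.compile(optimizer='adam',
              loss=[lambda y_true, y_pred: y_pred * 0.0,   # policy head: no loss
                    lambda y_true, y_pred: y_pred])        # loss head: minimize as-is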
decoder.py (project: adversarial-variational-bayes, author: gdikov)
def __init__(self, latent_dim, data_dim, network_architecture='synthetic'):
        """
        Args:
            latent_dim: int, the flattened dimensionality of the latent space 
            data_dim: int, the flattened dimensionality of the output space (data space)
            network_architecture: str, the architecture name for the body of the Decoder model
        """
        super(Decoder, self).__init__(latent_dim=latent_dim, data_dim=data_dim,
                                      network_architecture=network_architecture,
                                      name='Standard Decoder')

        generator_body = get_network_by_name['decoder'][network_architecture](self.latent_input)

        # NOTE: all decoder layers have names prefixed by `dec`.
        # This is essential for the partial model freezing during training.
        sampler_params = Dense(self.data_dim, activation='sigmoid', name='dec_sampler_params')(generator_body)

        # Probability clipping is necessary because the Bernoulli `log_prob` produces NaNs at the boundary values 0 and 1.
        sampler_params = Lambda(lambda x: 1e-6 + (1 - 2e-6) * x, name='dec_probs_clipper')(sampler_params)

        def bernoulli_log_probs(args):
            from tensorflow.contrib.distributions import Bernoulli
            mu, x = args
            log_px = Bernoulli(probs=mu, name='dec_bernoulli').log_prob(x)
            return log_px

        log_probs = Lambda(bernoulli_log_probs, name='dec_bernoulli_logprob')([sampler_params, self.data_input])

        self.generator = Model(inputs=self.latent_input, outputs=sampler_params, name='dec_sampling')
        self.ll_estimator = Model(inputs=[self.data_input, self.latent_input], outputs=log_probs, name='dec_trainable')
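The clipping Lambda maps probabilities affinely from [0, 1] into [1e-6, 1 - 1e-6], so both log(mu) and log(1 - mu) stay finite at the boundaries:

clip = lambda x: 1e-6 + (1 - 2e-6) * x
print(clip(0.0), clip(1.0))   # 1e-06, ~0.999999: log() is finite at both ends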
architectures.py (project: adversarial-variational-bayes, author: gdikov)
def synthetic_adaptive_prior_discriminator(data_dim, latent_dim):
    data_input = Input(shape=(data_dim,), name='disc_internal_data_input')
    # Center the data around 0: map inputs from [0, 1] to [-1, 1].
    centered_data = Lambda(lambda x: 2 * x - 1, name='disc_centering_data_input')(data_input)
    discriminator_body_data = repeat_dense(centered_data, n_layers=2, n_units=256, name_prefix='disc_body_data')
    theta = Dense(4*256, activation='relu', name='disc_theta')(discriminator_body_data)
    discriminator_body_data_t = repeat_dense(centered_data, n_layers=2, n_units=256, name_prefix='disc_body_data_t')
    discriminator_body_data_t = Dense(1, activation=None, name='disc_data_squash')(discriminator_body_data_t)

    latent_input = Input(shape=(latent_dim,), name='disc_internal_latent_input')
    discriminator_body_latent = repeat_dense(latent_input, n_layers=2, n_units=256, name_prefix='disc_body_latent')
    sigma = Dense(4*256, activation='relu', name='disc_sigma')(discriminator_body_latent)
    discriminator_body_latent_t = repeat_dense(latent_input, n_layers=2, n_units=256, name_prefix='disc_body_latent_t')
    discriminator_body_latent_t = Dense(1, activation=None, name='disc_latent_squash')(discriminator_body_latent_t)

    merged_data_latent = Multiply(name='disc_mul_sigma_theta')([theta, sigma])
    merged_data_latent = Lambda(lambda x: ker.sum(x, axis=-1), name='disc_add_activ_sig_the')(merged_data_latent)
    discriminator_output = Add(name='disc_add_data_latent_t')([discriminator_body_data_t,
                                                               discriminator_body_latent_t,
                                                               merged_data_latent])
    collapsed_noise = Lambda(lambda x: 0.5 * ker.sum(x ** 2, axis=-1), name='disc_noise_addition')(latent_input)
    discriminator_output = Add(name='disc_add_all_toghether')([discriminator_output, collapsed_noise])
    discriminator_model = Model(inputs=[data_input, latent_input], outputs=discriminator_output,
                                name='disc_internal_model')
    return discriminator_model
architectures.py (project: adversarial-variational-bayes, author: gdikov)
def mnist_adaptive_prior_discriminator(data_dim, latent_dim):
    data_input = Input(shape=(data_dim,), name='disc_internal_data_input')
    # Center the data around 0: map inputs from [0, 1] to [-1, 1].
    centered_data = Lambda(lambda x: 2 * x - 1, name='disc_centering_data_input')(data_input)
    discriminator_body_data = repeat_dense(centered_data, n_layers=3, n_units=512, name_prefix='disc_body_data')
    theta = Dense(4*512, activation='relu', name='disc_theta')(discriminator_body_data)
    discriminator_body_data_t = repeat_dense(centered_data, n_layers=3, n_units=512, name_prefix='disc_body_data_t')
    discriminator_body_data_t = Dense(1, activation=None, name='disc_data_squash')(discriminator_body_data_t)

    latent_input = Input(shape=(latent_dim,), name='disc_internal_latent_input')
    discriminator_body_latent = repeat_dense(latent_input, n_layers=3, n_units=512, name_prefix='disc_body_latent')
    sigma = Dense(4*512, activation='relu', name='disc_sigma')(discriminator_body_latent)
    discriminator_body_latent_t = repeat_dense(latent_input, n_layers=3, n_units=512, name_prefix='disc_body_latent_t')
    discriminator_body_latent_t = Dense(1, activation=None, name='disc_latent_squash')(discriminator_body_latent_t)

    merged_data_latent = Multiply(name='disc_mul_sigma_theta')([theta, sigma])
    merged_data_latent = Lambda(lambda x: ker.sum(x, axis=-1), name='disc_add_activ_sig_the')(merged_data_latent)
    discriminator_output = Add(name='disc_add_data_latent_t')([discriminator_body_data_t,
                                                               discriminator_body_latent_t,
                                                               merged_data_latent])
    collapsed_noise = Lambda(lambda x:  0.5 * ker.sum(x**2, axis=-1), name='disc_noise_addition')(latent_input)
    discriminator_output = Add(name='disc_add_all_toghether')([discriminator_output, collapsed_noise])
    discriminator_model = Model(inputs=[data_input, latent_input], outputs=discriminator_output,
                                name='disc_internal_model')
    return discriminator_model
encoder.py (project: adversarial-variational-bayes, author: gdikov)
def __init__(self, data_dim, noise_dim, latent_dim, network_architecture='synthetic', name='encoder'):
        logger.info("Initialising {} model with {}-dimensional data and {}-dimensional noise input "
                    "and {} dimensional latent output".format(name, data_dim, noise_dim, latent_dim))
        self.name = name
        self.data_dim = data_dim
        self.noise_dim = noise_dim
        self.latent_dim = latent_dim
        self.network_architecture = network_architecture
        self.data_input = Input(shape=(data_dim,), name='enc_data_input')

        self.standard_normal_sampler = Lambda(sample_standard_normal_noise, name='enc_standard_normal_sampler')
        self.standard_normal_sampler.arguments = {'data_dim': self.data_dim, 'noise_dim': self.noise_dim,
                                                  'seed': config['seed']}

        self.standard_normal_sampler2 = Lambda(sample_standard_normal_noise, name='enc_standard_normal_sampler2')
        self.standard_normal_sampler2.arguments = {'data_dim': self.data_dim, 'noise_dim': self.noise_dim,
                                                   'seed': config['seed']}
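sample_standard_normal_noise lives elsewhere in the project; given the arguments dict attached to the Lambda (forwarded to the function as keyword arguments at call time), it presumably draws a standard-normal noise tensor shaped by noise_dim, along the lines of (an assumption):

from tensorflow.keras import backend as K

def sample_standard_normal_noise(inputs, **kwargs):
    batch_size = K.shape(inputs)[0]
    return K.random_normal(shape=(batch_size, kwargs['noise_dim']),
                           seed=kwargs.get('seed'))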
test_model_saving.py (project: keras-customized, author: ambrite)
def test_saving_lambda_custom_objects():
    input = Input(shape=(3,))
    x = Lambda(lambda x: square_fn(x), output_shape=(3,))(input)
    output = Dense(3)(x)

    model = Model(input, output)
    model.compile(loss=objectives.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname, custom_objects={'square_fn': square_fn})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
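square_fn is a module-level helper in the test file; presumably simply (an assumption):

from keras import backend as K

def square_fn(x):
    return K.square(x)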
testSigmoidSmaller.py (project: kaggle-quora-question-pairs, author: voletiv)
def modelSigmoid(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')
    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputB)
    net = netSigmoid(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)
    # Concatenate
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    predictions = Dense(1, activation='sigmoid')(x)
    model = Model([inputA, inputB], predictions)
    return model
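Passing K.one_hot straight to Lambda with an arguments dict avoids writing a closure: the extra keyword arguments are forwarded to the function at call time. The equivalent direct call (toy values):

from tensorflow.keras import backend as K

ids = K.constant([[0, 2, 1]], dtype='int32')
onehot = K.one_hot(ids, num_classes=3)   # what the Lambda invokes internally
print(K.eval(onehot).shape)              # (1, 3, 3)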
testSigmoidSmaller.py (project: kaggle-quora-question-pairs, author: voletiv)
def modelC256P3C256P3C256P3f128_conc_f128(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')
    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputB)
    net = netC256P3C256P3C256P3f128(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)
    # Concatenate
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    # Dense
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    predictions = Dense(1, activation='sigmoid')(x)
    model = Model([inputA, inputB], predictions)
    return model
testSigmoidSmaller.py (project: kaggle-quora-question-pairs, author: voletiv)
def modelC256P3C256P3C256P3f128_conc(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')
    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputB)
    net = netC256P3C256P3C256P3f128(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)
    # Concatenate
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    predictions = Dense(1, activation='sigmoid')(x)
    model = Model([inputA, inputB], predictions)
    return model

