Example source code for the Python Input() class

autoencoder.py (project: keras_lstm_vae, author: twairball)
def create_lstm_autoencoder(input_dim, timesteps, latent_dim):
    """
    Creates an LSTM autoencoder and returns the compiled model.
    (All code by fchollet - see reference.)

    # Arguments
        input_dim: int.
        timesteps: int, input timestep dimension.
        latent_dim: int, latent z-layer shape. 

    # References
        - [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html)
    """

    inputs = Input(shape=(timesteps, input_dim,))
    encoded = LSTM(latent_dim)(inputs)

    decoded = RepeatVector(timesteps)(encoded)
    decoded = LSTM(input_dim, return_sequences=True)(decoded)

    autoencoder = Model(inputs, decoded)
    encoder = Model(inputs, encoded)  # encoder half; useful for extracting latent codes
    autoencoder.compile(optimizer='adam', loss='mse')
    return autoencoder
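
A minimal usage sketch for the function above; the dimensions and training data are illustrative assumptions, and the usual Keras imports (Input, LSTM, RepeatVector, Model) are taken as given:

import numpy as np

# Hypothetical dimensions, chosen only for illustration.
input_dim, timesteps, latent_dim = 8, 10, 32
x = np.random.random((100, timesteps, input_dim))

autoencoder = create_lstm_autoencoder(input_dim, timesteps, latent_dim)
autoencoder.fit(x, x, epochs=5, batch_size=16)  # use nb_epoch= instead of epochs= on Keras 1
reconstructions = autoencoder.predict(x)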
MTCNN.py (project: keras-mtcnn, author: xiangrufan)
def create_Kao_Onet(weight_path='model48.h5'):
    input = Input(shape=[48, 48, 3])
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu4')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(256, name='conv5')(x)
    x = PReLU(name='prelu5')(x)

    classifier = Dense(2, activation='softmax',name='conv6-1')(x)
    bbox_regress = Dense(4,name='conv6-2')(x)
    landmark_regress = Dense(10,name='conv6-3')(x)
    model = Model([input], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)

    return model
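
A usage sketch, assuming the project's weight file model48.h5 is available: O-Net consumes 48x48 RGB face candidates and emits a face/non-face probability, four bounding-box offsets, and ten landmark coordinates per crop.

import numpy as np

onet = create_Kao_Onet('model48.h5')
crops = np.random.random((4, 48, 48, 3))  # stand-in for normalized face candidates
cls_prob, bbox_reg, landmark_reg = onet.predict(crops)
print(cls_prob.shape, bbox_reg.shape, landmark_reg.shape)  # (4, 2) (4, 4) (4, 10)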
model.py (project: deeppavlov, author: deepmipt)
def cnn_word_model(self):
        embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

        outputs = []
        for i in range(len(self.kernel_sizes)):
            output_i = Conv1D(self.opt['filters_cnn'], kernel_size=self.kernel_sizes[i], activation=None,
                              kernel_regularizer=l2(self.opt['regul_coef_conv']), padding='same')(embed_input)
            output_i = BatchNormalization()(output_i)
            output_i = Activation('relu')(output_i)
            output_i = GlobalMaxPooling1D()(output_i)
            outputs.append(output_i)

        output = concatenate(outputs, axis=1)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(self.opt['dense_dim'], activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(1, activation=None, kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=embed_input, outputs=act_output)
        return model
DNGR.py (project: DNGR-Keras, author: MdAsifKhan)
def model(data, hidden_layers, hidden_neurons, output_file, validation_split=0.9):
    # Note: validation_split is really the *training* fraction here; the first
    # 90% of rows are used for training and the remaining 10% for validation.
    train_n = int(validation_split * len(data))
    batch_size = 50
    train_data = data[:train_n, :]
    val_data = data[train_n:, :]

    input_sh = Input(shape=(data.shape[1],))
    encoded = noise.GaussianNoise(0.2)(input_sh)
    for i in range(hidden_layers):
        encoded = Dense(hidden_neurons[i], activation='relu')(encoded)
        encoded = noise.GaussianNoise(0.2)(encoded)

    decoded = Dense(hidden_neurons[-2], activation='relu')(encoded)
    for j in range(hidden_layers-3,-1,-1):
        decoded = Dense(hidden_neurons[j], activation='relu')(decoded)
    decoded = Dense(data.shape[1], activation='sigmoid')(decoded)

    autoencoder = Model(input=input_sh, output=decoded)
    autoencoder.compile(optimizer='adadelta', loss='mse')

    checkpointer = ModelCheckpoint(filepath='data/bestmodel' + output_file + ".hdf5", verbose=1, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=15, verbose=1)

    train_generator = DataGenerator(batch_size)
    train_generator.fit(train_data, train_data)
    val_generator = DataGenerator(batch_size)
    val_generator.fit(val_data, val_data)

    autoencoder.fit_generator(train_generator,
        samples_per_epoch=len(train_data),
        nb_epoch=100,
        validation_data=val_generator,
        nb_val_samples=len(val_data),
        max_q_size=batch_size,
        callbacks=[checkpointer, earlystopper])
    enco = Model(input=input_sh, output=encoded)
    enco.compile(optimizer='adadelta', loss='mse')
    reprsn = enco.predict(data)
    return reprsn
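
DataGenerator is defined elsewhere in the DNGR-Keras project and is not shown here. Below is a minimal sketch of the interface the code above appears to rely on: an object with a fit(x, y) method that Keras 1's fit_generator can draw batches from. The random-batch logic is an assumption, not the project's implementation.

import numpy as np

class DataGenerator(object):
    """Yields (x, y) batches forever, as Keras 1's fit_generator expects."""

    def __init__(self, batch_size):
        self.batch_size = batch_size

    def fit(self, x, y):
        self.x, self.y = x, y

    def __iter__(self):
        return self

    def __next__(self):
        idx = np.random.randint(0, len(self.x), self.batch_size)
        return self.x[idx], self.y[idx]

    next = __next__  # Python 2 spelling, for the Keras 1 era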
test_core.py (project: keras, author: GeekLiB)
def test_activity_regularization():
    from keras.engine import Input, Model

    layer = core.ActivityRegularization(l1=0.01, l2=0.01)

    # test in functional API
    x = Input(shape=(3,))
    z = core.Dense(2)(x)
    y = layer(z)
    model = Model(input=x, output=y)
    model.compile('rmsprop', 'mse', mode='FAST_COMPILE')

    model.predict(np.random.random((2, 3)))

    # test serialization
    model_config = model.get_config()
    model = Model.from_config(model_config)
    model.compile('rmsprop', 'mse')
get_model.py (project: Cat-Segmentation, author: ardamavi)
def get_model():

    inputs = Input(shape=(64, 64, 3))

    conv_1 = Conv2D(1, (3, 3), strides=(1, 1), padding='same')(inputs)
    act_1 = Activation('relu')(conv_1)

    conv_2 = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(act_1)
    act_2 = Activation('relu')(conv_2)

    deconv_1 = Conv2DTranspose(64, (3, 3), strides=(1, 1), padding='same')(act_2)
    act_3 = Activation('relu')(deconv_1)

    merge_1 = concatenate([act_3, act_1], axis=3)

    deconv_2 = Conv2DTranspose(1, (3, 3), strides=(1, 1), padding='same')(merge_1)
    act_4 = Activation('relu')(deconv_2)

    model = Model(inputs=[inputs], outputs=[act_4])

    model.compile(optimizer='adadelta', loss=dice_coef_loss, metrics=[dice_coef])

    return model
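
dice_coef and dice_coef_loss live elsewhere in the Cat-Segmentation project and must be in scope before get_model() is called. A common soft-Dice formulation, given as an assumption rather than the project's exact code:

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice: 2*|A.B| / (|A| + |B|), smoothed to avoid division by zero.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)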
model_entailment.py (project: onto-lstm, author: pdasigi)
def define_attention_model(self):
        # Take necessary parts out of the entailment model to get OntoLSTM attention.
        if not self.model:
            raise RuntimeError, "Model not trained yet!"
        # We need just one input to get attention. input_shape_at(0) gives a list with two shapes.
        input_shape = self.model.get_input_shape_at(0)[0]
        input_layer = Input(input_shape[1:], dtype='int32')  # removing batch size
        embedding_layer = None
        encoder_layer = None
        for layer in self.model.layers:
            if layer.name == "embedding":
                embedding_layer = layer
            elif layer.name == "encoder":
                # We need to redefine the OntoLSTM layer with the learned weights and set return attention to True.
                # Assuming we'll want attention values for all words (return_sequences = True)
                encoder_layer = OntoAttentionLSTM(input_dim=self.embed_dim,
                                                  output_dim=self.embed_dim, num_senses=self.num_senses,
                                                  num_hyps=self.num_hyps, use_attention=True,
                                                  return_attention=True, return_sequences=True,
                                                  weights=layer.get_weights())
        if not embedding_layer or not encoder_layer:
            raise RuntimeError, "Required layers not found!"
        attention_output = encoder_layer(embedding_layer(input_layer))
        self.attention_model = Model(input=input_layer, output=attention_output)
        self.attention_model.compile(loss="mse", optimizer="sgd")  # Loss and optimizer do not matter!
model_sentences.py (project: onto-lstm, author: pdasigi)
def get_attention(self, C_ind):
    if not self.model:
      raise RuntimeError, "Model not trained!"
    model_embedding = None
    model_weights = None
    for layer in self.model.layers:
      if layer.name.lower() == "embedding":
        model_embedding = layer
      if layer.name.lower() == "sent_lstm":
        model_lstm = layer
    if model_embedding is None or model_lstm is None:
      raise RuntimeError, "Did not find expected layers"
    lstm_weights = model_lstm.get_weights()
    embedding_weights = model_embedding.get_weights()
    embed_in_dim, embed_out_dim = embedding_weights[0].shape
    att_embedding = HigherOrderEmbedding(input_dim=embed_in_dim, output_dim=embed_out_dim, weights=embedding_weights)
    onto_lstm = OntoAttentionLSTM(input_dim=embed_out_dim, output_dim=embed_out_dim, input_length=model_lstm.input_length, num_senses=self.num_senses, num_hyps=self.num_hyps, use_attention=True, return_attention=True, weights=lstm_weights)
    att_input = Input(shape=C_ind.shape[1:], dtype='int32')
    att_sent_rep = att_embedding(att_input)
    att_output = onto_lstm(att_sent_rep)
    att_model = Model(input=att_input, output=att_output)
    att_model.compile(optimizer='adam', loss='mse') # optimizer and loss are not needed since we are not going to train this model.
    C_att = att_model.predict(C_ind)
    print("Got attention values. Input, output shapes:", C_ind.shape, C_att.shape, file=sys.stderr)
    return C_att
unet.py (project: lsun_2017, author: ternaus)
def get_unet0(num_start_filters=32):
    inputs = Input((img_rows, img_cols, num_channels))
    conv1 = ConvBN2(inputs, num_start_filters)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = ConvBN2(pool1, 2 * num_start_filters)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = ConvBN2(pool2, 4 * num_start_filters)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = ConvBN2(pool3, 8 * num_start_filters)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = ConvBN2(pool4, 16 * num_start_filters)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    conv6 = ConvBN2(up6, 8 * num_start_filters)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    conv7 = ConvBN2(up7, 4 * num_start_filters)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    conv8 = ConvBN2(up8, 2 * num_start_filters)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(up9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('selu')(conv9)
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(conv9)
    crop9 = Cropping2D(cropping=((16, 16), (16, 16)))(conv9)
    conv9 = BatchNormalization()(crop9)
    conv9 = Activation('selu')(conv9)

    conv10 = Conv2D(num_mask_channels, (1, 1))(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    return model
main.py (project: DeepWorks, author: daigo0927)
def build_simpleCNN(input_shape=(32, 32, 3), num_output=10):

    h, w, nch = input_shape
    assert h == w, 'expect input shape (h, w, nch), h == w'

    images = Input(shape=(h, w, nch))
    x = Conv2D(64, (4, 4), strides=(1, 1),
               kernel_initializer=init, padding='same')(images)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(128, (4, 4), strides=(1, 1),
               kernel_initializer=init, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    outputs = Dense(num_output, kernel_initializer=init,
                    activation='softmax')(x)

    model = Model(inputs=images, outputs=outputs)
    return model
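
init above is a module-level initializer defined elsewhere in main.py. A usage sketch under that assumption, with everything living in one script and dummy CIFAR-10-shaped data:

import numpy as np

init = 'he_normal'  # assumption: the original module supplies its own initializer

model = build_simpleCNN(input_shape=(32, 32, 3), num_output=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

x = np.random.random((16, 32, 32, 3))
y = np.eye(10)[np.random.randint(0, 10, 16)]  # one-hot dummy labels
model.fit(x, y, epochs=1, batch_size=8)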
example_gan_convolutional.py (project: Deep-Learning-with-Keras, author: PacktPublishing)
def model_generator():
    nch = 256
    g_input = Input(shape=[100])
    H = Dense(nch * 14 * 14)(g_input)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)
    H = dim_ordering_reshape(nch, 14)(H)
    H = UpSampling2D(size=(2, 2))(H)
    H = Convolution2D(int(nch / 2), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(int(nch / 4), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(1, 1, 1, border_mode='same')(H)
    g_V = Activation('sigmoid')(H)
    return Model(g_input, g_V)
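
This generator is written against the Keras 1 API (mode= on BatchNormalization, border_mode=, Convolution2D(n, 3, 3)); dim_ordering_reshape is a project helper that reshapes the dense output into nch feature maps of size 14x14. A sampling sketch, assuming a Keras 1 environment:

import numpy as np

generator = model_generator()
z = np.random.normal(0.0, 1.0, size=(8, 100))  # latent noise vectors
fake_images = generator.predict(z)             # 14x14 maps upsampled once to 28x28
print(fake_images.shape)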
MTCNN.py (project: keras-mtcnn, author: xiangrufan)
def create_Kao_Rnet(weight_path='model24.h5'):
    input = Input(shape=[24, 24, 3])  # change this shape to [None, None, 3] to enable arbitrary shape input
    x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)

    x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)

    x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(128, name='conv4')(x)
    x = PReLU(name='prelu4')(x)
    classifier = Dense(2, activation='softmax', name='conv5-1')(x)
    bbox_regress = Dense(4, name='conv5-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
han1.py (project: 3HAN, author: ni9elf)
def HAN1(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    #model = Sequential()
    wordInputs = Input(shape=(MAX_WORDS,), name='word1', dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='emb1')(wordInputs) #Assuming all the sentences have same number of words. Check for input_length again.


    hij = Bidirectional(GRU(WORDGRU, name='gru1', return_sequences=True))(wordEmbedding)


    wordDrop = Dropout(DROPOUTPER, name='drop1')(hij)

    alpha_its, Si = AttentionLayer(name='att1')(wordDrop)   

    v6 = Dense(1, activation="sigmoid", name="dense")(Si)
    #model.add(Dense(1, activation="sigmoid", name="documentOut3"))
    model = Model(inputs=[wordInputs] , outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
gru_avg.py (project: 3HAN, author: ni9elf)
def fGRU_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS+1, MAX_WORDS), name="wordInputs", dtype='float32')

    wordInp = Flatten()(wordInputs)

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInp) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    head = GlobalAveragePooling1D()(hij) 

    v6 = Dense(1, activation="sigmoid", name="dense")(head)

    model = Model(inputs=[wordInputs] , outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    return model
glove_avg.py (project: 3HAN, author: ni9elf)
def fGlove_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS+1, MAX_WORDS), name="wordInputs", dtype='float32')

    wordInp = Flatten()(wordInputs)

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInp) 

    head = GlobalAveragePooling1D()(wordEmbedding) 


    v6 = Dense(1, activation="sigmoid", name="dense")(head)

    model = Model(inputs=[wordInputs] , outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    return model
model.py (project: deeppavlov, author: deepmipt)
def lstm_word_model(self):
        embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

        output = Bidirectional(LSTM(self.opt['units_lstm'], activation='tanh',
                                      kernel_regularizer=l2(self.opt['regul_coef_lstm']),
                                      dropout=self.opt['dropout_rate']))(embed_input)

        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(self.opt['dense_dim'], activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(1, activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=embed_input, outputs=act_output)
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim, self.embedding_dim,))
        inp_dropout = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        outp = LSTM(self.hidden_dim, input_shape=(input_dim, self.embedding_dim,),
                    kernel_regularizer=None,
                    recurrent_regularizer=None,
                    bias_regularizer=None,
                    activity_regularizer=None,
                    recurrent_dropout=self.recdrop_val,
                    dropout=self.inpdrop_val,
                    kernel_initializer=ker_in,
                    recurrent_initializer=rec_in,
                    return_sequences=True)(inp_dropout)
        outp_dropout = Dropout(self.dropout_val)(outp)
        model = Model(inputs=inp, outputs=outp_dropout, name="LSTM_encoder")
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer_1(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim,  self.embedding_dim,))
        inp_drop = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.hidden_dim,
                                    input_shape=(input_dim, self.embedding_dim,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=True), merge_mode=None)(inp_drop)
        dropout_forw = Dropout(self.dropout_val)(bioutp[0])
        dropout_back = Dropout(self.dropout_val)(bioutp[1])
        model = Model(inputs=inp, outputs=[dropout_forw, dropout_back], name="biLSTM_encoder")
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer_2(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim, 2*self.perspective_num,))
        inp_drop = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.aggregation_dim,
                                    input_shape=(input_dim, 2*self.perspective_num,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=True), merge_mode=None)(inp_drop)
        dropout_forw = Dropout(self.dropout_val)(bioutp[0])
        dropout_back = Dropout(self.dropout_val)(bioutp[1])
        model = Model(inputs=inp, outputs=[dropout_forw, dropout_back], name="biLSTM_enc_persp")
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_attention_layer(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="attention_generator")
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_attention_layer_f(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="att_generator_forw")
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_attention_layer_b(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.ones((1,1)), np.zeros((self.max_sequence_length-1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="att_generator_back")
        return model
model.py (project: deeppavlov, author: deepmipt)
def bilstm_woatt_model(self):
        """Define a model with bi-LSTM layers and without attention."""

        input_a = Input(shape=(self.max_sequence_length, self.embedding_dim,))
        input_b = Input(shape=(self.max_sequence_length, self.embedding_dim,))
        lstm_layer = self.create_lstm_layer_last(self.max_sequence_length)
        lstm_last_a = lstm_layer(input_a)
        lstm_last_b = lstm_layer(input_b)

        dist = Lambda(self.cosine_dist, output_shape=self.cosine_dist_output_shape,
                      name="similarity_network")([lstm_last_a, lstm_last_b])

        dense = Dense(1, activation='sigmoid', name='similarity_score',
                      kernel_regularizer=None,
                      bias_regularizer=None,
                      activity_regularizer=None)(dist)

        model = Model([input_a, input_b], dense)

        return model
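
cosine_dist and cosine_dist_output_shape are helpers defined elsewhere in this model file. A common formulation, offered as an assumption rather than the project's exact code:

from keras import backend as K

def cosine_dist(inputs):
    a, b = inputs
    a = K.l2_normalize(a, axis=-1)
    b = K.l2_normalize(b, axis=-1)
    return K.sum(a * b, axis=-1, keepdims=True)  # cosine similarity in [-1, 1]

def cosine_dist_output_shape(shapes):
    shape_a, shape_b = shapes
    return (shape_a[0], 1)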
n12_pepe_zoo.py (project: kaggle_yt8m, author: N01Z3)
def build_mod5(opt=adam()):
    n = 3 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)


    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)

    x = merge([x1, x2], mode='concat', concat_axis=1)

    x = fc_identity(x, n)

    out = Dense(4716, activation='sigmoid', name='output')(x)

    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')

    # model.summary()
    # plot(model=model, show_shapes=True)
    return model
n12_pepe_zoo.py (project: kaggle_yt8m, author: N01Z3)
def build_mod6(opt=adam()):
    in1 = Input((128,), name='x1')
    in2 = Input((1024,), name='x2')

    inp = merge([in1, in2], mode='concat', concat_axis=1)

    wide = Dense(4000)(inp)

    deep = Dense(1000, activation='sigmoid')(inp)
    deep = Dense(1000, activation='sigmoid')(deep)
    deep = Dense(4000)(deep)

    out = merge([wide, deep], mode='sum')  # element-wise sum; concat_axis is ignored in 'sum' mode
    out = Dense(4716, activation='sigmoid', name='output')(out)

    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')

    # model.summary()
    plot(model=model, show_shapes=True)
    return model
puzzle.py (project: latplan, author: guicho271828)
def to_configs(states, verbose=True, **kwargs):
    base = setting['base']
    width  = states.shape[1] // base
    height = states.shape[1] // base
    load(width,height)

    def build():
        P = len(setting['panels'])
        states = Input(shape=(height*base,width*base))
        error = build_error(states, height, width, base)

        matches = 1 - K.clip(K.sign(error - threshold),0,1)
        # a, h, w, panel
        matches = K.reshape(matches, [K.shape(states)[0], height * width, -1])
        # a, pos, panel
        matches = K.permute_dimensions(matches, [0,2,1])
        # a, panel, pos
        config = matches * K.arange(height*width,dtype='float')
        config = K.sum(config, axis=-1)
        return Model(states, wrap(states, config))

    model = build()
    return model.predict(states, **kwargs)
lightsout_twisted.py (project: latplan, author: guicho271828)
def generate_gpu(configs,**kwargs):
    configs = np.array(configs)
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base*size

    def build():
        P = 2
        configs = Input(shape=(size*size,))
        _configs = 1 - K.round((configs/2)+0.5) # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs,'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1,P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base*base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size*base, size*base, 1])
        states = K.spatial_2d_padding(states, padding=((pad,pad),(pad,pad)))
        states = K.squeeze(states, -1)
        return Model(configs, wrap(configs, states))

    return preprocess(batch_swirl(build().predict(configs,**kwargs)))
lightsout_twisted.py (project: latplan, author: guicho271828)
def generate_gpu2(configs,**kwargs):
    configs = np.array(configs)
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base*size

    def build():
        P = 2
        configs = Input(shape=(size*size,))
        _configs = 1 - K.round((configs/2)+0.5) # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs,'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1,P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base*base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size*base, size*base, 1])
        states = K.spatial_2d_padding(states, padding=((pad,pad),(pad,pad)))
        states = K.squeeze(states, -1)
        states = tensor_swirl(states, radius=dim+2*pad * relative_swirl_radius, **swirl_args)
        return Model(configs, wrap(configs, states))

    return preprocess(build().predict(configs,**kwargs))
lightsout_twisted.py (project: latplan, author: guicho271828)
def to_configs(states, verbose=True, **kwargs):
    base = panels.shape[1]
    dim  = states.shape[1] - pad*2
    size = dim // base

    def build():
        states = Input(shape=(dim+2*pad,dim+2*pad))
        s = tensor_swirl(states, radius=dim+2*pad * relative_swirl_radius, **unswirl_args)
        error = build_errors(s,base,pad,dim,size)
        matches = 1 - K.clip(K.sign(error - threshold),0,1)
        # a, h, w, panel
        matches = K.reshape(matches, [K.shape(states)[0], size * size, -1])
        # a, pos, panel
        config = matches * K.arange(2,dtype='float')
        config = K.sum(config, axis=-1)
        # this is 0,1 configs; for compatibility, we need -1 and 1
        config = - (config - 0.5)*2
        return Model(states, wrap(states, K.round(config)))

    return build().predict(states, **kwargs)
lightsout_digital.py (project: latplan, author: guicho271828)
def generate_gpu(configs, **kwargs):
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base*size

    def build():
        P = 2
        configs = Input(shape=(size*size,))
        _configs = 1 - K.round((configs/2)+0.5) # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs,'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1,P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base*base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size*base, size*base])
        return Model(configs, wrap(configs, states))

    return build().predict(np.array(configs),**kwargs)
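
The common trick in these latplan builders is rendering board states on the GPU: one-hot encode each cell's panel index, matrix-multiply against the flattened panel bitmaps, then permute and reshape the (size, size, base, base) tensor into a (size*base, size*base) image. A plain-numpy sketch of the same idea, with toy panels:

import numpy as np

P, base, size = 2, 3, 2                           # two panels, 3x3 pixels each, 2x2 board
panels = np.random.randint(0, 2, (P, base, base)).astype('float32')

config = np.array([0, 1, 1, 0])                   # panel index per cell, row-major
one_hot = np.eye(P, dtype='float32')[config]      # (size*size, P)
cells = one_hot @ panels.reshape(P, base * base)  # (size*size, base*base)

image = (cells.reshape(size, size, base, base)
              .transpose(0, 2, 1, 3)              # interleave panel rows into board rows
              .reshape(size * base, size * base))
print(image.shape)  # (6, 6)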

