Example source code for the Python Embedding() class
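All of the snippets below come from open-source projects and use the Keras Embedding layer, which maps integer token ids to dense vectors and is typically the first layer of a model. As a minimal sketch (vocabulary and dimension sizes here are arbitrary, not taken from any project below):

from keras.models import Sequential
from keras.layers import Embedding

model = Sequential()
# Maps ids in [0, 10000) to 128-dim vectors; input sequences are 50 ids long.
model.add(Embedding(input_dim=10000, output_dim=128, input_length=50))
# Input shape: (batch, 50) integers -> output shape: (batch, 50, 128) floats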

Source: layers.py (project: quora_duplicate, author: ijinmao)
def __init__(self, sequence_length, nb_words,
                 word_embedding_dim, embedding_matrix):
        self.model = Sequential()
        self.model.add(Embedding(nb_words,
                                 word_embedding_dim,
                                 weights=[embedding_matrix],
                                 input_length=sequence_length,
                                 trainable=False))
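The wrapper above expects a precomputed embedding_matrix whose row i holds the pretrained vector for word id i. A hedged sketch of how such a matrix is commonly assembled (build_embedding_matrix, word_index, and glove_vectors are illustrative names, not from the quora_duplicate project):

import numpy as np

def build_embedding_matrix(word_index, glove_vectors, nb_words, dim):
    # word_index: word -> integer id; glove_vectors: word -> numpy vector.
    matrix = np.zeros((nb_words, dim))  # row 0 stays zero for the padding id
    for word, i in word_index.items():
        vec = glove_vectors.get(word)
        if vec is not None and i < nb_words:
            matrix[i] = vec[:dim]
    return matrix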
Source: layers.py (project: quora_duplicate, author: ijinmao)
def __init__(self, sequence_length, nb_chars, nb_per_word,
                 embedding_dim, rnn_dim, rnn_unit='gru', dropout=0.0):
        def _collapse_input(x, nb_per_word=0):
            x = K.reshape(x, (-1, nb_per_word))
            return x

        def _unroll_input(x, sequence_length=0, rnn_dim=0):
            x = K.reshape(x, (-1, sequence_length, rnn_dim))
            return x

        if rnn_unit == 'gru':
            rnn = GRU
        else:
            rnn = LSTM
        self.model = Sequential()
        self.model.add(Lambda(_collapse_input,
                              arguments={'nb_per_word': nb_per_word},
                              output_shape=(nb_per_word,),
                              input_shape=(sequence_length, nb_per_word,)))
        self.model.add(Embedding(nb_chars,
                                 embedding_dim,
                                 input_length=nb_per_word,
                                 trainable=True))
        self.model.add(rnn(rnn_dim,
                           dropout=dropout,
                           recurrent_dropout=dropout))
        self.model.add(Lambda(_unroll_input,
                              arguments={'sequence_length': sequence_length,
                                         'rnn_dim': rnn_dim},
                              output_shape=(sequence_length, rnn_dim)))
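The _collapse_input/_unroll_input pair is a reshape trick: the per-word character windows are folded into the batch axis so one RNN can encode every word independently, then the per-word encodings are folded back into the sequence axis. A plain NumPy sketch of the shape arithmetic (sizes are illustrative):

import numpy as np

batch, seq_len, nb_per_word, rnn_dim = 2, 5, 8, 16
x = np.zeros((batch, seq_len, nb_per_word))

collapsed = x.reshape(-1, nb_per_word)             # (batch*seq_len, nb_per_word)
encoded = np.zeros((collapsed.shape[0], rnn_dim))  # stand-in for the char RNN output
unrolled = encoded.reshape(-1, seq_len, rnn_dim)   # (batch, seq_len, rnn_dim)
assert unrolled.shape == (batch, seq_len, rnn_dim)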
Source: lstm.py (project: hyperas, author: maxpumperla)
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit(X_train, y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=1,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
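The {{uniform(0, 1)}} and {{choice([...])}} markers are hyperas search-space templates, not valid Python on their own; hyperas rewrites the function before running it. A sketch of how such a model function is typically driven, assuming a companion data() function that returns the arguments model() consumes:

from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=5,
                                      trials=Trials())
print(best_run)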
Source: cnn_lstm.py (project: hyperas, author: maxpumperla)
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Source: End2EndModel.py (project: triplets-extraction, author: zsctju)
def creat_binary_tag_LSTM(sourcevocabsize, targetvocabsize, source_W, input_seq_lenth, output_seq_lenth,
                          hidden_dim, emd_dim, loss='categorical_crossentropy', optimizer='rmsprop'):
    encoder_a = Sequential()
    encoder_b = Sequential()
    encoder_c = Sequential()
    l_A_embedding = Embedding(input_dim=sourcevocabsize + 1,
                              output_dim=emd_dim,
                              input_length=input_seq_lenth,
                              mask_zero=True,
                              weights=[source_W])
    encoder_a.add(l_A_embedding)
    encoder_a.add(Dropout(0.3))
    encoder_b.add(l_A_embedding)
    encoder_b.add(Dropout(0.3))
    encoder_c.add(l_A_embedding)

    Model = Sequential()

    encoder_a.add(LSTM(hidden_dim, return_sequences=True))
    encoder_b.add(LSTM(hidden_dim, return_sequences=True, go_backwards=True))
    encoder_rb = Sequential()
    encoder_rb.add(ReverseLayer2(encoder_b))
    encoder_ab = Merge((encoder_a, encoder_rb), mode='concat')
    Model.add(encoder_ab)

    decodelayer = LSTMDecoder_tag(hidden_dim=hidden_dim, output_dim=hidden_dim,
                                  input_length=input_seq_lenth,
                                  output_length=output_seq_lenth,
                                  state_input=False,
                                  return_sequences=True)
    Model.add(decodelayer)
    Model.add(TimeDistributedDense(targetvocabsize + 1))
    Model.add(Activation('softmax'))
    Model.compile(loss=loss, optimizer=optimizer)
    return Model
Source: common.py (project: nli_generation, author: jstarc)
def make_fixed_embeddings(glove, seq_len):
    glove_mat = np.array(list(glove.values()))  # list() so dict views also work on Python 3
    return Embedding(input_dim=glove_mat.shape[0], output_dim=glove_mat.shape[1],
                     weights=[glove_mat], trainable=False, input_length=seq_len)
Source: generative_models.py (project: nli_generation, author: jstarc)
def baseline_train(noise_examples, hidden_size, noise_dim, glove, hypo_len, version):
    prem_input = Input(shape=(None,), dtype='int32', name='prem_input')
    hypo_input = Input(shape=(hypo_len + 1,), dtype='int32', name='hypo_input')
    noise_input = Input(shape=(1,), dtype='int32', name='noise_input')
    train_input = Input(shape=(None,), dtype='int32', name='train_input')
    class_input = Input(shape=(3,), name='class_input')
    concat_dim = hidden_size + noise_dim + 3
    prem_embeddings = make_fixed_embeddings(glove, None)(prem_input)
    hypo_embeddings = make_fixed_embeddings(glove, hypo_len + 1)(hypo_input)

    premise_layer = LSTM(output_dim=hidden_size, return_sequences=False,
                            inner_activation='sigmoid', name='premise')(prem_embeddings)

    noise_layer = Embedding(noise_examples, noise_dim,
                            input_length=1, name='noise_embeddings')(noise_input)
    flat_noise = Flatten(name='noise_flatten')(noise_layer)
    merged = merge([premise_layer, class_input, flat_noise], mode='concat')
    creative = Dense(concat_dim, name='cmerge')(merged)
    fake_merge = Lambda(lambda x: x[0], output_shape=lambda x: x[0])([hypo_embeddings, creative])
    hypo_layer = FeedLSTM(output_dim=concat_dim, return_sequences=True,
                          feed_layer=creative, inner_activation='sigmoid',
                          name='attention')([fake_merge])

    hs = HierarchicalSoftmax(len(glove), trainable=True, name='hs')([hypo_layer, train_input])
    inputs = [prem_input, hypo_input, noise_input, train_input, class_input]

    model_name = 'version' + str(version)
    model = Model(input=inputs, output=hs, name=model_name)
    model.compile(loss=hs_categorical_crossentropy, optimizer='adam')

    return model
Source: models.py (project: aes-gated-word-char, author: unkn0wnxx)
def create_gate_positional_model(self, char_cnn_kernel, cnn_kernel,
                                     emb_dim, emb_path, vocab_word,
                                     vocab_word_size, word_maxlen,
                                     vocab_char_size, char_maxlen):
        from aes.layers import Conv1DMask, GatePositional, MaxPooling1DMask
        logger.info('Building gate positional model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        char_cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=3,
            padding='same')(char_emb)
        char_input = MaxPooling1DMask(
            pool_size=char_maxlen // word_maxlen, padding='same')(char_cnn)  # // keeps pool_size an integer on Python 3
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_input = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        gate = GatePositional()([char_input, word_input])
        final_input = Dense(50)(gate)
        cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=3,
            padding='same')(final_input)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=[input_char, input_word], outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
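The EmbReader calls above overwrite the randomly initialized word_emb weights with pretrained vectors via set_value (a Theano-backend idiom). As an illustrative stand-in for what such a reader does, not the project's actual API, a loader for a whitespace-separated embedding file might look like:

import numpy as np

def load_pretrained_rows(path, vocab, init_matrix):
    # Fill rows of init_matrix for words found in the file; other rows
    # keep their random initialization (mirrors get_emb_matrix_given_vocab).
    matrix = init_matrix.copy()
    with open(path, encoding='utf-8') as f:
        for line in f:
            parts = line.rstrip().split(' ')
            if parts[0] in vocab:
                matrix[vocab[parts[0]]] = np.asarray(parts[1:], dtype='float32')
    return matrix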
Source: models.py (project: aes-gated-word-char, author: unkn0wnxx)
def create_gate_matrix_model(self, char_cnn_kernel, cnn_kernel, emb_dim,
                                 emb_path, vocab_word, vocab_word_size,
                                 word_maxlen, vocab_char_size, char_maxlen):
        from aes.layers import Conv1DMask, GateMatrix, MaxPooling1DMask
        logger.info('Building gate matrix model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        char_cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=char_cnn_kernel,
            padding='same')(char_emb)
        char_input = MaxPooling1DMask(
            pool_size=char_maxlen // word_maxlen, padding='same')(char_cnn)  # // keeps pool_size an integer on Python 3
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_input = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        gate = GateMatrix()([char_input, word_input])
        final_input = Dense(50)(gate)
        cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=cnn_kernel,
            padding='same')(final_input)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=[input_char, input_word], outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Source: models.py (project: aes-gated-word-char, author: unkn0wnxx)
def create_gate_vector_model(self, char_cnn_kernel, cnn_kernel, emb_dim,
                                 emb_path, vocab_word, vocab_word_size,
                                 word_maxlen, vocab_char_size, char_maxlen):
        from aes.layers import Conv1DMask, GateVector, MaxPooling1DMask
        logger.info('Building gate vector model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        char_cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=char_cnn_kernel,
            padding='same')(char_emb)
        char_input = MaxPooling1DMask(
            pool_size=char_maxlen // word_maxlen, padding='same')(char_cnn)  # // keeps pool_size an integer on Python 3
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_input = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        gate = GateVector()([char_input, word_input])
        final_input = Dense(50)(gate)
        cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=cnn_kernel,
            padding='same')(final_input)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=[input_char, input_word], outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Source: models.py (project: aes-gated-word-char, author: unkn0wnxx)
def create_concat_model(self, emb_dim, emb_path, vocab_word,
                            vocab_word_size, word_maxlen, vocab_char_size,
                            char_maxlen):
        from aes.layers import Conv1DMask, MaxPooling1DMask
        from keras.layers import concatenate
        logger.info('Building concatenation model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        char_cnn = Conv1DMask(
            filters=emb_dim, kernel_size=3, padding='same')(char_emb)
        char_input = MaxPooling1DMask(
            pool_size=char_maxlen // word_maxlen, padding='same')(char_cnn)  # // keeps pool_size an integer on Python 3
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_input = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        merged = concatenate([char_input, word_input], axis=1)
        merged_dropped = Dropout(0.5)(merged)
        final_input = Dense(50)(merged_dropped)
        cnn = Conv1DMask(
            filters=emb_dim, kernel_size=3, padding='same')(final_input)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=[input_char, input_word], outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Source: models.py (project: aes-gated-word-char, author: unkn0wnxx)
def create_word_cnn_model(self, emb_dim, emb_path, vocab_word,
                              vocab_word_size, word_maxlen):
        from aes.layers import Conv1DMask
        logger.info('Building word CNN model')
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_emb = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        cnn = Conv1DMask(
            filters=emb_dim, kernel_size=3, padding='same')(word_emb)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_word, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Source: models.py (project: aes-gated-word-char, author: unkn0wnxx)
def create_word_lstm_model(self, emb_dim, emb_path, vocab_word,
                               vocab_word_size, word_maxlen):
        from keras.layers import LSTM
        logger.info('Building word LSTM model')
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_emb = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        lstm = LSTM(
            300,
            return_sequences=True,
            dropout=self.dropout,
            recurrent_dropout=self.recurrent_dropout)(word_emb)
        dropped = Dropout(0.5)(lstm)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_word, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Source: models.py (project: aes-gated-word-char, author: unkn0wnxx)
def create_word_gru_model(self, emb_dim, emb_path, vocab_word,
                              vocab_word_size, word_maxlen):
        from keras.layers import GRU
        logger.info('Building word GRU model')
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_emb = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        gru = GRU(
            300,
            return_sequences=True,
            dropout=self.dropout,
            recurrent_dropout=self.recurrent_dropout)(word_emb)
        dropped = Dropout(0.5)(gru)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_word, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Source: MemNN_classifier.py (project: ParseLawDocuments, author: FanhuaandLuomu)
def Mem_Model(story_maxlen, query_maxlen, vocab_size):
    input_encoder_m = Input(shape=(story_maxlen,), dtype='int32', name='input_encoder_m')
    x = Embedding(output_dim=64, input_dim=vocab_size, input_length=story_maxlen)(input_encoder_m)
    x = Dropout(0.5)(x)

    question_encoder = Input(shape=(query_maxlen,), dtype='int32', name='question_encoder')
    y = Embedding(output_dim=64, input_dim=vocab_size, input_length=query_maxlen)(question_encoder)
    y = Dropout(0.5)(y)

    z = merge([x, y], mode='dot', dot_axes=[2, 2])
    # z = merge([x, y], mode='sum')
    match = Activation('softmax')(z)

    input_encoder_c = Input(shape=(story_maxlen,), dtype='int32', name='input_encoder_c')
    c = Embedding(output_dim=query_maxlen, input_dim=vocab_size, input_length=story_maxlen)(input_encoder_c)
    c = Dropout(0.5)(c)

    response = merge([match, c], mode='sum')
    w = Permute((2, 1))(response)

    answer = merge([w, y], mode='concat', concat_axis=-1)
    lstm = LSTM(32)(answer)
    lstm = Dropout(0.5)(lstm)
    main_loss = Dense(50, activation='sigmoid', name='main_output')(lstm)

    model = Model(input=[input_encoder_m, question_encoder, input_encoder_c], output=main_loss)
    return model
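The mode='dot' merge with dot_axes=[2, 2] is the memory-network match step: every story position is scored against every question position over the shared embedding axis, and the softmax turns the scores into attention weights. In NumPy terms, for a single sample (shapes are illustrative):

import numpy as np

story_len, query_len, dim = 4, 3, 64
x = np.random.rand(story_len, dim)  # embedded story
y = np.random.rand(query_len, dim)  # embedded question

scores = x @ y.T                    # (story_len, query_len)
e = np.exp(scores - scores.max(axis=-1, keepdims=True))
match = e / e.sum(axis=-1, keepdims=True)  # softmax over the question axis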
Source: vdcnn.py (project: VDCNN, author: yuhsinliu1993)
def build_model(num_filters, num_classes, sequence_max_length=512, num_quantized_chars=71, embedding_size=16, learning_rate=0.001, top_k=3, model_path=None):

    inputs = Input(shape=(sequence_max_length, ), dtype='int32', name='inputs')

    embedded_sent = Embedding(num_quantized_chars, embedding_size, input_length=sequence_max_length)(inputs)

    # First conv layer
    conv = Conv1D(filters=64, kernel_size=3, strides=2, padding="same")(embedded_sent)

    # Each ConvBlock with one MaxPooling Layer
    for i in range(len(num_filters)):
        conv = ConvBlockLayer(get_conv_shape(conv), num_filters[i])(conv)
        conv = MaxPooling1D(pool_size=3, strides=2, padding="same")(conv)

    # k-max pooling (Finds values and indices of the k largest entries for the last dimension)
    def _top_k(x):
        x = tf.transpose(x, [0, 2, 1])
        k_max = tf.nn.top_k(x, k=top_k)
        return tf.reshape(k_max[0], (-1, num_filters[-1] * top_k))
    k_max = Lambda(_top_k, output_shape=(num_filters[-1] * top_k,))(conv)

    # 3 fully-connected layer with dropout regularization
    fc1 = Dropout(0.2)(Dense(512, activation='relu', kernel_initializer='he_normal')(k_max))
    fc2 = Dropout(0.2)(Dense(512, activation='relu', kernel_initializer='he_normal')(fc1))
    fc3 = Dense(num_classes, activation='softmax')(fc2)

    # define optimizer
    sgd = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=False)
    model = Model(inputs=inputs, outputs=fc3)
    model.compile(optimizer=sgd, loss='mean_squared_error', metrics=['accuracy'])

    if model_path is not None:
        model.load_weights(model_path)

    return model
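Because num_filters is indexed once per conv block, it must be a sequence with one entry per block. A hedged usage sketch (the four-stage filter progression follows the VDCNN paper, but is an assumption here, not taken from this repository):

model = build_model(num_filters=[64, 128, 256, 512],
                    num_classes=2,
                    sequence_max_length=512,
                    embedding_size=16)
model.summary()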
Source: train.py (project: product-category-classifier, author: two-tap)
def build_text_model(word_index):
  text_input = Input(shape=(MAX_SEQUENCE_LENGTH,))

  embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))

  for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)

    # Words not found in the embedding index stay all-zeros.
    if embedding_vector is not None:
      embedding_matrix[i] = embedding_vector[:EMBEDDING_DIM]

  embedding_layer = Embedding(embedding_matrix.shape[0],
                              embedding_matrix.shape[1],
                              weights=[embedding_matrix],
                              input_length=MAX_SEQUENCE_LENGTH,
                              trainable=False)  # freeze the pretrained vectors on the layer itself

  x = embedding_layer(text_input)
  x = Conv1D(128, 5, activation='relu')(x)
  x = MaxPooling1D(5)(x)
  x = Conv1D(128, 5, activation='relu')(x)
  x = MaxPooling1D(5)(x)
  x = Flatten()(x)
  x = Dense(1024, activation='relu')(x)

  return x, text_input
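build_text_model returns the feature tensor and its input rather than a compiled model so the text branch can later be merged with an image branch. A hedged sketch of finishing it as a standalone classifier (NUM_CLASSES is a hypothetical constant, not from the project):

from keras.models import Model
from keras.layers import Dense

x, text_input = build_text_model(word_index)
predictions = Dense(NUM_CLASSES, activation='softmax')(x)  # NUM_CLASSES is assumed
model = Model(text_input, predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])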

Source: lcd.py (project: knowledgeflow, author: 3rduncle)
def buildEmbedding(self, name):
        weights = self.embedding_params.get('weights')
        assert weights
        self.layers[name] = Embedding(
            weights[0].shape[0],
            weights[0].shape[1],
            weights=weights,
            trainable=self.params.get('embedding_trainable', False),
            name=name
        )
Source: test_embeddings.py (project: keras-recommendation, author: sonyisme)
def test_unitnorm_constraint(self):
        # Note: this test targets the legacy Keras 0.x API (W_constraint,
        # positional Dense(input_dim, output_dim), class_mode, Sequential.train()).
        lookup = Sequential()
        lookup.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm()))
        lookup.add(Flatten())
        lookup.add(Dense(2, 1))
        lookup.add(Activation('sigmoid'))
        lookup.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary')
        lookup.train(self.X1, np.array([[1], [0]], dtype='int32'))
        norm = np.linalg.norm(lookup.params[0].get_value(), axis=1)
        self.assertTrue(np.allclose(norm, np.ones_like(norm).astype('float32')))
Source: text_classifier.py (project: keras_text_classifier, author: cdj0311)
def createmodel(self):
        """
        create cnn model structure
        :return: model structure
        """
        max_features = max(self.words.values()) + 1 # input dims
        model = Sequential()
        if self.W is None:
            model.add(Embedding(max_features, self.embedding_length, input_length=self.maxlen, dropout=0.2))
        else:
            model.add(Embedding(max_features, self.layer1_size, weights=[self.W], input_length=self.maxlen, dropout=0.2))

        model.add(Convolution1D(nb_filter=self.nb_filter,
                                filter_length=self.filter_length,
                                border_mode='valid',
                                activation='relu',
                                subsample_length=1))

        model.add(MaxPooling1D(pool_length=model.output_shape[1]))
        model.add(Flatten())
        model.add(Dense(self.hidden_dims))
        model.add(Dropout(0.2))
        model.add(Activation('relu'))
        model.add(Dense(self.nb_classes))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=["accuracy"])
        return model

