Python Bidirectional() class: example source code
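The snippets below are collected from open-source projects that wrap Keras recurrent layers in Bidirectional(). For orientation, here is a minimal, self-contained sketch of the shared pattern (not taken from any project on this page; the layer sizes are illustrative and the Keras 2.x API is assumed). Bidirectional runs the wrapped layer over the input in both directions and merges the two outputs, concatenating them by default.

from keras.models import Sequential
from keras.layers import Embedding, Bidirectional, LSTM, Dense

model = Sequential()
# map integer word ids to dense vectors: (batch, 100) -> (batch, 100, 128)
model.add(Embedding(input_dim=10000, output_dim=128, input_length=100))
# run an LSTM forwards and backwards; merge_mode='concat' (the default)
# joins the two 64-dim final states into a single 128-dim vector
model.add(Bidirectional(LSTM(64), merge_mode='concat'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])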

Source: cpg.py (project: deepcpg, author: cangermueller)
def __call__(self, inputs):
        x = self._merge_inputs(inputs)

        shape = getattr(x, '_keras_shape')
        replicate_model = self._replicate_model(kl.Input(shape=shape[2:]))
        x = kl.TimeDistributed(replicate_model)(x)
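        # TimeDistributed applies the shared replicate_model independently at each
        # position along axis 1; '_keras_shape' is the shape Keras caches on tensors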

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Bidirectional(kl.GRU(128, kernel_regularizer=kernel_regularizer,
                                    return_sequences=True),
                             merge_mode='concat')(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        gru = kl.GRU(256, kernel_regularizer=kernel_regularizer)
        x = kl.Bidirectional(gru)(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x)
Source: han1.py (project: 3HAN, author: ni9elf)
def HAN1(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    #model = Sequential()
    wordInputs = Input(shape=(MAX_WORDS,), name='word1', dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='emb1')(wordInputs) #Assuming all the sentences have same number of words. Check for input_length again.


    hij = Bidirectional(GRU(WORDGRU, name='gru1', return_sequences=True))(wordEmbedding)


    wordDrop = Dropout(DROPOUTPER, name='drop1')(hij)

    alpha_its, Si = AttentionLayer(name='att1')(wordDrop)   

    v6 = Dense(1, activation="sigmoid", name="dense")(Si)
    #model.add(Dense(1, activation="sigmoid", name="documentOut3"))
    model = Model(inputs=[wordInputs], outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
Source: gru_avg.py (project: 3HAN, author: ni9elf)
def fGRU_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS+1, MAX_WORDS), name="wordInputs", dtype='float32')

    wordInp = Flatten()(wordInputs)
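    # the Flatten above joins the (MAX_SENTS+1, MAX_WORDS) grid into one long sequence of word ids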

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInp) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    head = GlobalAveragePooling1D()(hij) 

    v6 = Dense(1, activation="sigmoid", name="dense")(head)

    model = Model(inputs=[wordInputs], outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    return model
Source: model.py (project: Sentiment-Analysis, author: jasonwu0731)
def __init__(self, n_classes, vocab_size, max_len, num_units=128,
                 useBiDirection=False, useAttention=False, learning_rate=0.001, dropout=0, embedding_size=300):
        self.model = Sequential()
        self.model.add(Embedding(input_dim=vocab_size,
                                 output_dim=embedding_size, input_length=max_len))
        lstm_model = LSTM(num_units, dropout=dropout)
        if useBiDirection:
            lstm_model = Bidirectional(lstm_model)
        if useAttention:
            print("Attention not implemented yet ... ")
        self.model.add(lstm_model)
        self.model.add(Dense(n_classes, activation='softmax'))

        self.model.summary()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=Adam(lr=learning_rate),
                           metrics=['accuracy'])
Source: lstm_bilstm.py (project: sota_sentiment, author: jbarnesspain)
def create_BiLSTM(wordvecs, lstm_dim=300, output_dim=2, dropout=.5,
                weights=None, train=True):
    model = Sequential()
    if weights is not None:
        model.add(Embedding(len(wordvecs)+1,
                            len(wordvecs['the']),
                            weights=[weights],
                            trainable=train))
    else:
        model.add(Embedding(len(wordvecs)+1,
                            len(wordvecs['the']),
                            trainable=train))
    model.add(Dropout(dropout))
    model.add(Bidirectional(LSTM(lstm_dim)))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim, activation='softmax'))
    if output_dim == 2:
        model.compile('adam', 'binary_crossentropy',
                  metrics=['accuracy'])
    else:
        model.compile('adam', 'categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Source: lstm_bilstm.py (project: sota_sentiment, author: jbarnesspain)
def print_results(bi, file, out_file, file_type):

    names, results, std_devs, dim = test_embeddings(bi, file, file_type)

    rr = [[u'{0:.3f} \u00B1{1:.3f}'.format(r, s) for r, s in zip(result, std_dev)] for result, std_dev in zip(results, std_devs)]
    table_data = [[name] + result for name, result in zip(names, rr)]
    table = tabulate.tabulate(table_data, headers=['dataset', 'acc', 'prec', 'rec', 'f1'], tablefmt='simple', floatfmt='.3f')

    if out_file:
        with open(out_file, 'a') as f:
            f.write('\n')
            if bi:
                f.write('+++Bidirectional LSTM+++\n')
            else:
                f.write('+++LSTM+++\n')
            f.write(table)
            f.write('\n')
    else:
        print()
        if bi:
            print('Bidirectional LSTM')
        else:
            print('LSTM')
        print(table)
Source: rnn.py (project: rupo, author: IlyaGusev)
def build(self) -> None:
        """
        Build the model.
        """
        inp = Input(shape=(None,))

        emb = Embedding(len(self.grapheme_alphabet), self.emb_dimension)(inp)
        encoded = Bidirectional(self.rnn(self.units1, return_sequences=True, recurrent_dropout=self.dropout))(emb)
        encoded = Dropout(self.dropout)(encoded)
        decoded = TimeDistributed(Dense(self.units2, activation="relu"))(encoded)
        predictions = TimeDistributed(Dense(len(self.phonetic_alphabet), activation="softmax"))(decoded)

        model = Model(inputs=inp, outputs=predictions)
        model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        self.model = model
Source: grapheme_rnn.py (project: rupo, author: IlyaGusev)
def build(self) -> None:
        """
        Build the model.
        """
        inp = Input(shape=(None,))

        emb = Embedding(len(self.grapheme_set), self.emb_dimension)(inp)
        encoded = Bidirectional(self.rnn(self.units, return_sequences=True, recurrent_dropout=self.dropout))(emb)
        encoded = Dropout(self.dropout)(encoded)
        decoded = TimeDistributed(Dense(self.units, activation="relu"))(encoded)
        predictions = TimeDistributed(Dense(3, activation="softmax"))(decoded)

        model = Model(inputs=inp, outputs=predictions)
        model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        self.model = model
Source: phoneme_rnn.py (project: rupo, author: IlyaGusev)
def build(self) -> None:
        """
        Build the model.
        """
        inp = Input(shape=(None,))

        emb = Embedding(len(self.phonetic_alphabet), self.emb_dimension)(inp)
        encoded = Bidirectional(self.rnn(self.units, return_sequences=True, recurrent_dropout=self.dropout))(emb)
        encoded = Dropout(self.dropout)(encoded)
        decoded = Bidirectional(self.rnn(self.units, return_sequences=True, recurrent_dropout=self.dropout))(encoded)
        decoded = Dropout(self.dropout)(decoded)
        predictions = TimeDistributed(Dense(3, activation="softmax"))(decoded)

        model = Model(inputs=inp, outputs=predictions)
        model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        self.model = model
Source: dna.py (project: deepcpg, author: cangermueller)
def __call__(self, inputs):
        x = inputs[0]

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(128, 11,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.MaxPooling1D(4)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(256, 7,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.MaxPooling1D(4)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        gru = kl.recurrent.GRU(256, kernel_regularizer=kernel_regularizer)
        x = kl.Bidirectional(gru)(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x)
Source: lstm_intent_network.py (project: bot.ai, author: mlucchini)
def __init__(self, nlp, expressions):
        self.nlp = nlp
        self.all_expressions = expressions
        self.num_features = nlp.vocab.vectors_length
        self.categories = list(set([expression.intent().name for expression in expressions]))

        intents = [expression.intent().name for expression in expressions]
        intents_indices = [self.categories.index(intent) for intent in intents]

        self.x = np.zeros(shape=(len(expressions), DOCUMENT_MAX_NUM_WORDS, self.num_features)).astype('float32')
        self.__init_x(self.x, expressions)

        self.y = np_utils.to_categorical(intents_indices)

        self.model = Sequential()
        self.model.add(Bidirectional(LSTM(int(DOCUMENT_MAX_NUM_WORDS * 1.5)),
                                     input_shape=(DOCUMENT_MAX_NUM_WORDS, self.num_features)))
        self.model.add(Dropout(0.3))
        self.model.add(Dense(len(self.categories), activation='sigmoid'))
        self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
Source: model_rnn.py (project: botcycle, author: D2KLab)
def bidirectional_lstm(len_output):
    # sequence_input is a matrix of glove vectors (one for each input word)
    sequence_input = Input(
        shape=(MAX_SEQUENCE_LENGTH, EMBEDDING_DIM,), dtype='float32')
    l_lstm = Bidirectional(LSTM(100))(sequence_input)
    preds = Dense(len_output, activation='softmax')(l_lstm)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=[utils.f1_score, 'categorical_accuracy'])

    """
    model.add(Bidirectional(LSTM(shape['nr_hidden'])))
    # dropout to avoid overfitting
    model.add(Dropout(settings['dropout']))
    model.add(Dense(shape['nr_class'], activation='sigmoid'))
    model.compile(optimizer=Adam(lr=settings['lr']), loss='binary_crossentropy',
                metrics=['accuracy'])
    """

    return model
Source: model_rnn.py (project: botcycle, author: D2KLab)
def bidirectional_gru(len_output):
    # sequence_input is a matrix of glove vectors (one for each input word)
    sequence_input = Input(
        shape=(MAX_SEQUENCE_LENGTH, EMBEDDING_DIM,), dtype='float32')
    l_lstm = Bidirectional(GRU(100))(sequence_input)
    # TODO look call(input_at_t, states_at_t) method, returning (output_at_t, states_at_t_plus_1)
    # also look at switch(condition, then_expression, else_expression) for deciding when to feed previous state
    preds = Dense(len_output, activation='softmax')(l_lstm)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=[utils.f1_score, 'categorical_accuracy'])

    return model

# required, see values below
Source: bigrumodel.py (project: CIKM_AnalytiCup_2017, author: zxth93)
def BiGRU(X_train, y_train, X_test, y_test, gru_units, dense_units, input_shape,
          batch_size, epochs, drop_out, patience):

    model = Sequential()

    reg = L1L2(l1=0.2, l2=0.2)

    model.add(Bidirectional(GRU(units=gru_units, dropout=drop_out, activation='relu', recurrent_regularizer=reg,
                                return_sequences=True),
                            input_shape=input_shape,
                            merge_mode="concat"))

    model.add(BatchNormalization())

    model.add(TimeDistributed(Dense(dense_units, activation='relu')))
    model.add(BatchNormalization())

    model.add(Bidirectional(GRU(units=gru_units, dropout=drop_out, activation='relu', recurrent_regularizer=reg,
                                return_sequences=True),
                            merge_mode="concat"))

    model.add(BatchNormalization())

    model.add(Dense(units=1))

    model.add(GlobalAveragePooling1D())
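    # the pooling above averages the per-timestep Dense(1) outputs into one scalar prediction per sequence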

    print(model.summary())

    early_stopping = EarlyStopping(monitor="val_loss", patience=patience)

    model.compile(loss='mse', optimizer='adam')

    history_callback = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
                                 verbose=2, callbacks=[early_stopping], validation_data=(X_test, y_test), shuffle=True)

    return model, history_callback
Source: encoders.py (project: onto-lstm, author: pdasigi)
def _get_encoder_layer(self):
        if self.encoder_layer is None:
            self.encoder_layer = LSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
                                      return_sequences=self.return_sequences, name="encoder")
            if self.bidirectional:
                self.encoder_layer = Bidirectional(self.encoder_layer, name="encoder")
        return self.encoder_layer
Source: encoders.py (project: onto-lstm, author: pdasigi)
def _get_encoder_layer(self):
        if self.encoder_layer is None:
            self.encoder_layer = OntoAttentionLSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
                                                   num_senses=self.num_senses, num_hyps=self.num_hyps,
                                                   use_attention=self.use_attention, consume_less="gpu",
                                                   return_sequences=self.return_sequences, name="onto_lstm")
            if self.bidirectional:
                self.encoder_layer = Bidirectional(self.encoder_layer, name="onto_lstm")
        return self.encoder_layer
Source: model_pp_attachment.py (project: onto-lstm, author: pdasigi)
def define_attention_model(self):
        '''
        Take necessary parts out of the model to get OntoLSTM attention.
        '''
        if not self.model:
            raise RuntimeError("Model not trained yet!")
        input_shape = self.model.get_input_shape_at(0)
        input_layer = Input(input_shape[1:], dtype='int32')  # removing batch size
        embedding_layer = None
        encoder_layer = None
        for layer in self.model.layers:
            if layer.name == "embedding":
                embedding_layer = layer
            elif layer.name == "onto_lstm":
                # We need to redefine the OntoLSTM layer with the learned weights and set return attention to True.
                # Assuming we'll want attention values for all words (return_sequences = True)
                if isinstance(layer, Bidirectional):
                    onto_lstm = OntoAttentionLSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
                                                  num_senses=self.num_senses, num_hyps=self.num_hyps,
                                                  use_attention=True, return_attention=True, return_sequences=True,
                                                  consume_less='gpu')
                    encoder_layer = Bidirectional(onto_lstm, weights=layer.get_weights())
                else:
                    encoder_layer = OntoAttentionLSTM(input_dim=self.embed_dim,
                                                      output_dim=self.embed_dim, num_senses=self.num_senses,
                                                      num_hyps=self.num_hyps, use_attention=True,
                                                      return_attention=True, return_sequences=True,
                                                      consume_less='gpu', weights=layer.get_weights())
                break
        if not embedding_layer or not encoder_layer:
            raise RuntimeError("Required layers not found!")
        attention_output = encoder_layer(embedding_layer(input_layer))
        self.attention_model = Model(inputs=input_layer, outputs=attention_output)
        print >>sys.stderr, "Attention model summary:"
        self.attention_model.summary()
        self.attention_model.compile(loss="mse", optimizer="sgd")  # Loss and optimizer do not matter!
Source: models.py (project: AutoSleepScorerDev, author: skjerns)
def bi_lstm(input_shape, n_classes,layers=2, neurons=80, dropout=0.3):
    """
    just replace ANN by RNNs
    """
    model = Sequential(name='pure_rnn')
    model.add(Bidirectional(LSTM(neurons, return_sequences=False if layers==1 else True, dropout=dropout, recurrent_dropout=dropout), input_shape=input_shape))

    for i in range(layers-1):
        model.add(Bidirectional(LSTM(neurons, return_sequences=False if i==layers-2 else True, dropout=dropout, recurrent_dropout=dropout)))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001), metrics=[keras.metrics.categorical_accuracy])
    return model
Source: gru.py (project: 3HAN, author: ni9elf)
def GRUf(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS+1, MAX_WORDS), name="wordInputs", dtype='float32')

    wordInp = Flatten()(wordInputs)

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInp) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=False), name='gru1')(wordEmbedding)

    v6 = Dense(1, activation="sigmoid", name="dense")(hij)

    model = Model(inputs=[wordInputs], outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    return model
Source: han_avg.py (project: 3HAN, author: ni9elf)
def fhan2_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)


    Si = GlobalAveragePooling1D()(hij)

    wordEncoder = Model(wordInputs, Si)

    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs', dtype='float32')

    #sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(docInputs) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    Vb = GlobalAveragePooling1D()(hi)

    v6 = Dense(1, activation="sigmoid", kernel_initializer='glorot_uniform', name="dense")(Vb)
    model = Model(inputs=[docInputs], outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
Source: han.py (project: 3HAN, author: ni9elf)
def han2(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):

    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    #print 'in han2 max-nb-words'
    #print MAX_NB_WORDS

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    alpha_its, Si = AttentionLayer(name='att1')(hij)

    #wordDrop = Dropout(DROPOUTPER, name='wordDrop')(Si)

    wordEncoder = Model(wordInputs, Si)
    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs', dtype='float32')

    sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(sentenceMasking) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    alpha_s, Vb = AttentionLayer(name='att2')(hi)

    #sentDrop = Dropout(DROPOUTPER, name='sentDrop')(Vb)

    v6 = Dense(1, activation="sigmoid", kernel_initializer='he_normal', name="dense")(Vb)
    model = Model(inputs=[docInputs], outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

    return model, wordEncoder
Source: layers.py (project: deeppavlov, author: deepmipt)
def biLSTM_encoder(input, units, dropout, recurrent_dropout, num_layers):
    """Question and context encoder. Just Bi-LSTM from keras."""

    encoder = input
    for i in range(num_layers):
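        # stack num_layers bidirectional LSTMs; each consumes the previous layer's full sequence output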
        encoder = Bidirectional(LSTM(units=units,
                                activation='tanh',
                                recurrent_activation='hard_sigmoid',
                                use_bias=True,
                                kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal',
                                bias_initializer='zeros',
                                unit_forget_bias=True,
                                kernel_regularizer=None,
                                recurrent_regularizer=None,
                                bias_regularizer=None,
                                activity_regularizer=None,
                                kernel_constraint=None,
                                recurrent_constraint=None,
                                bias_constraint=None,
                                return_sequences=True,
                                dropout=dropout,
                                recurrent_dropout=recurrent_dropout,
                                unroll=False))(encoder)

    return encoder
Source: sequence_encoders.py (project: keras-text, author: raghakot)
def build_model(self, x):
        for i, n in enumerate(self.hidden_dims):
            is_last_layer = i == len(self.hidden_dims) - 1
            rnn = self.rnn_class(n, return_sequences=not is_last_layer, **self.rnn_kwargs)
            if self.bidirectional:
                x = Bidirectional(rnn)(x)
            else:
                x = rnn(x)
        return x
Source: sequence_encoders.py (project: keras-text, author: raghakot)
def build_model(self, x):
        rnn = self.rnn_class(self.encoder_dims, return_sequences=True, **self.rnn_kwargs)
        if self.bidirectional:
            word_activations = Bidirectional(rnn)(x)
        else:
            word_activations = rnn(x)

        attention_layer = AttentionLayer()
        doc_vector = attention_layer(word_activations)
        self.attention_tensor = attention_layer.get_attention_tensor()
        return doc_vector
Source: imdb.py (project: keras-resnet, author: codekansas)
def get_lstm_model():
    input = Input(shape=(maxlen, 128))
    output = Bidirectional(LSTM(64, return_sequences=True))(input)
    return Model(input, output)
Source: model.py (project: mycroft, author: wpm)
def __init__(self, training, sequence_length=None, vocabulary_size=None,
                 train_embeddings=SequentialTextEmbeddingClassifier.TRAIN_EMBEDDINGS,
                 language_model=LANGUAGE_MODEL, rnn_type=RNN_TYPE, rnn_units=RNN_UNITS, bidirectional=BIDIRECTIONAL,
                 dropout=DROPOUT, learning_rate=LEARNING_RATE):
        from keras.models import Sequential
        from keras.layers import Bidirectional, Dense, Dropout, GRU, LSTM
        from keras.optimizers import Adam

        label_names, sequence_length, vocabulary_size = self.parameters_from_training(sequence_length, vocabulary_size,
                                                                                      training, language_model)
        embedder = TextSequenceEmbedder(vocabulary_size, sequence_length, language_model)

        model = Sequential()
        model.add(self.embedding_layer(embedder, sequence_length, train_embeddings, mask_zero=True, name="embedding"))
        rnn_class = {"lstm": LSTM, "gru": GRU}[rnn_type]
        for i, units in enumerate(rnn_units, 1):
            name = "rnn-%d" % i
            return_sequences = i < len(rnn_units)
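            # every layer except the last returns full sequences for the next RNN layer to consume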
            if bidirectional:
                rnn = Bidirectional(rnn_class(units, return_sequences=return_sequences), name=name)
            else:
                rnn = rnn_class(units, return_sequences=return_sequences, name=name)
            model.add(rnn)
            model.add(Dropout(dropout, name="dropout-%d" % i))
        model.add(Dense(len(label_names), activation="softmax", name="softmax"))
        optimizer = Adam(lr=learning_rate)
        model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"])

        self.rnn_units = rnn_units
        self.bidirectional = bidirectional
        self.dropout = dropout
        super().__init__(model, embedder, label_names)
Source: BiRNN_mnist.py (project: Project101, author: Wonjuseo)
def Make_model(self):
        model = Sequential()
        model.add(Bidirectional(LSTM(self.n_hidden), input_shape=(n_time, n_in)))
        model.add(Dense(n_out, kernel_initializer=self.weight))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999), metrics=['accuracy'])
        return model
Source: network.py (project: vie-ner-lstm, author: pth1993)
def building_ner(num_lstm_layer, num_hidden_node, dropout, time_step, vector_length, output_length):
    model = Sequential()
    model.add(Masking(mask_value=0., input_shape=(time_step, vector_length)))
    for i in range(num_lstm_layer-1):
        model.add(Bidirectional(LSTM(units=num_hidden_node, return_sequences=True, dropout=dropout,
                                     recurrent_dropout=dropout)))
    model.add(Bidirectional(LSTM(units=num_hidden_node, return_sequences=True, dropout=dropout,
                                 recurrent_dropout=dropout), merge_mode='concat'))
    model.add(TimeDistributed(Dense(output_length)))
    model.add(Activation('softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
Source: utils.py (project: CaptchaVariLength, author: Slyne)
def create_simpleCnnRnn(image_shape, max_caption_len,vocab_size):
    image_model = Sequential()
    # image_shape : C,W,H
    # input: 100x100 images with 3 channels -> (3, 100, 100) tensors.
    # this applies 32 convolution filters of size 3x3 each.
    image_model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=image_shape))
    image_model.add(BatchNormalization())
    image_model.add(Activation('relu'))
    image_model.add(Convolution2D(32, 3, 3))
    image_model.add(BatchNormalization())
    image_model.add(Activation('relu'))
    image_model.add(MaxPooling2D(pool_size=(2, 2)))
    image_model.add(Dropout(0.25))
    image_model.add(Convolution2D(64, 3, 3, border_mode='valid'))
    image_model.add(BatchNormalization())
    image_model.add(Activation('relu'))
    image_model.add(Convolution2D(64, 3, 3))
    image_model.add(BatchNormalization())
    image_model.add(Activation('relu'))
    image_model.add(MaxPooling2D(pool_size=(2, 2)))
    image_model.add(Dropout(0.25))
    image_model.add(Flatten())
    # Note: Keras does automatic shape inference.
    image_model.add(Dense(128))
    image_model.add(RepeatVector(max_caption_len))
    image_model.add(Bidirectional(GRU(output_dim=128, return_sequences=True)))
    #image_model.add(GRU(output_dim=128, return_sequences=True))
    image_model.add(TimeDistributed(Dense(vocab_size)))
    image_model.add(Activation('softmax'))
    return image_model
Source: cpg.py (project: deepcpg, author: cangermueller)
def __call__(self, inputs):
        x = self._merge_inputs(inputs)

        shape = getattr(x, '_keras_shape')
        replicate_model = self._replicate_model(kl.Input(shape=shape[2:]))
        x = kl.TimeDistributed(replicate_model)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        gru = kl.GRU(256, kernel_regularizer=kernel_regularizer)
        x = kl.Bidirectional(gru)(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x)

