Python Bidirectional() class: example source code

Source file: model.py (project: deeppavlov, author: deepmipt)
def lstm_word_model(self):
        embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

        output = Bidirectional(LSTM(self.opt['units_lstm'], activation='tanh',
                                      kernel_regularizer=l2(self.opt['regul_coef_lstm']),
                                      dropout=self.opt['dropout_rate']))(embed_input)

        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(self.opt['dense_dim'], activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(1, activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=embed_input, outputs=act_output)
        return model
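For orientation, a minimal sketch (shapes assumed, not taken from deeppavlov) of why the snippet above can feed the Bidirectional output straight into Dropout and Dense: with the default merge_mode='concat' and return_sequences=False, Bidirectional(LSTM(units)) collapses the sequence to a single vector of 2*units features.

import numpy as np
from keras.layers import Input, LSTM, Bidirectional
from keras.models import Model

inp = Input(shape=(50, 100))        # (max_sequence_length, embedding_dim), assumed values
out = Bidirectional(LSTM(64))(inp)  # forward and backward outputs concatenated
m = Model(inp, out)
print(m.output_shape)               # (None, 128) == 2 * 64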
Source file: model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer_1(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim,  self.embedding_dim,))
        inp_drop = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.hidden_dim,
                                    input_shape=(input_dim, self.embedding_dim,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=True), merge_mode=None)(inp_drop)
        dropout_forw = Dropout(self.dropout_val)(bioutp[0])
        dropout_back = Dropout(self.dropout_val)(bioutp[1])
        model = Model(inputs=inp, outputs=[dropout_forw, dropout_back], name="biLSTM_encoder")
        return model
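The merge_mode=None choice above is what makes indexing bioutp[0] and bioutp[1] legal: instead of merging, the wrapper returns the forward and backward sequences as a list of two tensors. A minimal sketch with assumed dimensions:

from keras.layers import Input, LSTM, Bidirectional
from keras.models import Model

inp = Input(shape=(20, 64))
forw, back = Bidirectional(LSTM(32, return_sequences=True), merge_mode=None)(inp)
m = Model(inp, [forw, back])
print(m.output_shape)   # [(None, 20, 32), (None, 20, 32)]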
Source file: model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer_2(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim, 2*self.perspective_num,))
        inp_drop = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.aggregation_dim,
                                    input_shape=(input_dim, 2*self.perspective_num,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=True), merge_mode=None)(inp_drop)
        dropout_forw = Dropout(self.dropout_val)(bioutp[0])
        dropout_back = Dropout(self.dropout_val)(bioutp[1])
        model = Model(inputs=inp, outputs=[dropout_forw, dropout_back], name="biLSTM_enc_persp")
        return model
Source file: model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer_last(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim,  self.embedding_dim,))
        inp_drop = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.hidden_dim,
                                    input_shape=(input_dim, self.embedding_dim,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=False), merge_mode='concat')(inp_drop)
        dropout = Dropout(self.dropout_val)(bioutp)
        model = Model(inputs=inp, outputs=dropout, name="biLSTM_encoder_last")
        return model
Source file: models.py (project: keras-image-captioning, author: danieljl)
def _build_sequence_model(self, sequence_input):
        RNN = GRU if self._rnn_type == 'gru' else LSTM

        def rnn():
            rnn = RNN(units=self._rnn_output_size,
                      return_sequences=True,
                      dropout=self._dropout_rate,
                      recurrent_dropout=self._dropout_rate,
                      kernel_regularizer=self._regularizer,
                      kernel_initializer=self._initializer,
                      implementation=2)
            rnn = Bidirectional(rnn) if self._bidirectional_rnn else rnn
            return rnn

        input_ = sequence_input
        for _ in range(self._rnn_layers):
            input_ = BatchNormalization(axis=-1)(input_)
            rnn_out = rnn()(input_)
            input_ = rnn_out
        time_dist_dense = TimeDistributed(Dense(units=self._vocab_size))(rnn_out)

        return time_dist_dense
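Note the factory above: each call to rnn() constructs a fresh layer, so stacked layers never share weights, and the Bidirectional wrapping is decided once by a flag. A self-contained sketch of the same pattern (sizes assumed, not from the project):

from keras.layers import Input, LSTM, Bidirectional, BatchNormalization
from keras.models import Model

def rnn():
    return Bidirectional(LSTM(32, return_sequences=True))

x = Input(shape=(10, 8))
h = x
for _ in range(2):
    h = BatchNormalization(axis=-1)(h)
    h = rnn()(h)
model = Model(x, h)
model.summary()   # two independent Bidirectional LSTM layers, no weight sharing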
Source file: finetuning.py (project: DeepMoji, author: bfelbo)
def change_trainable(layer, trainable, verbose=False):
    """ Helper method that fixes some of Keras' issues with wrappers and
        trainability. Freezes or unfreezes a given layer.

    # Arguments:
        layer: Layer to be modified.
        trainable: Whether the layer should be frozen or unfrozen.
        verbose: Verbosity flag.
    """

    layer.trainable = trainable

    if type(layer) == Bidirectional:
        layer.backward_layer.trainable = trainable
        layer.forward_layer.trainable = trainable

    if type(layer) == TimeDistributed:
        # TimeDistributed wraps a single inner layer, exposed as `layer.layer`.
        layer.layer.trainable = trainable

    if verbose:
        action = 'Unfroze' if trainable else 'Froze'
        print("{} {}".format(action, layer.name))
Source file: test_keras2_numeric.py (project: coremltools, author: apple)
def test_tiny_no_sequence_bidir_random(self,
                                           model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)
        input_dim = 1
        input_length = 1
        num_channels = 1
        num_samples = 1

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, 
            implementation = 1, recurrent_activation = 'sigmoid'),
            input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output',
                               model_precision=model_precision)
Source file: test_keras2_numeric.py (project: coremltools, author: apple)
def test_tiny_no_sequence_bidir_random_gpu(self,
                                               model_precision = _MLMODEL_FULL_PRECISION):
        np.random.seed(1988)
        input_dim = 1
        input_length = 1
        num_channels = 1
        num_samples = 1

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, 
                                     implementation = 2, recurrent_activation = 'sigmoid'),
                                input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output',
                               model_precision=model_precision)
Source file: test_keras2_numeric.py (project: coremltools, author: apple)
def test_small_no_sequence_bidir_random(self):
        np.random.seed(1988)
        input_dim = 10
        input_length = 1
        num_channels = 1

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, 
            implementation = 2, recurrent_activation = 'sigmoid'),
            input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output')
Source file: test_keras2_numeric.py (project: coremltools, author: apple)
def test_medium_bidir_random_return_seq_false(self):
        np.random.seed(1988)
        input_dim = 7
        input_length = 5
        num_channels = 10

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, 
                                return_sequences=False, implementation=2, recurrent_activation='sigmoid'),
                                input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob='data', output_blob='output')
Source file: test_keras2_numeric.py (project: coremltools, author: apple)
def test_medium_bidir_random_return_seq_true(self):
        np.random.seed(1988)
        input_dim = 7
        input_length = 5
        num_channels = 10

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, 
                                return_sequences = True, implementation = 2, recurrent_activation = 'sigmoid'),
                                input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output')
Source file: test_keras_numeric.py (project: coremltools, author: apple)
def test_tiny_no_sequence_bidir_random(self):
        np.random.seed(1988)
        input_dim = 1
        input_length = 1
        num_channels = 1
        num_samples = 1

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, input_dim = input_dim,
            input_length = input_length, consume_less = 'cpu', inner_activation = 'sigmoid'),
            input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output')
Source file: test_keras_numeric.py (project: coremltools, author: apple)
def test_tiny_no_sequence_bidir_random_gpu(self):
        np.random.seed(1988)
        input_dim = 1
        input_length = 1
        num_channels = 1
        num_samples = 1

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, input_dim = input_dim,
                                     input_length = input_length, consume_less = 'gpu', inner_activation = 'sigmoid'),
                                input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output')
Source file: test_keras_numeric.py (project: coremltools, author: apple)
def test_small_no_sequence_bidir_random(self):
        np.random.seed(1988)
        input_dim = 10
        input_length = 1
        num_channels = 1

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, input_dim = input_dim,
            input_length = input_length, consume_less = 'gpu', inner_activation = 'sigmoid'),
            input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output')
Source file: test_keras_numeric.py (project: coremltools, author: apple)
def test_medium_bidir_random_return_seq_false(self):
        np.random.seed(1988)
        input_dim = 7
        input_length = 5
        num_channels = 10

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, input_dim=input_dim,
                                     input_length=input_length, return_sequences=False,
                                     consume_less='gpu', inner_activation='sigmoid'),
                                input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob='data', output_blob='output')
Source file: test_keras_numeric.py (project: coremltools, author: apple)
def test_medium_bidir_random_return_seq_true(self):
        np.random.seed(1988)
        input_dim = 7
        input_length = 5
        num_channels = 10

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, input_dim = input_dim,
                                     input_length = input_length, return_sequences = True,
                                     consume_less = 'gpu', inner_activation = 'sigmoid'),
                                input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output')
Source file: train.py (project: temporal-attention, author: dosht)
def main():
    print("\n\nLoading data...")
    data_dir = "/data/translate"
    vocab_size = 20000
    en, fr = prepare_date(data_dir, vocab_size)

    print("\n\nbuilding the model...")
    embedding_size = 64
    hidden_size = 32
    model = Sequential()
    model.add(Embedding(en.max_features, embedding_size, input_length=en.max_length, mask_zero=True))
    model.add(Bidirectional(GRU(hidden_size), merge_mode='sum'))
    model.add(RepeatVector(fr.max_length))
    model.add(GRU(embedding_size))
    model.add(Dense(fr.max_length, activation="softmax"))
    model.compile('rmsprop', 'mse')
    print(model.get_config())

    print("\n\nFitting the model...")
    model.fit(en.examples, fr.examples)

    print("\n\nEvaluation...")
    #TODO
Source file: lstm.py (project: autolipsync, author: evgenijkatunov)
def init(self):
        self.model = Sequential()
        self.model.add(Bidirectional(LSTM(126, return_sequences=True), merge_mode='sum',
                                     input_shape=(self._max_frames, self._features_count)))
        self.model.add(Dropout(0.5))
        self.model.add(TimeDistributed(Dense(units=self._phonemes_count, activation='softmax')))
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='rmsprop',
                           metrics=[metrics.categorical_accuracy])
Source file: inference.py (project: medaka, author: nanoporetech)
def build_model(chunk_size, feature_len, num_classes, gru_size=128):
    """Builds a bidirectional GRU model"""

    model = Sequential()
    # Bidirectional wrapper takes a copy of the first argument and reverses
    #   the direction. Weights are independent between components.
    model.add(Bidirectional(
        GRU(gru_size, activation='tanh', return_sequences=True, name='gru1'),
        input_shape=(chunk_size, feature_len))
    )
    model.add(Bidirectional(
        GRU(gru_size, activation='tanh', return_sequences=True, name='gru2'))
    )
    model.add(Dense(num_classes, activation='softmax', name='classify'))
    return model
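A quick check of the comment above about independent weights, as a sketch with assumed sizes: the Bidirectional wrapper doubles the parameter count relative to a single GRU.

from keras.models import Sequential
from keras.layers import GRU, Bidirectional

uni = Sequential()
uni.add(GRU(128, input_shape=(100, 10)))
bi = Sequential()
bi.add(Bidirectional(GRU(128), input_shape=(100, 10)))
print(bi.count_params() == 2 * uni.count_params())   # True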
Source file: NMT.py (project: keras-attention, author: datalogue)
def simpleNMT(pad_length=100,
              n_chars=105,
              n_labels=6,
              embedding_learnable=False,
              encoder_units=256,
              decoder_units=256,
              trainable=True,
              return_probabilities=False):
    """
    Builds a Neural Machine Translator that has alignment attention
    :param pad_length: the size of the input sequence
    :param n_chars: the number of characters in the vocabulary
    :param n_labels: the number of possible labelings for each character
    :param embedding_learnable: decides if the one hot embedding should be refinable.
    :return: keras.models.Model that can be compiled and fit'ed

    *** REFERENCES ***
    Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio.
    "Neural Machine Translation by Jointly Learning to Align and Translate"
    """
    input_ = Input(shape=(pad_length,), dtype='float32')
    input_embed = Embedding(n_chars, n_chars,
                            input_length=pad_length,
                            trainable=embedding_learnable,
                            weights=[np.eye(n_chars)],
                            name='OneHot')(input_)

    rnn_encoded = Bidirectional(LSTM(encoder_units, return_sequences=True),
                                name='bidirectional_1',
                                merge_mode='concat',
                                trainable=trainable)(input_embed)

    y_hat = AttentionDecoder(decoder_units,
                             name='attention_decoder_1',
                             output_dim=n_labels,
                             return_probabilities=return_probabilities,
                             trainable=trainable)(rnn_encoded)

    model = Model(inputs=input_, outputs=y_hat)

    return model
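The 'OneHot' layer above is an Embedding preloaded with an identity matrix; with trainable=False it is a fixed lookup that turns integer ids into one-hot vectors. A self-contained sketch of just that trick (sizes assumed):

import numpy as np
from keras.layers import Input, Embedding
from keras.models import Model

n_chars = 8
inp = Input(shape=(5,), dtype='int32')
onehot = Embedding(n_chars, n_chars, weights=[np.eye(n_chars)],
                   trainable=False, name='OneHot')(inp)
m = Model(inp, onehot)
print(m.predict(np.array([[0, 1, 2, 3, 4]]))[0, 0])   # [1. 0. 0. 0. 0. 0. 0. 0.]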
Source file: model.py (project: SynThai, author: KenjiroAI)
def __init__(self, hyper_params):
        # Sequential model
        model = Sequential()

        # Embedding layer
        model.add(Embedding(constant.NUM_CHARS, 5,
                            input_length=hyper_params.num_step))

        # LSTM Layer #1
        lstm = LSTM(256, return_sequences=True, unroll=True,
                    dropout=0.1, recurrent_dropout=0.1)

        model.add(Bidirectional(lstm))
        model.add(Dropout(0.1))

        # LSTM Layer #2
        lstm = LSTM(256, return_sequences=True, unroll=True,
                    dropout=0.1, recurrent_dropout=0.1)

        model.add(Bidirectional(lstm))
        model.add(Dropout(0.1))

        # LSTM Layer #3
        lstm = LSTM(128, return_sequences=True, unroll=True,
                    dropout=0.25, recurrent_dropout=0.25)

        model.add(Bidirectional(lstm))
        model.add(Dropout(0.25))

        # Output layer: time-distributed softmax over tags
        model.add(TimeDistributed(Dense(constant.NUM_TAGS, activation="softmax"),
                                  input_shape=(hyper_params.num_step, 128)))

        # Optimizer
        optimizer = Adam(hyper_params.learning_rate)

        # Compile
        model.compile(loss="categorical_crossentropy", optimizer=optimizer,
                      metrics=["categorical_accuracy"])

        self.model = model
Source file: lstm_model.py (project: BiLSTM-CCM, author: codedecde)
def create_model(options):
    post = Input(shape=(options['MAX_LEN'],))
    if options['USE_EMBEDDING']:
        embedding = Embedding(output_dim=options['EMBEDDING_DIM'],
                              weights=[options['EMBEDDING_MATRIX']],
                              input_dim=len(options['VOCAB']) + 2,
                              mask_zero=True)
    else:
        embedding = Embedding(output_dim=options['EMBEDDING_DIM'],
                              input_dim=len(options['VOCAB']) + 2,
                              mask_zero=True)
    embed_post = embedding(post)
    processed_post = Bidirectional(GRU(options['HIDDEN_DIM'], return_sequences=True))(embed_post)
    output = Dense(len(options['CLASSES_2_IX']), activation='softmax')(processed_post)
    model = Model(inputs=[post], outputs=output)
    adam = Adam(clipnorm=1.)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
Source file: model.py (project: BetaStock, author: qweraqq)
def build(self):
        dim_data = self.size_of_input_data_dim
        nb_time_step = self.size_of_input_timesteps
        news_input = Input(shape=(nb_time_step, dim_data))
        lstm = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                    W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh')
        bi_lstm = Bidirectional(lstm, input_shape=(nb_time_step, dim_data), merge_mode='concat')
        all_news_rep = bi_lstm(news_input)
        news_predictions = Dense(1, activation='linear')(all_news_rep)
        self.model = Model(news_input, news_predictions, name="deep rnn for financial news analysis")
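The snippet above is written against the Keras 1 API (output_dim, dropout_U/dropout_W, W_regularizer/b_regularizer) and leans on module-level globals. A hedged Keras 2 equivalent, with assumed values standing in for those globals:

from keras.layers import Input, Dense, LSTM, Bidirectional
from keras.models import Model
from keras.regularizers import l2

nb_time_step, dim_data = 30, 16                            # assumed
nb_hidden_units, dropout, l2_norm_alpha = 64, 0.3, 1e-4    # assumed

news_input = Input(shape=(nb_time_step, dim_data))
lstm = LSTM(nb_hidden_units, dropout=dropout, recurrent_dropout=dropout,
            kernel_regularizer=l2(l2_norm_alpha), bias_regularizer=l2(l2_norm_alpha),
            activation='tanh')
all_news_rep = Bidirectional(lstm, merge_mode='concat')(news_input)
news_predictions = Dense(1, activation='linear')(all_news_rep)
model = Model(news_input, news_predictions)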
Source file: RNN-CNN_feature_extract.py (project: Book_DeepLearning_Practice, author: wac81)
def text_feature_extract_model1(embedding_size=128, hidden_size=256):
    '''
    This model uses a plain Bi-LSTM with max-pooling to extract a text feature.

    Example predictions (the sample sentences themselves are unrecoverable):
    each input is scored close to 0 (about 1.6e-05) or close to 1.

    :return: a compiled keras.models.Sequential
    '''
    model = Sequential()
    model.add(Embedding(input_dim=max_features,
                        output_dim=embedding_size,
                        input_length=max_seq))
    model.add(Bidirectional(LSTM(hidden_size, return_sequences=True)))
    model.add(TimeDistributed(Dense(embedding_size // 2)))  # integer units (Python 3 safe)
    model.add(Activation('softplus'))
    model.add(MaxPooling1D(5))
    model.add(Flatten())
    # model.add(Dense(2048, activation='softplus'))
    # model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    plot(model, to_file="text_feature_extract_model1.png", show_shapes=True)
    return model
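A shape walk-through of the stack above as a runnable sketch (assumed globals: max_features=5000, max_seq=40), showing where the Bi-LSTM doubles the feature axis and what Flatten finally sees:

from keras.models import Sequential
from keras.layers import (Embedding, LSTM, Bidirectional, TimeDistributed,
                          Dense, Activation, MaxPooling1D, Flatten)

m = Sequential()
m.add(Embedding(5000, 128, input_length=40))             # (None, 40, 128)
m.add(Bidirectional(LSTM(256, return_sequences=True)))   # (None, 40, 512)
m.add(TimeDistributed(Dense(64)))                        # (None, 40, 64)
m.add(Activation('softplus'))
m.add(MaxPooling1D(5))                                   # (None, 8, 64)
m.add(Flatten())                                         # (None, 512)
m.add(Dense(1, activation='sigmoid'))                    # (None, 1)
m.summary()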
Source file: gan.py (project: liveqa2017, author: codekansas)
def build_generator(l_var, a_var, embeddings):
    """Builds a question generator model.

    Args:
        l_var: keras tensor, the latent vector input.
        a_var: keras tensor, the answer input.
        embeddings: numpy array, the embeddings to use for knn decoding.
    """

    latent_var = Input(tensor=l_var, name='latent_var_pl')
    answer_var = Input(tensor=a_var, name='gen_answer_pl')
    l_var, a_var = latent_var, answer_var

    RNN_DIMS = 64
    vocab_size, num_embedding_dims = embeddings.shape

    # Computes context of the answer.
    a_lstm = Bidirectional(LSTM(RNN_DIMS, return_sequences=True))
    a_context = a_lstm(a_var)

    # Uses context to formulate a question.
    q_matching_lstm = LSTM(RNN_DIMS, return_sequences=True)
    q_matching_lstm = RecurrentAttention(q_matching_lstm, a_context)
    q_var = q_matching_lstm(l_var)
    q_var = LSTM(RNN_DIMS, return_sequences=True)(q_var)
    q_var = Dense(num_embedding_dims)(q_var)

    # Builds the model from the variables (not compiled).
    model = Model(inputs=[latent_var, answer_var], outputs=[q_var])

    return model
Source file: test_wrappers.py (project: keras, author: GeekLiB)
def test_Bidirectional():
    rnn = recurrent.SimpleRNN
    nb_sample = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    for mode in ['sum', 'concat']:
        x = np.random.random((nb_sample, timesteps, dim))
        target_dim = 2 * output_dim if mode == 'concat' else output_dim
        y = np.random.random((nb_sample, target_dim))

        # test with Sequential model
        model = Sequential()
        model.add(wrappers.Bidirectional(rnn(output_dim),
                                         merge_mode=mode, input_shape=(timesteps, dim)))
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, nb_epoch=1, batch_size=1)

        # test config
        model.get_config()
        model = model_from_json(model.to_json())
        model.summary()

        # test stacked bidirectional layers
        model = Sequential()
        model.add(wrappers.Bidirectional(rnn(output_dim, return_sequences=True),
                                         merge_mode=mode, input_shape=(timesteps, dim)))
        model.add(wrappers.Bidirectional(rnn(output_dim), merge_mode=mode))
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, nb_epoch=1, batch_size=1)

        # test with functional API
        input = Input((timesteps, dim))
        output = wrappers.Bidirectional(rnn(output_dim), merge_mode=mode)(input)
        model = Model(input, output)
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, nb_epoch=1, batch_size=1)

        # Bidirectional and stateful
        input = Input(batch_shape=(1, timesteps, dim))
        output = wrappers.Bidirectional(rnn(output_dim, stateful=True), merge_mode=mode)(input)
        model = Model(input, output)
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, nb_epoch=1, batch_size=1)
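This test targets the Keras 1 API; under Keras 2 the same smoke test reads as follows (a hedged sketch; output_dim became units and nb_epoch became epochs):

import numpy as np
from keras.models import Sequential
from keras.layers import SimpleRNN, Bidirectional

model = Sequential()
model.add(Bidirectional(SimpleRNN(2), merge_mode='sum', input_shape=(2, 2)))
model.compile(loss='mse', optimizer='sgd')
model.fit(np.random.random((2, 2, 2)), np.random.random((2, 2)),
          epochs=1, batch_size=1)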
Source file: test_keras.py (project: coremltools, author: apple)
def test_bidir(self):
        """
        Test the conversion of a bidirectional layer
        """
        from keras.layers import LSTM
        from keras.layers.wrappers import Bidirectional

        # Create a simple Keras model
        model = Sequential()
        model.add(Bidirectional(LSTM(32, input_dim=32, input_length=10),
                                input_shape=(10, 32)))

        input_names = ['input']
        output_names = ['output']
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField('neuralNetwork'))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names) + 4)
        self.assertEquals(input_names[0], spec.description.input[0].name)

        self.assertEquals(32, spec.description.input[1].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.input[2].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.input[3].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.input[4].type.multiArrayType.shape[0])

        self.assertEquals(len(spec.description.output), len(output_names) + 4)
        self.assertEquals(output_names[0], spec.description.output[0].name)
        self.assertEquals(64, spec.description.output[0].type.multiArrayType.shape[0])

        self.assertEquals(32, spec.description.output[1].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.output[2].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.output[3].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.output[4].type.multiArrayType.shape[0])

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.biDirectionalLSTM)
        self.assertEquals(len(layer_0.input), 5)
        self.assertEquals(len(layer_0.output), 5)
Source file: test_keras2.py (project: coremltools, author: apple)
def test_bidir(self):
        """
        Test the conversion of a bidirectional layer
        """
        from keras.layers import LSTM
        from keras.layers.wrappers import Bidirectional

        # Create a simple Keras model
        model = Sequential()
        model.add(Bidirectional(LSTM(32, input_shape=(10, 32)),
                                input_shape=(10, 32)))

        input_names = ['input']
        output_names = ['output']
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField('neuralNetwork'))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names) + 4)
        self.assertEquals(input_names[0], spec.description.input[0].name)

        self.assertEquals(32, spec.description.input[1].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.input[2].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.input[3].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.input[4].type.multiArrayType.shape[0])

        self.assertEquals(len(spec.description.output), len(output_names) + 4)
        self.assertEquals(output_names[0], spec.description.output[0].name)
        self.assertEquals(64, spec.description.output[0].type.multiArrayType.shape[0])

        self.assertEquals(32, spec.description.output[1].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.output[2].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.output[3].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.output[4].type.multiArrayType.shape[0])

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.biDirectionalLSTM)
        self.assertEquals(len(layer_0.input), 5)
        self.assertEquals(len(layer_0.output), 5)

