Python TimeDistributed() usage examples
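
TimeDistributed is a Keras wrapper that applies the same inner layer, with shared weights, independently at every timestep of a 3D input. As orientation before the collected snippets, here is a minimal self-contained sketch; the dimensions are illustrative, not taken from any snippet below:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, TimeDistributed

# Illustrative dimensions (hypothetical).
n_timesteps, n_features, n_outputs = 10, 16, 8

model = Sequential()
# The same Dense weights are applied independently at each of the 10 timesteps,
# mapping (batch, 10, 16) -> (batch, 10, 8).
model.add(TimeDistributed(Dense(n_outputs), input_shape=(n_timesteps, n_features)))
model.compile(loss='mse', optimizer='adam')

x = np.random.rand(4, n_timesteps, n_features)
print(model.predict(x).shape)  # expected: (4, 10, 8)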

test_keras.py (project: wtte-rnn, author: ragulpr)
def model_masking(discrete_time, init_alpha, max_beta):
    model = Sequential()

    model.add(Masking(mask_value=mask_value,
                      input_shape=(n_timesteps, n_features)))
    model.add(TimeDistributed(Dense(2)))
    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete', reduce_loss=False).loss_function
    else:
        loss = wtte.loss(kind='continuous', reduce_loss=False).loss_function

    model.compile(loss=loss, optimizer=RMSprop(
        lr=lr), sample_weight_mode='temporal')
    return model
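
This test relies on module-level constants defined elsewhere in test_keras.py (mask_value, n_timesteps, n_features, lr); the Masking layer makes every downstream layer, including the TimeDistributed head, skip timesteps whose features all equal mask_value. A hedged usage sketch with stand-in values:

# Hypothetical stand-ins for the test module's globals.
mask_value = -1.0
n_timesteps, n_features = 20, 3
lr = 0.01

# Assumes the wtte helpers and Keras imports from the original test file are in scope.
model = model_masking(discrete_time=True, init_alpha=1.0, max_beta=2.0)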
finetuning.py (project: DeepMoji, author: bfelbo)
def change_trainable(layer, trainable, verbose=False):
    """ Helper method that fixes some of Keras' issues with wrappers and
        trainability. Freezes or unfreezes a given layer.

    # Arguments:
        layer: Layer to be modified.
        trainable: Whether the layer should be frozen or unfrozen.
        verbose: Verbosity flag.
    """

    layer.trainable = trainable

    if type(layer) == Bidirectional:
        layer.backward_layer.trainable = trainable
        layer.forward_layer.trainable = trainable

    if type(layer) == TimeDistributed:
        layer.layer.trainable = trainable  # TimeDistributed stores its wrapped layer as `layer.layer`

    if verbose:
        action = 'Unfroze' if trainable else 'Froze'
        print("{} {}".format(action, layer.name))
test_keras2_numeric.py (project: coremltools, author: apple)
def test_lstm_td(self):
        np.random.seed(1988)
        input_dim = 2
        input_length = 4
        num_channels = 3

        # Define a model
        model = Sequential()
        model.add(SimpleRNN(num_channels, return_sequences=True, 
                input_shape=(input_length, input_dim),))
        model.add(TimeDistributed(Dense(5)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2 - 0.1 for w in \
                model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', 
                output_blob = 'output')


test_keras_numeric.py (project: coremltools, author: apple)
def test_tiny_image_captioning(self):
        # use a conv layer as a image feature branch
        img_input_1 = Input(shape=(16,16,3))
        x = Convolution2D(2,3,3)(img_input_1)
        x = Flatten()(x)
        img_model = Model([img_input_1], [x])

        img_input = Input(shape=(16,16,3))
        x = img_model(img_input)
        x = Dense(8, name = 'cap_dense')(x)
        x = Reshape((1,8), name = 'cap_reshape')(x)

        sentence_input = Input(shape=(5,)) # max_length = 5
        y = Embedding(8, 8, name = 'cap_embedding')(sentence_input)
        z = merge([x,y], mode = 'concat', concat_axis = 1, name = 'cap_merge')
        z = LSTM(4, return_sequences = True, name = 'cap_lstm')(z)
        z = TimeDistributed(Dense(8), name = 'cap_timedistributed')(z)

        combined_model = Model([img_input, sentence_input], [z])
        self._test_keras_model(combined_model, one_dim_seq_flags=[False, True])
model_sentences.py (project: onto-lstm, author: pdasigi)
def train(self, S_ind, C_ind, use_onto_lstm=True, use_attention=True, num_epochs=20,  hierarchical=False, base=2):
    # Predict next word from current synsets
    X = C_ind[:,:-1] if use_onto_lstm else S_ind[:,:-1] # remove the last words' hyps in all sentences
    Y_inds = S_ind[:,1:] # remove the first words in all sentences
    if hierarchical:
      train_targets = self._factor_target_indices(Y_inds, base=base)
    else:
      train_targets = [self._make_one_hot(Y_inds, Y_inds.max() + 1)]
    length = Y_inds.shape[1]
    lstm_outdim = self.word_dim

    num_words = len(self.dp.word_index)
    num_syns = len(self.dp.synset_index)
    input = Input(shape=X.shape[1:], dtype='int32')
    embed_input_dim = num_syns if use_onto_lstm else num_words
    embed_layer = HigherOrderEmbedding(name='embedding', input_dim=embed_input_dim, output_dim=self.word_dim, input_shape=X.shape[1:], mask_zero=True)
    sent_rep = embed_layer(input)
    reg_sent_rep = Dropout(0.5)(sent_rep)
    if use_onto_lstm:
      lstm_out = OntoAttentionLSTM(name='sent_lstm', input_dim=self.word_dim, output_dim=lstm_outdim, input_length=length, num_senses=self.num_senses, num_hyps=self.num_hyps, return_sequences=True, use_attention=use_attention)(reg_sent_rep)
    else:
      lstm_out = LSTM(name='sent_lstm', input_dim=self.word_dim, output_dim=lstm_outdim, input_length=length, return_sequences=True)(reg_sent_rep)
    output_nodes = []
    # Make one node for each factored target
    for target in train_targets:
      node = TimeDistributed(Dense(input_dim=lstm_outdim, output_dim=target.shape[-1], activation='softmax'))(lstm_out)
      output_nodes.append(node)

    model = Model(input=input, output=output_nodes)
    print >>sys.stderr, model.summary()
    early_stopping = EarlyStopping()
    precompile_time = time.time()
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    postcompile_time = time.time()
    print >>sys.stderr, "Model compilation took %d s"%(postcompile_time - precompile_time)
    model.fit(X, train_targets, nb_epoch=num_epochs, validation_split=0.1, callbacks=[early_stopping])
    posttrain_time = time.time()
    print >>sys.stderr, "Training took %d s"%(posttrain_time - postcompile_time)
    concept_reps = model.layers[1].get_weights()
    self.model = model
    return concept_reps
CNN_LSTM.py (project: DeepLearning-OCR, author: xingjian-f)
def build_CNN_LSTM(channels, width, height, lstm_output_size, nb_classes):
    model = Sequential()
    # 1 conv
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu', 
        input_shape=(channels, height, width)))
    model.add(BatchNormalization(mode=0, axis=1))
    # 2 conv
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    # 3 conv
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    # 4 conv
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    # flatten
    model.add(Flatten())
    # 1 dense
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # 2 dense
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # lstm
    model.add(RepeatVector(lstm_output_size))
    model.add(LSTM(512, return_sequences=True))
    model.add(TimeDistributed(Dropout(0.5)))
    model.add(TimeDistributed(Dense(nb_classes, activation='softmax')))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=[categorical_accuracy_per_sequence],
                  sample_weight_mode='temporal'
                  )

    return model
model.py (project: keras-molecules, author: maxhodak)
def _buildDecoder(self, z, latent_rep_size, max_length, charset_length):
        h = Dense(latent_rep_size, name='latent_input', activation = 'relu')(z)
        h = RepeatVector(max_length, name='repeat_vector')(h)
        h = GRU(501, return_sequences = True, name='gru_1')(h)
        h = GRU(501, return_sequences = True, name='gru_2')(h)
        h = GRU(501, return_sequences = True, name='gru_3')(h)
        return TimeDistributed(Dense(charset_length, activation='softmax'), name='decoded_mean')(h)
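
The decoder pattern here: RepeatVector broadcasts the latent vector across max_length timesteps, the GRU stack unrolls that into a sequence, and TimeDistributed(Dense(..., activation='softmax')) emits one distribution over the character set per timestep. A minimal standalone sketch with illustrative, much smaller dimensions:

from keras.models import Model
from keras.layers import Input, Dense, RepeatVector, GRU, TimeDistributed

# Illustrative sizes, not the 501-unit GRUs used above.
latent_rep_size, max_length, charset_length = 32, 120, 35

z = Input(shape=(latent_rep_size,))
h = Dense(latent_rep_size, activation='relu')(z)
h = RepeatVector(max_length)(h)            # (batch, 32) -> (batch, 120, 32)
h = GRU(64, return_sequences=True)(h)      # (batch, 120, 64)
decoded = TimeDistributed(Dense(charset_length, activation='softmax'))(h)

decoder = Model(z, decoded)                # output: (batch, 120, 35)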
hypopt.py (project: Kutils, author: ishank26)
def my_model(X_train, y_train, X_test, y_test):
    ############ model params ################
    line_length = 248  # seq size
    train_char = 58
    hidden_neurons = 512  # hidden neurons
    batch = 64  # batch_size
    no_epochs = 3
    ################### Model ################

    ######### begin model ########
    model = Sequential()
    # layer 1
    model.add(LSTM(hidden_neurons, return_sequences=True,
                   input_shape=(line_length, train_char)))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # layer 2
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # layer 3
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # fc layer
    model.add(TimeDistributed(Dense(train_char, activation='softmax')))
    model.load_weights("weights/model_maha1_noep50_batch64_seq_248.hdf5")
    ########################################################################
    checkpoint = ModelCheckpoint("weights/hypmodel2_maha1_noep{0}_batch{1}_seq_{2}.hdf5".format(
        no_epochs, batch, line_length), monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='min')

    initlr = 0.00114
    adagrad = Adagrad(lr=initlr, epsilon=1e-08,
                      clipvalue={{choice([0, 1, 2, 3, 4, 5, 6, 7])}})
    model.compile(optimizer=adagrad,
                  loss='categorical_crossentropy', metrics=['accuracy'])
    history = History()
    # fit model
    model.fit(X_train, y_train, batch_size=batch, nb_epoch=no_epochs,
              validation_split=0.2, callbacks=[history, checkpoint])

    score, acc = model.evaluate(X_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
skopt.py (project: Kutils, author: ishank26)
def my_model(dropout):
    ############ model params ################
    line_length = 248  # seq size
    train_char = 58
    hidden_neurons = 512  # hidden neurons
    batch = 64  # batch_size
    no_epochs = 5
    ################### Model ################
    model = Sequential()
    # layer 1
    model.add(LSTM(hidden_neurons, return_sequences=True,
                   input_shape=(line_length, train_char)))
    model.add(Dropout(dropout))
    # layer 2
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout(dropout))
    # layer 3
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout(dropout))
    model.add(Reshape((248, 512)))
    # fc layer
    model.add(TimeDistributed(Dense(58, activation='softmax')))
    # model.load_weights("weights/model_maha1_noep50_batch64_seq_248.hdf5")
    # model.layers.pop()
    # model.layers.pop()
    # model.add(Dropout(dropout))
    #model.add(TimeDistributed(Dense(train_char, activation='softmax')))
    initlr = 0.00114
    adagrad = Adagrad(lr=initlr, epsilon=1e-08)
    model.compile(optimizer=adagrad,
                  loss='categorical_crossentropy', metrics=['accuracy'])
    ###load weights####
    return model
adder.py (project: soph, author: Linusp)
def build_model(input_size, seq_len, hidden_size):
    """???? seq2seq ??"""
    model = Sequential()
    model.add(GRU(input_dim=input_size, output_dim=hidden_size, return_sequences=False))
    model.add(Dense(hidden_size, activation="relu"))
    model.add(RepeatVector(seq_len))
    model.add(GRU(hidden_size, return_sequences=True))
    model.add(TimeDistributed(Dense(output_dim=input_size, activation="softmax")))
    model.compile(loss="categorical_crossentropy", optimizer='adam')

    return model
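
A hedged usage sketch for the model above: inputs and targets are one-hot character sequences over the same vocabulary. The values below are illustrative, and nb_epoch follows the Keras 1-style API the snippet itself uses.

import numpy as np

input_size, seq_len, hidden_size = 12, 4, 128
model = build_model(input_size, seq_len, hidden_size)

# Toy one-hot batches: x is (batch, input_steps, vocab), y is (batch, seq_len, vocab).
x = np.eye(input_size)[np.random.randint(0, input_size, size=(32, 7))]
y = np.eye(input_size)[np.random.randint(0, input_size, size=(32, seq_len))]
model.fit(x, y, nb_epoch=1)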
pig_latin.py (project: soph, author: Linusp)
def build_model(input_size, seq_len, hidden_size):
    """???? sequence to sequence ??"""
    model = Sequential()
    model.add(GRU(input_dim=input_size, output_dim=hidden_size, return_sequences=False))
    model.add(Dense(hidden_size, activation="relu"))
    model.add(RepeatVector(seq_len))
    model.add(GRU(hidden_size, return_sequences=True))
    model.add(TimeDistributed(Dense(output_dim=input_size, activation="linear")))
    model.compile(loss="mse", optimizer='adam')

    return model
lstm.py (project: autolipsync, author: evgenijkatunov)
def init(self):
        self.model = Sequential()
        self.model.add(Bidirectional(LSTM(126, return_sequences=True), 'sum',
                                     input_shape=(self._max_frames, self._features_count)))
        self.model.add(Dropout(0.5))
        self.model.add(TimeDistributed(Dense(units=self._phonemes_count, activation='softmax')))
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='rmsprop',
                           metrics=[metrics.categorical_accuracy])
test_keras.py (project: wtte-rnn, author: ragulpr)
def model_no_masking(discrete_time, init_alpha, max_beta):
    model = Sequential()
    model.add(TimeDistributed(Dense(2), input_shape=(n_timesteps, n_features)))

    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete').loss_function
    else:
        loss = wtte.loss(kind='continuous').loss_function

    model.compile(loss=loss, optimizer=RMSprop(lr=lr))

    return model
DrugAI-GAN.py (project: DrugAI, author: Gananath)
def Gen():
    #Generator model
    G = Sequential()
    G.add(TimeDistributed(Dense(x_dash.shape[2]), input_shape=(x_dash.shape[1],x_dash.shape[2])))
    G.add(LSTM(216, return_sequences=True))
    G.add(Dropout(0.3))
    G.add(LSTM(216, return_sequences=True))
    G.add(Dropout(0.3))
    G.add(LSTM(216, return_sequences=True))
    #G.add(BatchNormalization(momentum=0.9))
    G.add(TimeDistributed(Dense(y_dash.shape[2], activation='softmax')))
    G.compile(loss='categorical_crossentropy', optimizer=Adam(lr=2e-4))
    return G
DrugAI-GAN.py (project: DrugAI, author: Gananath)
def Dis():
    #Discriminator model
    D = Sequential()
    D.add(TimeDistributed(Dense(y_dash.shape[2]), input_shape=(y_dash.shape[1],y_dash.shape[2])))
    D.add(LSTM(216, return_sequences=True))
    D.add(Dropout(0.3))
    D.add(LSTM(60, return_sequences=True))
    D.add(Flatten())
    D.add(Dense(1, activation='sigmoid'))
    D.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.001))
    return D
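
Neither function shows how the two halves are trained together. A hedged sketch of the usual stacked-GAN wiring (an assumption about common Keras practice, not code taken from the DrugAI repository): the discriminator is frozen inside the combined model so the adversarial gradient only updates the generator.

from keras.models import Sequential
from keras.optimizers import Adam

def build_gan(G, D):
    # Freeze D only inside this stacked model; D is still trained
    # directly on real/generated batches in the outer training loop.
    D.trainable = False
    gan = Sequential()
    gan.add(G)
    gan.add(D)
    gan.compile(loss='binary_crossentropy', optimizer=Adam(lr=2e-4))
    return gan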
test_keras2_numeric.py (project: coremltools, author: apple)
def test_tiny_time_distrbuted(self):

        # as the first layer in a model
        model = Sequential()
        model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        self._test_keras_model(model)
test_keras2_numeric.py (project: coremltools, author: apple)
def test_dense_fused_act_in_td(self):
        np.random.seed(1988)
        x_in = Input(shape=(10,2))
        x = TimeDistributed(Dense(6, activation = 'softmax'))(x_in)
        model = Model(inputs=[x_in], outputs=[x])

        self._test_keras_model(model, input_blob = 'data', output_blob = 'output', delta=1e-4)
test_keras2_numeric.py (project: coremltools, author: apple)
def test_large_batch_gpu(self):

        batch_size = 2049
        num_channels = 4
        kernel_size = 3

        model = Sequential()
        model.add(TimeDistributed(Dense(num_channels), input_shape=(batch_size, kernel_size)))

        model.set_weights([(np.random.rand(*w.shape)-0.5)*0.2 for w in model.get_weights()])

        self._test_keras_model(model, input_blob='data', output_blob='output', delta=1e-2)
test_keras2_numeric.py (project: coremltools, author: apple)
def test_tiny_mcrnn_td(self):

        model = Sequential()
        model.add(Conv2D(3,(1,1), input_shape=(2,4,4), padding='same'))
        model.add(AveragePooling2D(pool_size=(2,2)))
        model.add(Reshape((2,3)))
        model.add(TimeDistributed(Dense(5)))

        self._test_keras_model(model)
test_keras_numeric.py (project: coremltools, author: apple)
def test_tiny_time_distrbuted(self):

        # as the first layer in a model
        model = Sequential()
        model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        self._test_keras_model(model)
test_keras_numeric.py (project: coremltools, author: apple)
def test_dense_fused_act_in_td(self):
        np.random.seed(1988)
        x_in = Input(shape=(10,2))
        x = TimeDistributed(Dense(6, activation = 'softmax'))(x_in)
        model = Model(x_in, x)

        self._test_keras_model(model, input_blob = 'data', output_blob = 'output', delta=1e-2)
test_keras_numeric.py (project: coremltools, author: apple)
def test_large_batch_gpu(self):

        batch_size = 2049
        num_channels = 4
        kernel_size = 3

        model = Sequential()
        model.add(TimeDistributed(Dense(num_channels), input_shape=(batch_size, kernel_size)))

        model.set_weights([(np.random.rand(*w.shape)-0.5)/5.0 for w in model.get_weights()])

        self._test_keras_model(model, input_blob='data', output_blob='output', delta=1e-2)
test_keras_numeric.py (project: coremltools, author: apple)
def test_tiny_mcrnn_td(self):

        model = Sequential()
        model.add(Convolution2D(3,1,1, input_shape=(2,4,4), border_mode='same'))
        model.add(AveragePooling2D(pool_size=(2,2)))
        model.add(Reshape((2,3)))
        model.add(TimeDistributed(Dense(5)))

        self._test_keras_model(model)
test_wrappers.py (project: keras-customized, author: ambrite)
def test_regularizers():
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2, W_regularizer='l1'), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    assert len(model.losses) == 1
sarcasm_detection_model_CNN_LSTM_DNN_2D.py (project: SarcasmDetection, author: AniSkywalker)
def _build_network(self, vocab_size, maxlen, emb_weights=[], hidden_units=256, trainable=False):
        print('Build model...')
        model = Sequential()

        model.add(Embedding(vocab_size, emb_weights.shape[1], input_length=maxlen, weights=[emb_weights],
                            trainable=trainable))

        model.add(Reshape((maxlen,emb_weights.shape[1],1)))

        model.add(BatchNormalization(momentum=0.9))

        # model.add(Convolution2D(int(hidden_units/8), (5,5), kernel_initializer='he_normal', padding='valid', activation='sigmoid'))
        # model.add(MaxPooling2D((2,2)))
        # model.add(Dropout(0.5))
        #
        # model.add(Convolution2D(int(hidden_units/4), (5,5), kernel_initializer='he_normal', padding='valid', activation='sigmoid'))
        # model.add(MaxPooling2D((2,2)))
        # model.add(Dropout(0.5))


        model.add(TimeDistributed(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5, return_sequences=True)))
        model.add(TimeDistributed(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5)))

        model.add(Flatten())

        # model.add(Dense(int(hidden_units/2), kernel_initializer='he_normal', activation='sigmoid'))
        # model.add(Dropout(0.5))
        model.add(Dense(2,activation='softmax'))
        adam = Adam(lr=0.0001)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        print('No of parameter:', model.count_params())

        print(model.summary())
        return model
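
The unusual part of this network is TimeDistributed wrapped around an LSTM: after the Reshape the tensor is (batch, maxlen, emb_dim, 1), so the wrapper runs one weight-shared LSTM per word position, scanning across that word's embedding dimensions. A minimal standalone sketch with illustrative sizes:

from keras.models import Sequential
from keras.layers import LSTM, Reshape, TimeDistributed

# Illustrative: 30 words, 100-dim embeddings, 8 LSTM units.
m = Sequential()
m.add(Reshape((30, 100, 1), input_shape=(30, 100)))
m.add(TimeDistributed(LSTM(8, return_sequences=True)))  # -> (batch, 30, 100, 8)
m.add(TimeDistributed(LSTM(8)))                         # -> (batch, 30, 8)
print(m.output_shape)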
multi_task_model.py (project: BetaStock, author: qweraqq)
def build(self):
        dim_data = self.size_of_input_data_dim
        nb_time_step = self.size_of_input_timesteps
        financial_time_series_input = Input(shape=(nb_time_step, dim_data), name='x1')
        lstm_layer_1 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                            W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                            return_sequences=True, name='lstm_layer1')
        lstm_layer_21 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss1')
        lstm_layer_22 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss2')
        lstm_layer_23 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss3')

        lstm_layer_24 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss4')

        lstm_layer_25 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss5')
        h1 = lstm_layer_1(financial_time_series_input)
        h21 = lstm_layer_21(h1)
        h22 = lstm_layer_22(h1)
        h23 = lstm_layer_23(h1)
        h24 = lstm_layer_24(h1)
        h25 = lstm_layer_25(h1)
        time_series_predictions1 = TimeDistributed(Dense(1), name="p1")(h21)  # custom 1
        time_series_predictions2 = TimeDistributed(Dense(1), name="p2")(h22)  # custom 2
        time_series_predictions3 = TimeDistributed(Dense(1), name="p3")(h23)  # mse
        time_series_predictions4 = TimeDistributed(Dense(1, activation='sigmoid'), name="p4")(h24)  # logloss
        time_series_predictions5 = TimeDistributed(Dense(nb_labels, activation='softmax'), name="p5")(h25)  # cross
        self.model = Model(input=financial_time_series_input,
                           output=[time_series_predictions1, time_series_predictions2,
                                   time_series_predictions3, time_series_predictions4,
                                   time_series_predictions5],
                           name="multi-task deep rnn for financial time series forecasting")
        plot(self.model, to_file='model.png')
lstm.py (project: mars_express, author: wsteitz)
def fit(self, x, y):
        input_dim = x.shape[1]
        output_dim = y.shape[1]
        self.x_train = x

        start = len(x) % (self.batch_size * self.sequence_length)

        x_seq = self.sliding_window(x.iloc[start:])
        y_seq = self.sliding_window(y.iloc[start:])

        model = Sequential()
        model.add(GRU(1024, batch_input_shape=(self.batch_size, self.sequence_length, input_dim), return_sequences=True, stateful=True))
        model.add(Activation("tanh"))
        model.add(GRU(1024, return_sequences=True))
        model.add(Activation("tanh"))
        model.add(GRU(512, return_sequences=True))
        model.add(Activation("tanh"))
        #model.add(Dropout(0.5))
        model.add(TimeDistributed(Dense(output_dim)))
        model.add(Activation("linear"))

        # optimizer = keras.optimizers.RMSprop(lr=0.002)  # dead assignment, immediately overwritten
        optimizer = keras.optimizers.Nadam(lr=0.002)
        model.compile(loss='mse', optimizer=optimizer)

        model.fit(x_seq, y_seq, batch_size=self.batch_size, verbose=1, nb_epoch=self.n_epochs, shuffle=False)
        self.model = model
        return self
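
self.sliding_window is defined elsewhere in the project; given the batch_input_shape above, it must return arrays shaped (num_windows, sequence_length, dim). A hypothetical stand-in consistent with that contract (a guess, not the project's implementation):

import numpy as np

def sliding_window_sketch(frame, sequence_length):
    # Chop an (N, dim) frame into non-overlapping windows of shape
    # (N // sequence_length, sequence_length, dim); hypothetical helper.
    values = np.asarray(frame)
    n = (len(values) // sequence_length) * sequence_length
    return values[:n].reshape(-1, sequence_length, values.shape[1])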
test_wrappers.py (project: keras, author: NVIDIA)
def test_regularizers():
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2, W_regularizer='l1'), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    assert len(model.losses) == 1
mpi_example_train.py (project: plasma, author: jnkh)
def get_model(batch_size = 32,num_layers = 2,hidden_units=100,num_output=1,dropout=0.1,timesteps = 100, featurelen=1,is_training=True):

    input_tensor = Input(batch_shape=(batch_size,timesteps,featurelen))
    recurrent_layer = LSTM(hidden_units,return_sequences=True,stateful = True)(input_tensor)
    output_tensor = TimeDistributed(Dense(num_output,activation='linear'))(recurrent_layer)

    model = Model(input=input_tensor, output=output_tensor)
    #model.compile(optimizer=SGD(lr=DUMMY_LR),loss='mse')

    return model
stateful_lstm_example.py (project: plasma, author: jnkh)
def build_model(predict,batch_size,length,featurelen):
    if predict:
        batch_size = length = 1
    model = Sequential()
    model.add(LSTM(10, return_sequences=True, batch_input_shape=(batch_size, length, featurelen), stateful=True))
    model.add(Dropout(0.2))
    model.add(LSTM(10, return_sequences=True, stateful=True))
    model.add(Dropout(0.2))
    model.add(TimeDistributed(Dense(featurelen)))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.reset_states()
    return model
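
In predict mode (batch_size = length = 1), the stateful LSTMs carry hidden state across calls, so sequences can be generated one timestep at a time. A hedged sampling sketch (untrained weights, names illustrative):

import numpy as np

featurelen = 10
pred_model = build_model(predict=True, batch_size=32, length=100, featurelen=featurelen)

pred_model.reset_states()
x = np.zeros((1, 1, featurelen))
x[0, 0, 0] = 1.0                          # arbitrary one-hot seed symbol
for _ in range(5):
    probs = pred_model.predict(x)[0, -1]  # softmax over featurelen symbols
    probs = probs / probs.sum()           # renormalize float32 rounding
    nxt = np.random.choice(featurelen, p=probs)
    x = np.zeros((1, 1, featurelen))
    x[0, 0, nxt] = 1.0                    # feed the sampled symbol back in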

