Python GRU class: example source code

gru.py (project: LSTM-GRU-CNN-MLP, author: ansleliu)
def build_model(layers):
    model = Sequential()

    model.add(GRU(input_dim=layers[0], output_dim=layers[1], activation='tanh', return_sequences=True))
    model.add(Dropout(0.15))  # Dropout to reduce overfitting

    # model.add(GRU(layers[2], activation='tanh', return_sequences=True))
    # model.add(Dropout(0.2))  # Dropout to reduce overfitting

    model.add(GRU(layers[2], activation='tanh', return_sequences=False))
    model.add(Dropout(0.15))  # Dropout to reduce overfitting

    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="mse", optimizer="rmsprop") # Nadam rmsprop
    print "Compilation Time : ", time.time() - start
    return model
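A minimal usage sketch; the layer sizes below are illustrative and not taken from the project:

# layers = [input_dim, gru_1_units, gru_2_units, output_dim]
model = build_model([1, 50, 100, 1])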
understand.py (project: soph, author: Linusp)
def understand_variable_length_handle():
    """????????? recurrent layer ??????"""
    model = Sequential()
    model.add(GRU(input_dim=256, output_dim=256, return_sequences=True))
    model.compile(loss='mean_squared_error', optimizer='sgd')
    train_x = np.random.randn(100, 78, 256)
    train_y = np.random.randn(100, 78, 256)
    model.fit(train_x, train_y, verbose=0)

    inz_1 = np.random.randn(1, 78, 256)
    rez_1 = model.predict_proba(inz_1, verbose=0)

    inz_2 = np.random.randn(1, 87, 256)
    rez_2 = model.predict_proba(inz_2, verbose=0)

    print()
    print('=========== understand variable length =================')
    print('With `return_sequences=True`')
    print('Input shape is: {}, output shape is {}'.format(inz_1.shape, rez_1.shape))
    print('Input shape is: {}, output shape is {}'.format(inz_2.shape, rez_2.shape))
    print('====================== end =============================')
understand.py (project: soph, author: Linusp)
def try_variable_length_train():
    """????????

    ?????????? train_x ? train_y ? dtype ? object ???
    ?? shape ???? (100,) ?????????
    """
    model = Sequential()
    model.add(GRU(input_dim=256, output_dim=256, return_sequences=True))
    model.compile(loss='mean_squared_error', optimizer='sgd')

    train_x = []
    train_y = []
    for i in range(100):
        seq_length = np.random.randint(78, 87 + 1)
        sequence = []
        for _ in range(seq_length):
            sequence.append([np.random.randn() for _ in range(256)])

        train_x.append(np.array(sequence))
        train_y.append(np.array(sequence))

    train_x = np.array(train_x)
    train_y = np.array(train_y)

    model.fit(train_x, train_y)
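Calling fit on object-dtype arrays like this will generally raise an error in Keras, which expects a single dense tensor; a common workaround, sketched here with plain numpy under the assumption that zero-padding is acceptable, is to pad all sequences to the longest length first:

import numpy as np

# Zero-pad every (seq_len, 256) sample up to the longest sequence length.
max_len = max(len(seq) for seq in train_x)
padded_x = np.zeros((len(train_x), max_len, 256))
padded_y = np.zeros((len(train_y), max_len, 256))
for i, (sx, sy) in enumerate(zip(train_x, train_y)):
    padded_x[i, :len(sx)] = sx
    padded_y[i, :len(sy)] = sy
model.fit(padded_x, padded_y)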
test_tasks.py (project: keras-recommendation, author: sonyisme)
def test_temporal_clf(self):
        print('temporal classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5,10), 
            classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(GRU(X_train.shape[-1], y_train.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        self.assertTrue(history.validation_accuracy[-1] > 0.9)
test_recurrent.py (project: keras-customized, author: ambrite)
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
    if layer_class == recurrent.SimpleRNN:
        assert len(layer.losses) == 3
    if layer_class == recurrent.GRU:
        assert len(layer.losses) == 9
    if layer_class == recurrent.LSTM:
        assert len(layer.losses) == 12
base.py (project: motion-classification, author: matthiasplappert)
def fit(self, X, y):
        assert isinstance(X, list)  #TODO: this should not be an assert
        assert len(y) > 0
        assert len(X) == len(y)

        X = pad_sequences(X)
        print(X.shape, y.shape)

        n_features = X.shape[2]
        self.n_labels_ = y.shape[1]
        print(n_features, self.n_labels_)

        model = Sequential()
        model.add(GRU(n_features, 128))
        model.add(Dropout(0.1))
        model.add(BatchNormalization(128))
        model.add(Dense(128, self.n_labels_))
        model.add(Activation('sigmoid'))

        sgd = opt.SGD(lr=0.005, decay=1e-6, momentum=0., nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd, class_mode='categorical')

        model.fit(X, y, batch_size=self.n_batch_size, nb_epoch=self.n_epochs, show_accuracy=True)
        self.model_ = model
test_recurrent.py (project: keras, author: NVIDIA)
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
    if layer_class == recurrent.SimpleRNN:
        assert len(layer.losses) == 3
    if layer_class == recurrent.GRU:
        assert len(layer.losses) == 9
    if layer_class == recurrent.LSTM:
        assert len(layer.losses) == 12
neon_lstm.py (project: stratosphere-lstm, author: mendozawow)
def build_lstm(input_shape):
    model = Sequential()
    # model.add(Masking(input_shape=input_shape, mask_value=-1.))
    model.add(Embedding(input_shape[0], 128, input_length=input_shape[1]))

    model.add(Convolution1D(nb_filter=64,
                            filter_length=5,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=4))

    model.add(GRU(128))

    # model.add(GRU(128, return_sequences=False))
    # Add dropout if overfitting
    # model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
dga_lstm.py (project: stratosphere-lstm, author: mendozawow)
def build_lstm(input_shape):
    model = Sequential()
    # model.add(Masking(input_shape=input_shape, mask_value=-1.))
    model.add(Embedding(input_shape[0], 128, input_length=input_shape[1]))

    model.add(Convolution1D(nb_filter=64,
                            filter_length=5,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=model.output_shape[1]))

    model.add(Flatten())

    model.add(Dense(128))

    # model.add(GRU(128, return_sequences=False))
    # Add dropout if overfitting
    # model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
test_tasks.py (project: deep-coref, author: clarkkev)
def test_temporal_clf(self):
        print('temporal classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(3, 5),
                                                             classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(GRU(y_train.shape[-1], input_shape=(None, X_train.shape[-1])))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adadelta')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        self.assertTrue(history.history['val_acc'][-1] > 0.9)
classifier.py (project: narrative-prediction, author: roemmele)
def create_model(self):
        model = Sequential()
        model.add(Embedding(output_dim=self.n_embedding_nodes, input_dim=self.lexicon_size + 1,
                            input_length=self.n_timesteps, mask_zero=True, name='embedding_layer'))
        for layer_num in range(self.n_hidden_layers):
            if layer_num == self.n_hidden_layers - 1:
                return_sequences = False
            else: #add extra hidden layers
                return_sequences = True
            model.add(GRU(output_dim=self.n_hidden_nodes, return_sequences=return_sequences, name='hidden_layer' + str(layer_num + 1)))
        model.add(Dense(output_dim=self.n_output_classes, activation='softmax', name='output_layer'))
        # if emb_weights is not None:
        #     #initialize weights with lm weights
        #     model.layers[0].set_weights(emb_weights) #set embeddings
        # if layer1_weights is not None:
        #     model.layers[1].set_weights(layer1_weights) #set recurrent layer 1         
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        return model
test_tasks.py (project: RecommendationSystem, author: TURuibo)
def test_temporal_clf(self):
        print('temporal classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5,10), 
            classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(GRU(X_train.shape[-1], y_train.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        self.assertTrue(history.validation_accuracy[-1] > 0.9)
model.py (project: keras-molecules, author: maxhodak)
def _buildDecoder(self, z, latent_rep_size, max_length, charset_length):
        h = Dense(latent_rep_size, name='latent_input', activation='relu')(z)
        h = RepeatVector(max_length, name='repeat_vector')(h)
        h = GRU(501, return_sequences=True, name='gru_1')(h)
        h = GRU(501, return_sequences=True, name='gru_2')(h)
        h = GRU(501, return_sequences=True, name='gru_3')(h)
        return TimeDistributed(Dense(charset_length, activation='softmax'), name='decoded_mean')(h)
test_recurrent.py (project: keras, author: GeekLiB)
def rnn_test(f):
    """
    All the recurrent layers share the same interface,
    so we can run through them with a single function.
    """
    f = keras_test(f)
    return pytest.mark.parametrize("layer_class", [
        recurrent.SimpleRNN,
        recurrent.GRU,
        recurrent.LSTM
    ])(f)
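A sketch of how the decorator is typically applied; the test body below is illustrative, not part of the project, and reuses the module-level shape constants seen in test_regularizer above:

@rnn_test
def test_return_sequences(layer_class):
    # Runs once each for SimpleRNN, GRU and LSTM.
    layer = layer_class(output_dim, return_sequences=True,
                        input_shape=(timesteps, embedding_dim))
    layer.build((nb_samples, timesteps, embedding_dim))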
adder.py (project: soph, author: Linusp)
def build_model(input_size, seq_len, hidden_size):
    """???? seq2seq ??"""
    model = Sequential()
    model.add(GRU(input_dim=input_size, output_dim=hidden_size, return_sequences=False))
    model.add(Dense(hidden_size, activation="relu"))
    model.add(RepeatVector(seq_len))
    model.add(GRU(hidden_size, return_sequences=True))
    model.add(TimeDistributed(Dense(output_dim=input_size, activation="softmax")))
    model.compile(loss="categorical_crossentropy", optimizer='adam')

    return model
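A hedged call example; the sizes are illustrative (say, a one-hot alphabet of 12 symbols and target sequences of length 4):

model = build_model(input_size=12, seq_len=4, hidden_size=128)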
pig_latin.py (project: soph, author: Linusp)
def build_model(input_size, seq_len, hidden_size):
    """???? sequence to sequence ??"""
    model = Sequential()
    model.add(GRU(input_dim=input_size, output_dim=hidden_size, return_sequences=False))
    model.add(Dense(hidden_size, activation="relu"))
    model.add(RepeatVector(seq_len))
    model.add(GRU(hidden_size, return_sequences=True))
    model.add(TimeDistributed(Dense(output_dim=input_size, activation="linear")))
    model.compile(loss="mse", optimizer='adam')

    return model
understand.py (project: soph, author: Linusp)
def understand_return_sequence():
    """?????? recurrent layer ?? return_sequences ??"""
    model_1 = Sequential()
    model_1.add(GRU(input_dim=256, output_dim=256, return_sequences=True))
    model_1.compile(loss='mean_squared_error', optimizer='sgd')
    train_x = np.random.randn(100, 78, 256)
    train_y = np.random.randn(100, 78, 256)
    model_1.fit(train_x, train_y, verbose=0)

    model_2 = Sequential()
    model_2.add(GRU(input_dim=256, output_dim=256, return_sequences=False))
    model_2.compile(loss='mean_squared_error', optimizer='sgd')
    train_x = np.random.randn(100, 78, 256)
    train_y = np.random.randn(100, 256)
    model_2.fit(train_x, train_y, verbose=0)

    inz = np.random.randn(100, 78, 256)
    rez_1 = model_1.predict_proba(inz, verbose=0)
    rez_2 = model_2.predict_proba(inz, verbose=0)

    print()
    print('=========== understand return_sequence =================')
    print('Input shape is: {}'.format(inz.shape))
    print('Output shape of model with `return_sequences=True`: {}'.format(rez_1.shape))
    print('Output shape of model with `return_sequences=False`: {}'.format(rez_2.shape))
    print('====================== end =============================')
rnn.py (project: Hotpot, author: Liang-Qiu)
def config(c):
    c['dropout'] = 4/5
    c['dropoutfix_inp'] = 0
    c['dropoutfix_rec'] = 0
    c['l2reg'] = 1e-4

    c['rnnbidi'] = True
    c['rnn'] = GRU
    c['rnnbidi_mode'] = 'sum'
    c['rnnact'] = 'tanh'
    c['rnninit'] = 'glorot_uniform'
    c['sdim'] = 2
    c['rnnlevels'] = 1

    c['project'] = True
    c['pdim'] = 2
    c['pact'] = 'tanh'
    c['pinit'] = 'glorot_uniform'

    # model-external:
    c['inp_e_dropout'] = 4/5
    c['inp_w_dropout'] = 0
    # anssel-specific:
    c['ptscorer'] = B.mlp_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 2
yt8m_frame_model.py (project: Youtube8mdataset_kagglechallenge, author: jasonlee27)
def load_model(self, frm_modelweights=''):
        frm_model = Sequential()
        frm_model.add(GRU(2048,
                          input_shape=(None, self.feature_size),
                          return_sequences=True,
                          activation='relu',
                          name='fc1'))
        frm_model.add(Dropout(0.5))
        frm_model.add(GRU(2048,
                          return_sequences=True,
                          activation='relu',
                          name='fc2'))
        frm_model.add(Dropout(0.5))
        frm_model.add(GRU(2048,
                          return_sequences=False,
                          activation='relu',
                          name='fc3'))
        frm_model.add(Dropout(0.5))
        frm_model.add(Dense(self.numclasses, activation='softmax', name='prediction'))
        if frm_modelweights:
            frm_model.load_weights(frm_modelweights, by_name=True)
            print("Frame model loaded with weights from %s." % frm_modelweights)
        else:
            print "Empty frame model loaded."

        return frm_model
load_deepmodels.py (project: Youtube8mdataset_kagglechallenge, author: jasonlee27)
def load_model(self, frm_modelweights='', frmdiff_modelweights=''):
        frm_model = Sequential()
        frm_model.add(GRU(4096,
                          return_sequences=True,
                          input_dim=self.feature_size,
                          input_length=MAX_FRAMES,
                          activation='relu',
                          name='fc1'))
        frm_model.add(Dropout(0.3))
        frm_model.add(GRU(4096,
                          return_sequences=False,
                          activation='relu',
                          name='fc2'))
        frm_model.add(Dropout(0.3))
        frm_model.add(Dense(self.numclasses, activation='softmax', name='frm_prediction'))
        if frm_modelweights:
            frm_model.load_weights(frm_modelweights, by_name=True)
            print("Frame model loaded with weights from %s." % frm_modelweights)
        else:
            print "Empty frame model loaded."

        '''
        frmdiff_model = Sequential()
        frmdiff_model.add(GRU(4096, input_dim=self.feature_size, activation='relu', name='fc1'))
        frmdiff_model.add(Dropout(0.3))
        frmdiff_model.add(GRU(4096, activation='relu', name='fc2'))
        frmdiff_model.add(Dropout(0.3))
        frmdiff_model.add(Dense(self.numclasses, activation='softmax', name='frmdiff_feature'))

        if frmdiff_modelweights:
            frmdiff_model.load_weights(frmdiff_modelweights, by_name=True)
            print('Frame model loaded with weights from %s.' % frmdiff_modelweights)
        else:
            print "Empty frame model loaded."

        model = Sequential()
        model.add(Merge([frm_model, frmdiff_model], mode='concat'))
        model.add(Dense(self.numclasses, activation='softmax', name='predictions'))
        '''

        return frm_model
test_tasks.py (project: keras-recommendation, author: sonyisme)
def test_temporal_reg(self):
        print('temporal regression data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5, 10), output_shape=(2,),
            classification=False)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        model = Sequential()
        model.add(GRU(X_train.shape[-1], y_train.shape[-1]))
        model.compile(loss='hinge', optimizer='rmsprop')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), verbose=2)
        self.assertTrue(history.validation_loss[-1] < 0.75)
test_recurrent.py (project: keras-customized, author: ambrite)
def rnn_test(f):
    """
    All the recurrent layers share the same interface,
    so we can run through them with a single function.
    """
    f = keras_test(f)
    return pytest.mark.parametrize("layer_class", [
        recurrent.SimpleRNN,
        recurrent.GRU,
        recurrent.LSTM
    ])(f)
ntm.py (project: ntm_keras, author: flomlo)
def build(self, input_shape):
        bs, input_length, input_dim = input_shape

        self.controller_input_dim, self.controller_output_dim = controller_input_output_shape(
                input_dim, self.units, self.m_depth, self.n_slots, self.shift_range, self.read_heads,
                self.write_heads)

        # Now that we've calculated the shape of the controller, we can add it to the layer/model.
        if self.controller is None:
            self.controller = Dense(
                name = "controller",
                activation = 'linear',
                bias_initializer = 'zeros',
                units = self.controller_output_dim,
                input_shape = (bs, input_length, self.controller_input_dim))
            self.controller.build(input_shape=(self.batch_size, input_length, self.controller_input_dim))
            self.controller_with_state = False


        # This is a fixed shift matrix
        self.C = _circulant(self.n_slots, self.shift_range)

        self.trainable_weights = self.controller.trainable_weights 

        # We need to declare the number of states we want to carry around.
        # In our case the dimension seems to be 6 (LSTM) or 5 (GRU) or 4 (FF),
        # see self.get_initial_states, those respond to:
        # [old_ntm_output] + [init_M, init_wr, init_ww] + [init_h] (LSTM and GRU) + [init_c] (LSTM only)
        # old_ntm_output does not make sense in our world, but is required by the definition of the step function we
        # intend to use.
        # WARNING: What self.state_spec does is only poorly understood,
        # I only copied it from keras/recurrent.py.
        self.states = [None, None, None, None]
        self.state_spec = [InputSpec(shape=(None, self.output_dim)),                            # old_ntm_output
                            InputSpec(shape=(None, self.n_slots, self.m_depth)),                # Memory
                            InputSpec(shape=(None, self.read_heads, self.n_slots)),   # weights_read
                            InputSpec(shape=(None, self.write_heads, self.n_slots))]  # weights_write

        super(NeuralTuringMachine, self).build(input_shape)
rnnA.py (project: keyphrase-extraction, author: sagarchaturvedi1)
def get_nets(name):
    if name == 'LSTM':
        return recurrent.LSTM
    elif name == 'GRU':
        return recurrent.GRU
    else:
        return recurrent.SimpleRNN
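Hypothetical usage of this factory; the surrounding model setup is illustrative, not from the project:

from keras.models import Sequential

RNN = get_nets('GRU')  # resolves to recurrent.GRU
model = Sequential()
model.add(RNN(128, input_shape=(20, 64)))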
rnnB.py (project: keyphrase-extraction, author: sagarchaturvedi1)
def get_nets(name):
    if name == 'LSTM':
        return recurrent.LSTM
    elif name == 'GRU':
        return recurrent.GRU
    else:
        return recurrent.SimpleRNN
lstm.py (project: mars_express, author: wsteitz)
def fit(self, x, y):
        input_dim = x.shape[1]
        output_dim = y.shape[1]
        self.x_train = x

        start = len(x) % (self.batch_size * self.sequence_length)

        x_seq = self.sliding_window(x.iloc[start:])
        y_seq = self.sliding_window(y.iloc[start:])

        model = Sequential()
        model.add(GRU(1024, batch_input_shape=(self.batch_size, self.sequence_length, input_dim), return_sequences=True, stateful=True))
        model.add(Activation("tanh"))
        model.add(GRU(1024, return_sequences=True))
        model.add(Activation("tanh"))
        model.add(GRU(512, return_sequences=True))
        model.add(Activation("tanh"))
        #model.add(Dropout(0.5))
        model.add(TimeDistributed(Dense(output_dim)))
        model.add(Activation("linear"))

        # optimizer = keras.optimizers.RMSprop(lr=0.002)  # superseded by Nadam below
        optimizer = keras.optimizers.Nadam(lr=0.002)
        model.compile(loss='mse', optimizer=optimizer)

        model.fit(x_seq, y_seq, batch_size=self.batch_size, verbose=1, nb_epoch=self.n_epochs, shuffle=False)
        self.model = model
        return self
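Because the first GRU is stateful with a fixed batch_input_shape, prediction must use the same batch size, and carried-over state should be cleared between independent passes; a sketch of the matching predict step, assuming it runs on the same object:

self.model.reset_states()  # drop state carried over from training
preds = self.model.predict(x_seq, batch_size=self.batch_size)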
model.py (project: headline-generation, author: sallamander)
def make_model(embedding_weights, input_length=50):
    """Build an recurrent net based off the input parameters and return it compiled.

    Args: 
    ----
        embedding_weights: 2d np.ndarray
        input_length (optional): int
            How many words each article body will hold

    Return: 
    ------
        model: keras.model.Sequential compiled model
    """

    dict_size = embedding_weights.shape[0] # Num words in corpus
    embedding_dim = embedding_weights.shape[1] # Num dims in vec representation

    bodies = Input(shape=(input_length,), dtype='int32') 
    embeddings = Embedding(input_dim=dict_size, output_dim=embedding_dim,
                           weights=[embedding_weights], dropout=0.5)(bodies)
    layer = GRU(1024, return_sequences=True, dropout_W=0.5, dropout_U=0.5)(embeddings)
    layer = GRU(1024, return_sequences=False, dropout_W=0.5, dropout_U=0.5)(layer)
    layer = Dense(dict_size, activation='softmax')(layer)

    model = Model(input=bodies, output=layer)

    model.compile(loss='categorical_crossentropy', optimizer='adagrad')

    return model
DeepRNN.py (project: Conceptors, author: CogSciUOS)
def buildModel(self):
        '''
        :Description:
            Build neural network model
        '''

        self.model = Sequential()
        self.model.add(Embedding(self.embedding, 16, input_length=self.max_len))
        for l in range(self.layers-1):
            self.model.add(GRU(self.neurons, activation=self.activation, return_sequences=True, dropout_W=self.dropout, dropout_U=self.dropout))

        self.model.add(GRU(self.neurons, activation=self.activation, return_sequences=False, dropout_W=self.dropout, dropout_U=self.dropout))    

        self.model.add(Dense(self.n_songs))
        self.model.add(Activation('softmax'))
models.py (project: cervantes, author: textclf)
def _generate_model(self, lembedding, num_classes=2, unit='gru', rnn_size=128, train_vectors=True):

        model = Sequential()
        if lembedding.vector_box.W is None:
            emb = Embedding(lembedding.vector_box.size,
                            lembedding.vector_box.vector_dim,
                            W_constraint=None)
        else:
            emb = Embedding(lembedding.vector_box.size,
                            lembedding.vector_box.vector_dim,
                            weights=[lembedding.vector_box.W], W_constraint=None)
        emb.trainable = train_vectors
        model.add(emb)
        if unit == 'gru':
            model.add(GRU(rnn_size))
        else:
            model.add(LSTM(rnn_size))
        model.add(Dropout(0.2))
        if num_classes == 2:
            model.add(Dense(1, activation='sigmoid'))
            if self.optimizer is None:
                self.optimizer = 'rmsprop'
            model.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=["accuracy"])
        else:
            if self.optimizer is None:
                self.optimizer = 'adam'
            model.add(Dense(num_classes, activation='softmax'))
            model.compile(loss='categorical_crossentropy', optimizer=self.optimizer, metrics=["accuracy"])

        return model

