# Imports assume TensorFlow 2.x's bundled Keras.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Bidirectional, GRU, Dense, BatchNormalization,
                                     TimeDistributed, GlobalAveragePooling1D)
from tensorflow.keras.regularizers import L1L2
from tensorflow.keras.callbacks import EarlyStopping


def BiGRU(X_train, y_train, X_test, y_test, gru_units, dense_units, input_shape,
          batch_size, epochs, drop_out, patience):
    model = Sequential()
    reg = L1L2(l1=0.2, l2=0.2)
    # First bidirectional GRU layer; return_sequences=True keeps the full time
    # dimension so the TimeDistributed layer below can be applied per step.
    model.add(Bidirectional(GRU(units=gru_units, dropout=drop_out, activation='relu',
                                recurrent_regularizer=reg, return_sequences=True),
                            input_shape=input_shape, merge_mode="concat"))
    model.add(BatchNormalization())
    # Apply the same dense layer to every time step.
    model.add(TimeDistributed(Dense(dense_units, activation='relu')))
    model.add(BatchNormalization())
    # Second bidirectional GRU layer, also returning the full sequence.
    model.add(Bidirectional(GRU(units=gru_units, dropout=drop_out, activation='relu',
                                recurrent_regularizer=reg, return_sequences=True),
                            merge_mode="concat"))
    model.add(BatchNormalization())
    # One output per time step, then average across time for a single prediction.
    model.add(Dense(units=1))
    model.add(GlobalAveragePooling1D())
    model.summary()  # summary() prints directly and returns None
    early_stopping = EarlyStopping(monitor="val_loss", patience=patience)
    model.compile(loss='mse', optimizer='adam')
    history_callback = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
                                 verbose=2, callbacks=[early_stopping],
                                 validation_data=(X_test, y_test), shuffle=True)
    return model, history_callback
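
A minimal usage sketch: the model expects sequence input shaped (samples, timesteps, features) and one scalar regression target per sequence. The shapes and hyperparameter values below are illustrative assumptions, not taken from the original post.

import numpy as np

# Dummy regression data, purely illustrative: 30-step sequences with 8 features.
X_train = np.random.rand(160, 30, 8).astype("float32")
y_train = np.random.rand(160, 1).astype("float32")
X_test = np.random.rand(40, 30, 8).astype("float32")
y_test = np.random.rand(40, 1).astype("float32")

model, history = BiGRU(X_train, y_train, X_test, y_test,
                       gru_units=64, dense_units=32,
                       input_shape=(30, 8),
                       batch_size=16, epochs=50,
                       drop_out=0.2, patience=5)

# model.fit returns a History object; val_loss is tracked because
# validation_data was supplied, so EarlyStopping can monitor it.
print(min(history.history["val_loss"]))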