# Module-level imports this snippet depends on:
from keras.models import Sequential
from keras.layers import Dropout, LSTM, Dense, Activation
from keras.initializers import Constant
from keras.optimizers import RMSprop
from keras import initializers
# BatchRenormalization is not part of core Keras; it ships with keras-contrib
# (or a project-local renormalization module).
from keras_contrib.layers import BatchRenormalization

def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2,
             loss='risk_estimation'):
    print("initializing..., learning rate %s, n_layers %s, n_hidden %s, dropout rate %s."
          % (lr, n_layers, n_hidden, rate_dropout))
    self.model = Sequential()
    # Dropout applied directly to the input; input_shape is (timesteps, features).
    self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
    # All LSTM layers except the last return full sequences, so each one feeds
    # a sequence into the next stacked layer.
    for i in range(n_layers - 1):
        self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                            recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                            recurrent_initializer='orthogonal', bias_initializer='zeros',
                            dropout=rate_dropout, recurrent_dropout=rate_dropout))
    # The final LSTM returns only its last output vector.
    self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                        recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                        recurrent_initializer='orthogonal', bias_initializer='zeros',
                        dropout=rate_dropout, recurrent_dropout=rate_dropout))
    self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
    # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
    #                                   moving_variance_initializer=Constant(value=0.25)))
    # Keras 2 spells this argument beta_initializer (beta_init is the old Keras 1 name).
    self.model.add(BatchRenormalization(axis=-1, beta_initializer=Constant(value=0.5)))
    # 'relu_limited' is a custom activation and 'risk_estimation' a custom loss;
    # both must be registered with Keras before compile() runs (see the sketch below).
    self.model.add(Activation('relu_limited'))
    opt = RMSprop(lr=lr)
    self.model.compile(loss=loss,
                       optimizer=opt,
                       metrics=['accuracy'])
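
The two string identifiers above, 'relu_limited' and 'risk_estimation', are not built into Keras, so they must be defined and registered before compile() runs. Below is a minimal sketch of what they could look like; the exact formulas (a ReLU capped at 1.0, a loss rewarding predictions aligned with realized returns, and the scale factor of 100) are assumptions for illustration, not confirmed definitions.

from keras import backend as K
from keras.utils.generic_utils import get_custom_objects

def relu_limited(x, alpha=0.0, max_value=1.0):
    # Assumed definition: ReLU clipped to [0, max_value], so the model's
    # single output can be read as a position size in [0, 1].
    return K.relu(x, alpha=alpha, max_value=max_value)

def risk_estimation(y_true, y_pred):
    # Assumed definition: negative mean of position (y_pred) times realized
    # return (y_true); minimizing this loss maximizes the average gain.
    return -100.0 * K.mean(y_true * y_pred)

# Register both under the string names the constructor uses, so that
# Activation('relu_limited') and loss='risk_estimation' resolve.
get_custom_objects().update({
    'relu_limited': relu_limited,
    'risk_estimation': risk_estimation,
})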
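
With the custom objects registered, the constructor can be exercised end to end. The wrapper class name and the window shape (30 timesteps, 61 features) below are hypothetical, chosen only to make the sketch runnable:

# Hypothetical enclosing class; the __init__ defined above is attached to it.
class WindPuller(object):
    pass

WindPuller.__init__ = __init__

wp = WindPuller(input_shape=(30, 61), lr=0.002)
wp.model.summary()  # prints the Dropout -> stacked LSTM -> Dense -> renorm -> activation stack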