def __init__(self, **kwargs):
"""
:param **kwargs: output_dim=4: output dimension of LSTM layer;
activation_lstm='tanh': activation function for LSTM layers;
activation_dense='relu': activation function for Dense layer;
activation_last='sigmoid': activation function for last layer;
drop_out=0.2: fraction of input units to drop;
np_epoch=10, the number of epoches to train the model. epoch is one forward pass and one backward pass of all the training examples;
batch_size=32: number of samples per gradient update. The higher the batch size, the more memory space you'll need;
loss='mean_square_error': loss function;
optimizer='rmsprop'
"""
self.output_dim = kwargs.get('output_dim', 8)
self.activation_lstm = kwargs.get('activation_lstm', 'relu')
self.activation_dense = kwargs.get('activation_dense', 'relu')
self.activation_last = kwargs.get('activation_last', 'softmax') # softmax for multi-class output
self.dense_layer = kwargs.get('dense_layer', 2) # at least 2 layers
self.lstm_layer = kwargs.get('lstm_layer', 2)
self.drop_out = kwargs.get('drop_out', 0.2)
self.nb_epoch = kwargs.get('nb_epoch', 10)
self.batch_size = kwargs.get('batch_size', 100)
self.loss = kwargs.get('loss', 'categorical_crossentropy')
self.optimizer = kwargs.get('optimizer', 'rmsprop')
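# A minimal usage sketch. Hedged assumptions: the enclosing class name "RNNModel"
# below is a placeholder for illustration only (the real class name is not shown in
# this snippet), and the training/prediction methods that consume nb_epoch and
# batch_size are defined elsewhere in the class.
#
#     model = RNNModel(output_dim=8,
#                      lstm_layer=2,
#                      dense_layer=2,
#                      drop_out=0.2,
#                      nb_epoch=10,
#                      batch_size=100,
#                      loss='categorical_crossentropy',
#                      optimizer='rmsprop')
#
# Any keyword omitted above falls back to the kwargs.get(...) default set in __init__.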