def biLSTM_encoder2(input, units, dropout=0.0, recurrent_dropout=0.0,
                    num_layers=3, input_dropout=0.3, output_dropout=0.3,
                    concat_layers=True):
    """Question and context encoder: a stack of Bi-LSTM layers (Keras).

    Each layer is a ``Bidirectional(LSTM(...))`` returning full sequences,
    with optional dropout applied to each layer's input and to the final
    output representation.

    Args:
        input: Input tensor of shape (batch, timesteps, features).
        units: Number of LSTM units per direction in each layer.
        dropout: Input-to-hidden dropout rate inside each LSTM cell.
        recurrent_dropout: Hidden-to-hidden dropout rate inside each LSTM cell.
        num_layers: Number of stacked Bi-LSTM layers.
        input_dropout: Dropout rate applied to each layer's input tensor
            (including the original input). Skipped when <= 0.
        output_dropout: Dropout rate applied to the final output tensor.
            Skipped when <= 0.
        concat_layers: If True, concatenate every layer's output along the
            feature axis; otherwise return only the last layer's output.

    Returns:
        Tensor of shape (batch, timesteps, 2 * units * num_layers) when
        ``concat_layers`` is True, else (batch, timesteps, 2 * units).
    """
    outputs = [input]
    for i in range(num_layers):
        rnn_input = outputs[-1]
        if input_dropout > 0:
            rnn_input = Dropout(rate=input_dropout)(rnn_input)
        rnn_output = Bidirectional(LSTM(units=units,
                                        activation='tanh',
                                        recurrent_activation='hard_sigmoid',
                                        use_bias=True,
                                        kernel_initializer='glorot_uniform',
                                        recurrent_initializer='orthogonal',
                                        bias_initializer='zeros',
                                        unit_forget_bias=True,
                                        kernel_regularizer=None,
                                        recurrent_regularizer=None,
                                        bias_regularizer=None,
                                        activity_regularizer=None,
                                        kernel_constraint=None,
                                        recurrent_constraint=None,
                                        bias_constraint=None,
                                        return_sequences=True,
                                        dropout=dropout,
                                        recurrent_dropout=recurrent_dropout,
                                        unroll=False))(rnn_input)
        outputs.append(rnn_output)
    # Concatenate the hidden representations of all layers (skip outputs[0],
    # which is the raw input), or keep only the deepest layer's output.
    if concat_layers:
        output = concatenate(outputs[1:])
    else:
        output = outputs[-1]
    if output_dropout > 0:
        # BUG FIX: previously used rate=input_dropout here, silently
        # ignoring the output_dropout parameter.
        output = Dropout(rate=output_dropout)(output)
    return output