# Source: maskedreshape.py
from keras.layers import (Activation, Bidirectional, Lambda, LSTM, Reshape,
                          TimeDistributed)

# MaskedReshape is the custom mask-preserving reshape layer this file is
# named after (see the sketch below).


def create_two_level_bi_lstm(input_4d, embedding_layer,
                             max_sentence_length, max_n_analyses,
                             max_word_root_length, lstm_dim, embedding_dim):
    # Flatten the 4D input (sentence x analysis x character) into one long
    # index sequence so the embedding layer is applied in a single pass.
    r = Reshape((max_sentence_length * max_n_analyses * max_word_root_length,))
    rr = r(input_4d)
    input_embeddings = embedding_layer(rr)
    print(input_embeddings)

    # Restore the per-root structure while also reshaping the mask produced
    # by the embedding layer, which a plain Reshape would discard.
    r = MaskedReshape(
        (max_sentence_length * max_n_analyses, max_word_root_length, embedding_dim),
        (max_sentence_length * max_n_analyses, max_word_root_length))
    rr = r(input_embeddings)

    # First level: a character BiLSTM over each word root, applied to every
    # (sentence position, analysis) pair via TimeDistributed.
    lstm_layer = Bidirectional(LSTM(lstm_dim,
                                    input_shape=(max_word_root_length, embedding_dim)))
    td_lstm_layer = TimeDistributed(lstm_layer,
                                    input_shape=(max_word_root_length, embedding_dim))
    lstm_layer_output = td_lstm_layer(rr)
    lstm_layer_output_relu = Activation('relu')(lstm_layer_output)
    print("lstm_layer_output_relu", lstm_layer_output_relu)

    # Regroup the BiLSTM outputs by sentence position: one 2 * lstm_dim
    # vector per analysis. The identity Lambda drops the mask so that the
    # plain (non-masked) Reshape below is valid.
    r = Reshape((max_sentence_length, max_n_analyses, 2 * lstm_dim))
    lstm_layer_output_relu = Lambda(lambda x: x, output_shape=lambda s: s)(lstm_layer_output_relu)
    lstm_layer_output_relu_reshaped = r(lstm_layer_output_relu)
    print("lstm_layer_output_relu_reshaped", lstm_layer_output_relu_reshaped)

    return input_embeddings, lstm_layer_output_relu_reshaped
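
MaskedReshape is not a stock Keras layer. Judging from the constructor call above, its two arguments are the target shape for the tensor and the target shape for its mask (both without the batch dimension). A minimal sketch of such a layer, assuming it does nothing beyond reshaping both the output and the incoming mask:

from keras import backend as K
from keras.layers import Layer


class MaskedReshape(Layer):
    """Reshape that also reshapes the incoming mask instead of dropping it."""

    def __init__(self, target_shape, target_mask_shape, **kwargs):
        self.target_shape = tuple(target_shape)            # batch dim excluded
        self.target_mask_shape = tuple(target_mask_shape)  # batch dim excluded
        self.supports_masking = True
        super(MaskedReshape, self).__init__(**kwargs)

    def call(self, inputs, mask=None):
        # Reshape the tensor, keeping the batch dimension flexible.
        return K.reshape(inputs, (-1,) + self.target_shape)

    def compute_mask(self, inputs, mask=None):
        # Reshape the mask alongside the tensor instead of discarding it.
        if mask is None:
            return None
        return K.reshape(mask, (-1,) + self.target_mask_shape)

    def compute_output_shape(self, input_shape):
        return (input_shape[0],) + self.target_shape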
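
A usage sketch, assuming input_4d carries character indices for each morphological analysis of each word and a mask-producing Embedding. All dimensions and names below are illustrative, not from the source:

from keras.layers import Embedding, Input
from keras.models import Model

# Hypothetical dimensions for illustration only.
max_sentence_length, max_n_analyses, max_word_root_length = 20, 8, 15
lstm_dim, embedding_dim, n_chars = 64, 32, 100

input_4d = Input(shape=(max_sentence_length, max_n_analyses,
                        max_word_root_length), dtype='int32')
# mask_zero=True is what makes the mask-aware MaskedReshape necessary.
embedding_layer = Embedding(n_chars, embedding_dim, mask_zero=True)

input_embeddings, root_encodings = create_two_level_bi_lstm(
    input_4d, embedding_layer,
    max_sentence_length, max_n_analyses, max_word_root_length,
    lstm_dim, embedding_dim)

# root_encodings has shape
# (batch, max_sentence_length, max_n_analyses, 2 * lstm_dim).
model = Model(inputs=input_4d, outputs=root_encodings)
model.summary()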