def create_model(self):
    """
    Build and compile the RNN headline/summarization model.

    Architecture: Embedding (weights initialized from the pretrained
    word2vec matrix) -> ``rnn_layers`` stacked LSTMs -> a custom
    "simple context" Lambda layer -> TimeDistributed Dense projection
    onto the vocabulary -> softmax.

    NOTE(review): relies on module-level hyperparameters ``max_length``,
    ``rnn_layers``, ``rnn_size``, ``max_len_desc`` and ``learning_rate``
    that are not visible in this chunk -- assumed to be defined at module
    scope; confirm against the rest of the file.

    Returns:
        The compiled Keras ``Sequential`` model.
    """
    length_vocab, embedding_size = self.word2vec.shape
    print("shape of word2vec matrix ", self.word2vec.shape)

    model = Sequential()

    # Embedding weights come from the pretrained word2vec matrix.
    # mask_zero=True treats index 0 as padding and propagates a mask
    # down the stack.
    # TODO: look at mask zero flag
    model.add(
        Embedding(
            length_vocab, embedding_size,
            input_length=max_length,
            weights=[self.word2vec], mask_zero=True,
            name='embedding_layer'
        )
    )

    # Stacked LSTMs; return_sequences=True keeps the full time axis for
    # the downstream context layer.  No dropout added!
    for i in range(rnn_layers):
        model.add(LSTM(rnn_size, return_sequences=True,
                       name='lstm_layer_%d' % (i + 1)))

    # Manually-defined context layer; the mask lambda drops the first
    # max_len_desc timesteps (the description part) so only the headline
    # positions flow onward.
    model.add(Lambda(self.simple_context,
                     mask=lambda inputs, mask: mask[:, max_len_desc:],
                     output_shape=self.output_shape_simple_context_layer,
                     name='simple_context_layer'))

    # Project each timestep onto the vocabulary.  Fix: the layer name
    # belongs on the TimeDistributed wrapper, not the inner Dense, so
    # model.summary() / get_layer() show the intended name.  Reuse
    # length_vocab instead of re-reading self.word2vec.shape[0].
    model.add(TimeDistributed(Dense(length_vocab),
                              name='time_distributed_layer'))
    model.add(Activation('softmax', name='activation_layer'))

    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # Override the compiled optimizer's default learning rate in place.
    K.set_value(model.optimizer.lr, np.float32(learning_rate))

    print(model.summary())
    return model
# (removed: web-scrape navigation residue -- "评论列表" / "文章目录")