import tensorflow as tf
from tensorflow.contrib.rnn import BasicLSTMCell  # TF 1.x API


def build_sentence_encoder2(vocabulary_size, embeddings_matrix):
    """
    Build the computational graph for the bidirectional LSTM sentence encoder.
    Return only the placeholders and tensors that are referenced from other
    methods. Relies on the module-level constants LSTM_HIDDEN_SIZE and
    words_vocabulary, assumed to be defined elsewhere.
    """
    # One-hot encoding of the sentence: one row per word.
    sentence_oh_placeholder2 = tf.placeholder(shape=[None, vocabulary_size], dtype=tf.float32,
                                              name="sentence_placeholder")
    # The shape [vocabulary_size, WORD_EMB_SIZE] is inferred from the initializer.
    word_embeddings_matrix2 = tf.get_variable("W_we",
                                              initializer=tf.constant(embeddings_matrix, dtype=tf.float32))
    # Multiplying the one-hot rows by the embedding matrix is an embedding
    # lookup; expand_dims adds a batch dimension of 1.
    sentence_embedded2 = tf.expand_dims(tf.matmul(sentence_oh_placeholder2, word_embeddings_matrix2), 0)
    # Placeholder for the sentence length, fed as a length-1 vector since the
    # batch holds a single sentence.
    sent_lengths2 = tf.placeholder(dtype=tf.int32, name="sent_length_placeholder")
    # Forward cell
    lstm_fw_cell2 = BasicLSTMCell(LSTM_HIDDEN_SIZE, forget_bias=1.0)
    # Backward cell
    lstm_bw_cell2 = BasicLSTMCell(LSTM_HIDDEN_SIZE, forget_bias=1.0)
    # Run both cells over the sentence in a bidirectional RNN.
    outputs2, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell2, lstm_bw_cell2, sentence_embedded2,
                                                  sequence_length=sent_lengths2, dtype=tf.float32)
    # outputs2 is a tuple (output_fw, output_bw); each Tensor is shaped
    # [batch_size, max_time, LSTM_HIDDEN_SIZE]. Concatenating them along the
    # last axis yields one h vector per word.
    lstm_outputs2 = tf.concat(outputs2, 2)[0]  # shape: [max_time, 2 * LSTM_HIDDEN_SIZE]
    # The final forward state is the last time step of the forward output; the
    # final backward state is time step 0 of the backward output. Their
    # concatenation e_m represents the whole sentence.
    final_fw2 = outputs2[0][:, -1, :]
    final_bw2 = outputs2[1][:, 0, :]
    e_m2 = tf.concat((final_fw2, final_bw2), axis=1)
    # Bag-of-words features over the sentence words, appended to e_m.
    sentence_words_bow2 = tf.placeholder(tf.float32, [None, len(words_vocabulary)], name="sentence_words_bow")
    e_m_with_bow2 = tf.concat([e_m2, sentence_words_bow2], axis=1)
    return sentence_oh_placeholder2, sent_lengths2, sentence_words_bow2, lstm_outputs2, e_m_with_bow2
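
For reference, here is a minimal sketch of how the returned placeholders and tensors might be wired up at run time. It assumes a TF 1.x session, that vocabulary_size, embeddings_matrix, LSTM_HIDDEN_SIZE, and words_vocabulary are already defined, and uses hypothetical helpers sentence_to_one_hot and sentence_to_bow that turn a tokenized sentence into the numpy arrays the placeholders expect:

# Minimal usage sketch (assumptions: TF 1.x; sentence_to_one_hot and
# sentence_to_bow are hypothetical helpers producing numpy arrays of shapes
# [num_words, vocabulary_size] and [1, len(words_vocabulary)] respectively).
sent_ph, len_ph, bow_ph, h_vectors, sentence_encoding = build_sentence_encoder2(
    vocabulary_size, embeddings_matrix)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    tokens = "which river is the longest".split()
    h, e_m_bow = sess.run(
        [h_vectors, sentence_encoding],
        feed_dict={sent_ph: sentence_to_one_hot(tokens),  # [len(tokens), vocabulary_size]
                   len_ph: [len(tokens)],                 # length-1 vector: batch size is 1
                   bow_ph: sentence_to_bow(tokens)})      # [1, len(words_vocabulary)]
    # h has shape [len(tokens), 2 * LSTM_HIDDEN_SIZE]; e_m_bow has shape
    # [1, 2 * LSTM_HIDDEN_SIZE + len(words_vocabulary)].

Feeding the true sentence length lets bidirectional_dynamic_rnn stop the recurrence at the end of the sentence rather than running over padding.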