def gru_backward_word_level(self, embedded_words):
    """Run the word-level GRU over each sentence from right to left.

    :param embedded_words: [batch_size*num_sentences, sentence_length, embed_size]
    :return: list of length sentence_length; element t is the backward hidden
             state for word t, shape [batch_size*num_sentences, hidden_size]
    """
    # Slice the time axis into sentence_length tensors of shape
    # [batch_size*num_sentences, 1, embed_size] and drop the singleton axis,
    # giving one [batch_size*num_sentences, embed_size] tensor per word.
    per_word = [
        tf.squeeze(step, axis=1)
        for step in tf.split(embedded_words, self.sequence_length, axis=1)
    ]
    # Initial hidden state (ones, matching the forward pass convention here).
    hidden = tf.ones((self.batch_size * self.num_sentences, self.hidden_size))
    backward_states = []
    # Consume the words last-to-first so the recurrence runs backward.
    for word in reversed(per_word):
        hidden = self.gru_single_step_word_level(word, hidden)
        backward_states.append(hidden)
    # Flip back so index t again corresponds to word t (forward order).
    backward_states.reverse()
    return backward_states
# forward gru for second level: sentence level
# Source file: p1_HierarchicalAttention_model.py