def gru_backward_sentence_level(self, sentence_representation):
"""
:param sentence_representation: [batch_size,num_sentences,hidden_size*2]
:return:forward hidden state: a list,length is num_sentences, each element is [batch_size,hidden_size]
"""
    # split the sentence representations along the time (sentence) axis
    sentence_representation_splitted = tf.split(sentence_representation, self.num_sentences,
                                                axis=1)  # a list of length num_sentences, each element [batch_size, 1, hidden_size*2]
    sentence_representation_squeeze = [tf.squeeze(x, axis=1) for x in
                                       sentence_representation_splitted]  # a list of length num_sentences, each element [batch_size, hidden_size*2]
    sentence_representation_squeeze.reverse()  # process sentences in reverse order for the backward pass
    h_t = tf.ones((self.batch_size, self.hidden_size * 2))  # initial hidden state: [batch_size, hidden_size*2]
    h_t_backward_list = []
    for time_step, Xt in enumerate(sentence_representation_squeeze):  # Xt: [batch_size, hidden_size*2]
        h_t = self.gru_single_step_sentence_level(Xt, h_t)  # h_t: [batch_size, hidden_size*2]
        h_t_backward_list.append(h_t)
    h_t_backward_list.reverse()  # restore original sentence order (added 2017.06.14)
    return h_t_backward_list  # a list of length num_sentences, each element is [batch_size, hidden_size*2]
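
# Note: gru_single_step_sentence_level is called above but not shown in this
# excerpt. Below is a minimal sketch of a standard GRU step with the shapes
# used here, assuming the sentence-level gate weights (self.W_z_sentence,
# self.U_z_sentence, self.b_z_sentence, and the corresponding _r_ and _h_
# variables, all [hidden_size*2, hidden_size*2] / [hidden_size*2] tf.Variables)
# are created elsewhere in the model; this is not necessarily the author's
# exact implementation.
def gru_single_step_sentence_level(self, Xt, h_t_minus_1):
    """
    :param Xt: [batch_size, hidden_size*2]
    :param h_t_minus_1: [batch_size, hidden_size*2]
    :return: h_t: [batch_size, hidden_size*2]
    """
    # update gate z_t: how much of the candidate state to let through
    z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z_sentence) +
                        tf.matmul(h_t_minus_1, self.U_z_sentence) + self.b_z_sentence)
    # reset gate r_t: how much of the previous state feeds the candidate
    r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r_sentence) +
                        tf.matmul(h_t_minus_1, self.U_r_sentence) + self.b_r_sentence)
    # candidate state, computed from the input and the gated previous state
    h_t_candidate = tf.nn.tanh(tf.matmul(Xt, self.W_h_sentence) +
                               tf.matmul(r_t * h_t_minus_1, self.U_h_sentence) + self.b_h_sentence)
    # final state: interpolate between previous state and candidate
    h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candidate
    return h_t

# Usage sketch (hypothetical call site): in a bidirectional setup, the
# backward states above would typically be concatenated with the states of a
# matching forward pass, assuming a gru_forward_sentence_level method exists:
#   fw = self.gru_forward_sentence_level(sentence_representation)
#   bw = self.gru_backward_sentence_level(sentence_representation)
#   hidden_states = [tf.concat([f, b], axis=1) for f, b in zip(fw, bw)]
#   # each element: [batch_size, hidden_size*4]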
Source file: p1_HierarchicalAttention_model_transformer.py