def inference(self):
    """Main computation graph: 1. embedding layer, 2. recurrent-structure convolution layer, 3. max pooling, 4. FC layer, 5. softmax."""
    # 1. get the embedding of each word in the sentence
    self.embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x)  # shape: [None, sentence_length, embed_size]
    # 2. recurrent-structure convolution layer: [left context; word embedding; right context]
    output_conv = self.conv_layer_with_recurrent_structure()  # shape: [None, sentence_length, embed_size*3]
    # 2.1 optionally apply a non-linearity (currently disabled)
    # b = tf.get_variable("b", [self.embed_size*3])
    # h = tf.nn.relu(tf.nn.bias_add(output_conv, b), "relu")
    # 3. max pooling over the sentence dimension
    output_pooling = tf.reduce_max(output_conv, axis=1)  # shape: [None, embed_size*3]
    # 4. logits (linear projection, with dropout on the pooled features)
    with tf.name_scope("dropout"):
        h_drop = tf.nn.dropout(output_pooling, keep_prob=self.dropout_keep_prob)  # shape: [None, embed_size*3]
    with tf.name_scope("output"):
        # h_drop: [None, embed_size*3]; W_projection: [embed_size*3, num_classes]; b_projection: [num_classes]
        logits = tf.matmul(h_drop, self.W_projection) + self.b_projection  # shape: [batch_size, num_classes]
    return logits
Source file: p71_TextRCNN_mode2.py
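The conv_layer_with_recurrent_structure call in step 2 is defined elsewhere in the file. As a reading aid, here is a minimal sketch of what that layer plausibly computes, assuming the standard TextRCNN recurrent structure of Lai et al. (2015): each word is represented as the concatenation of its left context, its embedding, and its right context, which is why the shape comments show a width of embed_size*3. The standalone function name recurrent_conv_layer and the weight names (W_l, W_sl, W_r, W_sr) are illustrative assumptions, not taken from the source file.

import tensorflow as tf

def recurrent_conv_layer(embedded_words, embed_size, activation=tf.nn.tanh):
    """Sketch of a TextRCNN-style recurrent structure (names are hypothetical).
    embedded_words: [batch, sentence_length, embed_size]
    returns:        [batch, sentence_length, embed_size*3]"""
    words = tf.unstack(embedded_words, axis=1)  # list of [batch, embed_size] tensors
    W_l = tf.get_variable("W_l", [embed_size, embed_size])
    W_sl = tf.get_variable("W_sl", [embed_size, embed_size])
    W_r = tf.get_variable("W_r", [embed_size, embed_size])
    W_sr = tf.get_variable("W_sr", [embed_size, embed_size])

    # left context, scanned left-to-right:
    # c_l(w_i) = f(c_l(w_{i-1}) W_l + e(w_{i-1}) W_sl)
    left, c_l, prev = [], tf.zeros_like(words[0]), tf.zeros_like(words[0])
    for w in words:
        c_l = activation(tf.matmul(c_l, W_l) + tf.matmul(prev, W_sl))
        left.append(c_l)
        prev = w

    # right context, scanned right-to-left:
    # c_r(w_i) = f(c_r(w_{i+1}) W_r + e(w_{i+1}) W_sr)
    right = [None] * len(words)
    c_r, nxt = tf.zeros_like(words[0]), tf.zeros_like(words[0])
    for i in reversed(range(len(words))):
        c_r = activation(tf.matmul(c_r, W_r) + tf.matmul(nxt, W_sr))
        right[i] = c_r
        nxt = words[i]

    # word representation x_i = [c_l(w_i); e(w_i); c_r(w_i)]
    reps = [tf.concat([l, w, r], axis=1) for l, w, r in zip(left, words, right)]
    return tf.stack(reps, axis=1)  # [batch, sentence_length, embed_size*3]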
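Note that the docstring lists softmax as step 5, yet inference() returns raw logits. In TF1-style models the softmax is usually folded into the cross-entropy loss rather than applied in the forward graph. A minimal sketch of a matching loss follows; the placeholder name self.input_y is an assumption, not taken from this file.

# Hedged sketch: softmax + cross-entropy computed jointly on the raw logits.
# self.input_y is assumed to be an int placeholder of shape [None] holding class ids.
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=self.input_y, logits=logits)  # logits from inference(), [batch_size, num_classes]
loss = tf.reduce_mean(losses)            # average cross-entropy over the batch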