def sentencenet(self, input_tensor, reuse=False):
    # Sentence branch: two fully connected layers, batch norm, then L2
    # normalization, producing a 512-d unit-length sentence embedding.
    # (Uses TF 1.x tf.contrib layers.)
    with tf.variable_scope('sentence_net', reuse=reuse):
        wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
        sentence_fc1 = tf.contrib.layers.fully_connected(input_tensor, 2048, weights_regularizer=wd, scope='s_fc1')
        # drop_fc1 = tf.nn.dropout(sentence_fc1, self.keep_prob, name='drop_fc1')
        sentence_fc2 = tf.contrib.layers.fully_connected(sentence_fc1, 512, activation_fn=None, weights_regularizer=wd, scope='s_fc2')
        sentence_fc2_bn = tf.contrib.layers.batch_norm(sentence_fc2, center=True, scale=True, is_training=self.is_training,
                                                       reuse=reuse, decay=0.999, updates_collections=None,
                                                       scope='s_fc2_bn')
        # Rescale each row to unit L2 norm so dot products give cosine similarity.
        embed = sentence_fc2_bn / tf.norm(sentence_fc2_bn, axis=-1, keep_dims=True)
        self.endpoint['sentence_fc1'] = sentence_fc1
        self.endpoint['sentence_fc2'] = embed
        return embed
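
The final division rescales each embedding to unit L2 norm, so a plain dot product between two embeddings equals their cosine similarity. A standalone sketch of that property (illustration only, not part of the original file; TF 1.x API):

import numpy as np
import tensorflow as tf

# Same normalization op as in sentencenet: divide by the row-wise L2 norm.
x = tf.constant(np.random.randn(4, 512), dtype=tf.float32)
x_unit = x / tf.norm(x, axis=-1, keep_dims=True)
# Rows are now unit-length, so this matmul yields pairwise cosine similarities.
sim = tf.matmul(x_unit, x_unit, transpose_b=True)

with tf.Session() as sess:
    print(sess.run(tf.diag_part(sim)))  # self-similarities, ~1.0 on the diagonal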
Source file: BidirectionNet_conv_ltp.py (Python)