def inference(self, X, reuse=None, trainMode=True):
    # Look up character embeddings for the input ids.
    word_vectors = tf.nn.embedding_lookup(self.words, X)
    # Actual (non-padded) length of each sequence in the batch.
    length = self.length(X)
    length_64 = tf.cast(length, tf.int64)
    # Share variables between the training and test graphs: the first
    # (training) call creates the variables, later calls reuse them.
    reuse = None if trainMode else True
    if FLAGS.embedding_size_2 > 0:
        # Optional second embedding table, concatenated along the
        # feature dimension. Note: tf.concat takes (values, axis) in
        # TF 1.x, matching the concat call further below.
        word_vectors2 = tf.nn.embedding_lookup(self.words2, X)
        word_vectors = tf.concat([word_vectors, word_vectors2], 2)
    # if trainMode:
    #     word_vectors = tf.nn.dropout(word_vectors, 0.5)
    with tf.variable_scope("rnn_fwbw", reuse=reuse):
        # Forward LSTM over the sequence.
        forward_output, _ = tf.nn.dynamic_rnn(
            tf.contrib.rnn.LSTMCell(self.numHidden, reuse=reuse),
            word_vectors,
            dtype=tf.float32,
            sequence_length=length,
            scope="RNN_forward")
        # Backward LSTM: reverse the inputs, run the cell, then reverse
        # its outputs back so they align with the forward outputs.
        backward_output_, _ = tf.nn.dynamic_rnn(
            tf.contrib.rnn.LSTMCell(self.numHidden, reuse=reuse),
            inputs=tf.reverse_sequence(word_vectors,
                                       length_64,
                                       seq_dim=1),
            dtype=tf.float32,
            sequence_length=length,
            scope="RNN_backward")
        backward_output = tf.reverse_sequence(backward_output_,
                                              length_64,
                                              seq_dim=1)
    # Concatenate the two directions: [batch, time, 2 * numHidden].
    output = tf.concat([forward_output, backward_output], 2)
    # Flatten out the time dimension for the shared projection layer.
    output = tf.reshape(output, [-1, self.numHidden * 2])
    if trainMode:
        output = tf.nn.dropout(output, 0.5)
    # Project each step to per-tag unary scores for the CRF layer.
    matricized_unary_scores = tf.matmul(output, self.W) + self.b
    # matricized_unary_scores = tf.nn.log_softmax(matricized_unary_scores)
    unary_scores = tf.reshape(
        matricized_unary_scores,
        [-1, FLAGS.max_sentence_len, self.distinctTagNum])
    return unary_scores, length
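For context, here is a minimal usage sketch of how this method is typically called twice, once for the training graph and once for the test graph. The names Model, train_X, and test_X are assumptions for illustration and do not come from the snippet above; only the trainMode/reuse behavior is taken from it.

import tensorflow as tf

model = Model()  # hypothetical: the class that defines inference()

train_X = tf.placeholder(tf.int32, [None, FLAGS.max_sentence_len])
test_X = tf.placeholder(tf.int32, [None, FLAGS.max_sentence_len])

# First call: trainMode=True, so reuse stays None, the LSTM and
# projection variables are created, and dropout is applied.
train_scores, train_length = model.inference(train_X, trainMode=True)

# Second call: trainMode=False sets reuse=True internally, so the same
# weights are shared with the training graph and dropout is disabled.
test_scores, test_length = model.inference(test_X, trainMode=False)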