def inference(X, weights, bias, reuse=None, trainMode=True):
    """Build the BiLSTM scoring graph for sequence tagging.

    Args:
        X: int tensor of word ids, shape [batch_size, max_sentence_len]
            (the inline comments below assume max_sentence_len == 80 and
            embedding dim 50 — TODO confirm against WORDS/FLAGS).
        weights: projection matrix, shape [num_hidden * 2, num_tags].
        bias: projection bias, shape [num_tags].
        reuse: IGNORED — unconditionally overwritten from `trainMode`
            below (kept in the signature for caller compatibility).
        trainMode: when True, creates fresh variables and applies dropout;
            when False, reuses the training variables and skips dropout.

    Returns:
        A tuple (unary_scores, length):
        unary_scores -- per-token tag scores, shape
            [batch_size, max_sentence_len, num_tags];
        length -- per-example true sequence lengths from GetLength(X).
    """
    word_vectors = tf.nn.embedding_lookup(WORDS, X)
    # [batch_size, 80, 50]
    length = GetLength(X)
    # reverse_sequence requires int64 lengths in TF 1.x.
    length_64 = tf.cast(length, tf.int64)
    # NOTE(review): the `reuse` argument is deliberately clobbered here —
    # variables are created on the training pass (reuse=None) and reused
    # on the eval pass (reuse=True). Any caller-supplied value is ignored.
    reuse = None if trainMode else True
    #if trainMode:
    #    word_vectors = tf.nn.dropout(word_vectors, 0.5)
    with tf.variable_scope("rnn_fwbw", reuse=reuse) as scope:
        # Forward direction over the raw sequence.
        forward_output, _ = tf.nn.dynamic_rnn(
            tf.contrib.rnn.LSTMCell(FLAGS.num_hidden, reuse=reuse),
            word_vectors,
            dtype=tf.float32,
            sequence_length=length,
            scope="RNN_forward")
        # Backward direction: reverse each sequence up to its true length,
        # run a second LSTM, then reverse the outputs back so timesteps
        # align with the forward pass.
        backward_output_, _ = tf.nn.dynamic_rnn(
            tf.contrib.rnn.LSTMCell(FLAGS.num_hidden, reuse=reuse),
            inputs=tf.reverse_sequence(word_vectors,
                                       length_64,
                                       seq_dim=1),
            dtype=tf.float32,
            sequence_length=length,
            # Scope name (including the "backword" typo) kept as-is:
            # it is baked into existing checkpoints.
            scope="RNN_backword")
        backward_output = tf.reverse_sequence(backward_output_,
                                              length_64,
                                              seq_dim=1)
    # Concatenate both directions along the feature axis.
    output = tf.concat([forward_output, backward_output], 2)
    # [batch_size, 80, 200]
    # Flatten timesteps so a single matmul projects every token at once.
    output = tf.reshape(output, [-1, FLAGS.num_hidden * 2])
    if trainMode:
        # Dropout only during training (keep_prob = 0.5).
        output = tf.nn.dropout(output, 0.5)
    matricized_unary_scores = tf.matmul(output, weights) + bias
    # [batch_size, 80, 4]
    unary_scores = tf.reshape(
        matricized_unary_scores,
        [-1, FLAGS.max_sentence_len, FLAGS.num_tags])
    return unary_scores, length