# Assumed module-level imports for this excerpt (not shown on this page):
# import tensorflow as tf
# from tensorflow.contrib import rnn

def _init_model(self):
    # Create the stacked forward LSTM cell
    cell_fw = rnn.MultiRNNCell(
        [rnn.BasicLSTMCell(self._config['hidden_size'])
         for _ in range(self._config['num_layers'])])
    # Create the stacked backward LSTM cell
    cell_bw = rnn.MultiRNNCell(
        [rnn.BasicLSTMCell(self._config['hidden_size'])
         for _ in range(self._config['num_layers'])])
    inputs = self._input.input_data
    # Apply dropout to the input data during training
    # (note: the original assigned the dropout result to a misspelled
    # variable "intpus", so dropout was silently discarded)
    if self._is_training and self._config['keep_prob'] < 1:
        inputs = [tf.nn.dropout(single_input, self._config['keep_prob'])
                  for single_input in inputs]
    self._outputs, _, _ = rnn.static_bidirectional_rnn(
        cell_fw, cell_bw, inputs, dtype=tf.float32)
    # Hidden layer weights => 2*hidden_size because of forward + backward cells
    softmax_w = tf.get_variable("softmax_w",
                                [2 * self._config['hidden_size'],
                                 self._config['num_classes']])
    softmax_b = tf.get_variable("softmax_b", [self._config['num_classes']])
    # Linear projection of the last time step's output
    # logits shape: [batch_size, num_classes]
    self._logits = tf.matmul(self._outputs[-1], softmax_w) + softmax_b
    # Define loss
    # Required targets shape: [batch_size, num_classes] (one-hot vectors)
    self._cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=self._logits,
                                                labels=self._input.targets))
    # Evaluate model
    self._correct_pred = tf.equal(tf.argmax(self._logits, 1),
                                  tf.argmax(self._input.targets, 1))
    self.accuracy = tf.reduce_mean(tf.cast(self._correct_pred, tf.float32))
    # Define optimizer; the learning rate lives in a non-trainable variable
    # so it can be overwritten through the assign op below
    self._lr = tf.Variable(0.0, trainable=False)
    self._train_op = tf.train.AdamOptimizer(
        learning_rate=self._lr).minimize(self._cost)
    self._new_lr = tf.placeholder(
        tf.float32, shape=[], name="new_learning_rate")
    self._lr_update = tf.assign(self._lr, self._new_lr)
Source: bilstm_model.py (Python)
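For context, here is a minimal self-contained sketch (TensorFlow 1.x; n_steps, n_input, and the concrete sizes are illustrative assumptions, not taken from bilstm_model.py) of the input layout that rnn.static_bidirectional_rnn expects: input_data must be a Python list of per-time-step tensors of shape [batch_size, n_input], and each output concatenates the forward and backward states, which is why softmax_w above has 2*hidden_size rows.

import tensorflow as tf
from tensorflow.contrib import rnn

n_steps, n_input, hidden_size = 28, 28, 128  # illustrative sizes only

# A batch of sequences: [batch_size, n_steps, n_input]
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
# Unstack along the time axis into a list of n_steps tensors of shape
# [batch_size, n_input] -- the format the static bidirectional RNN consumes.
inputs = tf.unstack(x, n_steps, axis=1)

cell_fw = rnn.BasicLSTMCell(hidden_size)
cell_bw = rnn.BasicLSTMCell(hidden_size)
outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,
                                             dtype=tf.float32)
# Forward and backward states are concatenated per time step:
print(outputs[-1].get_shape())  # (?, 256) == [batch_size, 2*hidden_size]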
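The last three lines of _init_model implement the usual TF 1.x pattern for changing the learning rate at run time: the optimizer reads it from a non-trainable variable, and a feed-driven assign op overwrites that variable. A hedged sketch of how a training loop might drive it (the assign_lr helper, the model handle, and the decay schedule are hypothetical, not part of bilstm_model.py):

def assign_lr(session, model, lr_value):
    # Push a new learning-rate value into the non-trainable _lr variable
    # through the _lr_update assign op built in _init_model.
    session.run(model._lr_update, feed_dict={model._new_lr: lr_value})

# e.g. once per epoch before running model._train_op:
# assign_lr(sess, model, base_lr * (lr_decay ** epoch))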