def get_optimizer(self, learning_rate=0.001):
    #-----------------------------------------------------------------------
    # Build the loss: sequence_loss computes the average cross-entropy over
    # the batch, with every target token weighted equally (weights of 1)
    #-----------------------------------------------------------------------
    with tf.name_scope('loss'):
        input_shape = tf.shape(self.inputs)
        ones = tf.ones([input_shape[0], input_shape[1]])
        loss = tf.contrib.seq2seq.sequence_loss(self.logits, self.targets,
                                                ones)

    #-----------------------------------------------------------------------
    # Build the optimizer: Adam with gradients clipped to [-1, 1] to guard
    # against exploding gradients
    #-----------------------------------------------------------------------
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        gradients = optimizer.compute_gradients(loss)
        capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var)
                            for grad, var in gradients if grad is not None]
        optimizer_op = optimizer.apply_gradients(capped_gradients)

    return optimizer_op, loss
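
For reference, here is a minimal training-loop sketch showing how the returned optimizer_op and loss might be used with the TF 1.x Session API. The names model, batches, and num_epochs are illustrative placeholders and not part of the original code.

# Illustrative usage sketch (assumes `model` exposes the self.inputs /
# self.targets placeholders used above, and that `batches` yields
# already-padded (inputs, targets) numpy arrays).
optimizer_op, loss = model.get_optimizer(learning_rate=0.001)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(num_epochs):
        for batch_inputs, batch_targets in batches:
            # Run one optimization step and fetch the current batch loss
            _, batch_loss = sess.run(
                [optimizer_op, loss],
                feed_dict={model.inputs: batch_inputs,
                           model.targets: batch_targets})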