def _load_optimizer(self):
"""
Load the SGD optimizer
:return: None
"""
# loss function
with tf.variable_scope("forward"):
self.loss_fwd = tf.nn.seq2seq.sequence_loss(self.dec_outputs_fwd, self.labels,
self.weights, self.vocab_size)
# optimizer
self.optimizer_fwd = tf.train.MomentumOptimizer(self.learning_rate, self.momentum)
self.train_op_fwd = self.optimizer_fwd.minimize(self.loss_fwd)
with tf.variable_scope("backward"):
self.loss_bwd = tf.nn.seq2seq.sequence_loss(self.dec_outputs_bwd, self.labels,
self.weights, self.vocab_size)
# optimizer
self.optimizer_bwd = tf.train.MomentumOptimizer(self.learning_rate, self.momentum)
self.train_op_bwd = self.optimizer_bwd.minimize(self.loss_bwd)
Source: stacked_bidirectional.py (Python)
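For context, a minimal sketch of how these training ops would typically be driven in an early-TensorFlow session loop. The names model, make_feed, batches, and num_epochs are hypothetical stand-ins for illustration, not identifiers from stacked_bidirectional.py:

import tensorflow as tf

# Hypothetical training loop; make_feed(batch) is assumed to map the
# model's input/label/weight placeholders to one batch of data.
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for epoch in range(num_epochs):
        for batch in batches:
            # One run call applies a single momentum update via
            # train_op_fwd and fetches the forward loss for logging.
            _, loss = sess.run([model.train_op_fwd, model.loss_fwd],
                               feed_dict=make_feed(batch))

The backward decoder is trained the same way with train_op_bwd and loss_bwd. Note that tf.train.MomentumOptimizer applies the classical momentum update: accumulation = momentum * accumulation + gradient, then variable -= learning_rate * accumulation.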