def build_train(self, total_loss):
    with self.G.as_default():
        self.opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        # can't use opt.minimize because we need to clip the gradients
        grads_and_vars = self.opt.compute_gradients(total_loss)
        # clip each gradient to max_grad_norm, then add Gaussian noise
        grads_and_vars = [(tf.clip_by_norm(g, self.max_grad_norm), v)
                          for g, v in grads_and_vars]
        grads_and_vars = [(add_gradient_noise(g), v) for g, v in grads_and_vars]
        # zero out the gradient slice for the nil (padding) slot of the
        # embedding variables listed in self.nil_vars, so it is never updated
        nil_grads_and_vars = []
        for g, v in grads_and_vars:
            if v.name in self.nil_vars:
                nil_grads_and_vars.append((zero_nil_slot(g), v))
            else:
                nil_grads_and_vars.append((g, v))
        self.train_op = self.opt.apply_gradients(nil_grads_and_vars, name="train_op")
        return self.train_op
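The snippet calls two helpers, add_gradient_noise and zero_nil_slot, that are not defined above. Below is a minimal sketch of what they could look like, assuming TensorFlow 1.x; the noise stddev of 1e-3 and the convention that row 0 of the embedding matrix is the nil/padding word are assumptions, not taken from this snippet.

import tensorflow as tf

def add_gradient_noise(t, stddev=1e-3, name=None):
    # Add Gaussian noise to a gradient tensor (assumed stddev of 1e-3).
    with tf.name_scope(name, "add_gradient_noise", [t]):
        t = tf.convert_to_tensor(t, name="t")
        gn = tf.random_normal(tf.shape(t), stddev=stddev)
        return tf.add(t, gn, name=name)

def zero_nil_slot(t, name=None):
    # Replace row 0 of a 2-D gradient with zeros, assuming row 0 of the
    # corresponding embedding is the nil/padding slot that must stay fixed.
    with tf.name_scope(name, "zero_nil_slot", [t]):
        t = tf.convert_to_tensor(t, name="t")
        s = tf.shape(t)[1]
        z = tf.zeros(tf.stack([1, s]), dtype=t.dtype)
        return tf.concat([z, tf.slice(t, [1, 0], [-1, -1])], 0, name=name)

With helpers like these, the gradients of the embedding variables named in self.nil_vars keep their padding row at zero throughout training, while all other gradients are only clipped and perturbed with noise.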