def _optimize(self):
    '''
    NOTE: The author said there is no need for 100 d_iters per 100 iters.
    https://github.com/igul222/improved_wgan_training/issues/3
    '''
    global_step = tf.Variable(0, name='global_step', trainable=False)
    lr = self.arch['training']['lr']
    b1 = self.arch['training']['beta1']
    b2 = self.arch['training']['beta2']
    optimizer = tf.train.AdamOptimizer(lr, b1, b2)

    # Split the trainable variables so that G and D are updated independently.
    trainables = tf.trainable_variables()
    g_vars = [v for v in trainables if 'Generator' in v.name or 'y_emb' in v.name]
    d_vars = [v for v in trainables if 'Discriminator' in v.name]

    # # Debug ===============
    # debug(['Generator', 'Discriminator'], [g_vars, d_vars])
    # # ============================

    with tf.name_scope('Update'):
        # `global_step` is incremented only by the generator update.
        opt_g = optimizer.minimize(self.loss['l_G'], var_list=g_vars, global_step=global_step)
        opt_d = optimizer.minimize(self.loss['l_D'], var_list=d_vars)

    return {
        'd': opt_d,
        'g': opt_g,
        'global_step': global_step,
    }
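The two ops returned here are meant to be run in an alternating schedule (several discriminator steps per generator step). Below is a minimal sketch, not taken from the original repo, of how they might be consumed in a TF1.x session loop; `model`, `num_iters`, and `n_d_iters` are hypothetical names, and the sketch assumes the losses are wired to an input pipeline so no feed_dict is needed.

# Sketch only: `model`, `num_iters`, and `n_d_iters` are assumed names.
import tensorflow as tf

opt = model._optimize()              # {'d': opt_d, 'g': opt_g, 'global_step': global_step}
n_d_iters = 5                        # assumed number of D updates per G update

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(num_iters):       # `num_iters` stands in for the training length
        for _ in range(n_d_iters):
            sess.run(opt['d'])       # update Discriminator variables only
        sess.run(opt['g'])           # update Generator; this op also increments global_step
    step = sess.run(opt['global_step'])

Note that a single AdamOptimizer instance serves both updates: because minimize() is called with disjoint var_list arguments, each variable still gets its own Adam slot variables, so this behaves like two optimizers sharing the same lr, beta1, and beta2.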