def _get_optimizer(self, training_iters, global_step):
    # Build the training op for the configured optimizer ("momentum" or "adam").
    if self.optimizer == "momentum":
        learning_rate = self.opt_kwargs.pop("learning_rate", 0.2)
        decay_rate = self.opt_kwargs.pop("decay_rate", 0.95)

        # Decay the learning rate in discrete steps, once every `training_iters` iterations.
        self.learning_rate_node = tf.train.exponential_decay(learning_rate=learning_rate,
                                                             global_step=global_step,
                                                             decay_steps=training_iters,
                                                             decay_rate=decay_rate,
                                                             staircase=True)

        optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate_node,
                                               momentum=0.9,
                                               **self.opt_kwargs).minimize(self.net.cost,
                                                                           global_step=global_step)
        # Alternative: build the same optimizer but clip gradients explicitly via
        # compute_gradients / apply_gradients (kept commented out; see the sketch after this method):
        # optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate_node, momentum=0.9,
        #                                        **self.opt_kwargs)
        # gvs = optimizer.compute_gradients(self.net.cost)
        # capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
        # optimizer = optimizer.apply_gradients(capped_gvs, global_step=global_step)
    elif self.optimizer == "adam":
        learning_rate = self.opt_kwargs.pop("learning_rate", 0.001)
        self.learning_rate_node = tf.Variable(learning_rate)

        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_node,
                                           **self.opt_kwargs).minimize(self.net.cost,
                                                                       global_step=global_step)

    return optimizer
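
The commented-out lines in the momentum branch sketch a gradient-clipping variant. Below is a minimal standalone sketch of that pattern, assuming TensorFlow 1.x (the `tf.train` API used throughout this snippet); the function name, the `cost` argument, and the default hyper-parameters are placeholders for illustration, not part of the original class.

# Sketch only: element-wise gradient clipping around a TF1 momentum optimizer.
# `cost`, `clipped_momentum_train_op`, and the defaults are hypothetical stand-ins
# for `self.net.cost` and the class attributes used above.
import tensorflow as tf

def clipped_momentum_train_op(cost, learning_rate_node, global_step,
                              clip_value=1.0, momentum=0.9):
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate_node,
                                           momentum=momentum)
    gvs = optimizer.compute_gradients(cost)
    # Clip each gradient to [-clip_value, clip_value]; skip variables whose gradient is None.
    capped_gvs = [(tf.clip_by_value(grad, -clip_value, clip_value), var)
                  for grad, var in gvs if grad is not None]
    return optimizer.apply_gradients(capped_gvs, global_step=global_step)

Element-wise clipping with tf.clip_by_value is what the commented-out code uses; clipping by global norm (tf.clip_by_global_norm) is a common alternative when you want to preserve gradient direction.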