def init_optimizer(self):
    print("setting optimizer..")
    # Build the gradient computation and parameter-update ops for training the model
    trainable_params = tf.trainable_variables()
    # Select the optimizer; fall back to plain gradient descent if the name is unrecognized
    if self.optimizer.lower() == 'adadelta':
        self.opt = tf.train.AdadeltaOptimizer(learning_rate=self.learning_rate)
    elif self.optimizer.lower() == 'adam':
        self.opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    elif self.optimizer.lower() == 'rmsprop':
        self.opt = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
    else:
        self.opt = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
    # Compute gradients of the loss w.r.t. all trainable variables
    gradients = tf.gradients(self.loss, trainable_params)
    # Clip gradients by the given max_gradient_norm to guard against exploding gradients
    clip_gradients, _ = tf.clip_by_global_norm(gradients, self.max_gradient_norm)
    # Apply the clipped gradients and increment global_step
    self.updates = self.opt.apply_gradients(
        zip(clip_gradients, trainable_params), global_step=self.global_step)
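
Once init_optimizer has been called while building the graph, a training step is just a session run of self.updates. The snippet below is only a sketch of that wiring, not part of the original class: it assumes a model object exposing the attributes used above (loss, global_step, updates) and a hypothetical iterable train_batches that yields feed dicts.

# Usage sketch (TF 1.x). `model` and `train_batches` are hypothetical names
# standing in for the class instance that defines init_optimizer and its data feed.
import tensorflow as tf

model.init_optimizer()  # builds model.opt and the model.updates train op
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for feed in train_batches:
        # One optimization step: applies the clipped gradients and increments global_step
        _, loss_val, step = sess.run(
            [model.updates, model.loss, model.global_step], feed_dict=feed)
        if step % 100 == 0:
            print("step {}: loss = {:.4f}".format(step, loss_val))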