# TensorFlow 1.x API (`import tensorflow as tf`); under TF 2.x use tf.compat.v1.
def train_deep_networks(self, global_step):
# Variables that affect learning rate.
num_batches_per_epoch = self.n_train / self.batch_size
decay_steps = int(num_batches_per_epoch * self.num_epochs_per_decay)
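    # For example (numbers illustrative, not from the original post): with
    # n_train = 50000, batch_size = 100, and num_epochs_per_decay = 30,
    # decay_steps = int(500 * 30) = 15000, i.e. the rate drops every 30 epochs.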
    # Decay the learning rates exponentially based on the number of steps.
    # The *_last rates are 10x larger, for the networks' last layers.
    self.img_lr = tf.train.exponential_decay(
        self.initial_learning_rate_img, global_step, decay_steps,
        self.learning_rate_decay_factor, staircase=True)
    self.img_lr_last = tf.train.exponential_decay(
        self.initial_learning_rate_img * 10, global_step, decay_steps,
        self.learning_rate_decay_factor, staircase=True)
    self.txt_lr = tf.train.exponential_decay(
        self.initial_learning_rate_txt, global_step, decay_steps,
        self.learning_rate_decay_factor, staircase=True)
    self.txt_lr_last = tf.train.exponential_decay(
        self.initial_learning_rate_txt * 10, global_step, decay_steps,
        self.learning_rate_decay_factor, staircase=True)
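    # With staircase=True, tf.train.exponential_decay yields a step schedule:
    #     lr(step) = initial_lr * decay_factor ** floor(step / decay_steps)
    # so each rate stays constant for decay_steps steps, then drops by the
    # decay factor; without staircase the decay would be continuous.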
    # Compute gradients of the deep neural networks only,
    # excluding the Centers and Binary Codes.
    # global_step is passed to a single minimize() call so the step counter
    # (and hence the decay schedule) advances once per training iteration.
    apply_gradient_op_img = tf.train.MomentumOptimizer(
        learning_rate=self.img_lr, momentum=0.9).minimize(
            self.total_loss, var_list=self.deep_parameters_img,
            global_step=global_step)
    apply_gradient_op_img_last = tf.train.MomentumOptimizer(
        learning_rate=self.img_lr_last, momentum=0.9).minimize(
            self.total_loss, var_list=self.deep_parameters_img_lastlayer)
    apply_gradient_op_txt = tf.train.MomentumOptimizer(
        learning_rate=self.txt_lr, momentum=0.9).minimize(
            self.total_loss, var_list=self.deep_parameters_txt)
    # The text last layer takes the 10x rate (txt_lr_last), mirroring the image branch.
    apply_gradient_op_txt_last = tf.train.MomentumOptimizer(
        learning_rate=self.txt_lr_last, momentum=0.9).minimize(
            self.total_loss, var_list=self.deep_parameters_txt_lastlayer)
    apply_gradient_op = tf.group(apply_gradient_op_img, apply_gradient_op_img_last,
                                 apply_gradient_op_txt, apply_gradient_op_txt_last)
    return apply_gradient_op
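
# Minimal usage sketch (assuming this method lives on a model class exposing the
# attributes referenced above; `model`, `next_feed_dict`, and `max_iters` are
# hypothetical names, not from the original post):
#
#     global_step = tf.Variable(0, trainable=False, name='global_step')
#     train_op = model.train_deep_networks(global_step)
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         for _ in range(max_iters):
#             sess.run(train_op, feed_dict=next_feed_dict())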