def add_training_op(self):
    """Build the training ops for one optimization step.

    Uses two Adagrad optimizers so the embedding matrix can be trained
    with its own learning rate (``config.emb_lr``) while every other
    trainable variable uses ``config.lr``.

    Returns:
        list: ``[nn_train_op, emb_train_op]`` — apply-gradient ops for the
        non-embedding variables and for the embedding variable, respectively.
    """
    loss = self.total_loss
    nn_opt = tf.train.AdagradOptimizer(self.config.lr)
    emb_opt = tf.train.AdagradOptimizer(self.config.emb_lr)

    variables = tf.trainable_variables()
    grads = tf.gradients(loss, variables)

    emb_pairs, nn_pairs = [], []
    for grad, var in zip(grads, variables):
        if grad is None:
            # tf.gradients yields None for variables not connected to the
            # loss; apply_gradients would raise on a (None, var) pair.
            continue
        # Route the embedding matrix to its dedicated optimizer; everything
        # else goes to the main one.
        if "Embed/embedding:0" in var.name:
            emb_pairs.append((grad, var))
        else:
            nn_pairs.append((grad, var))

    train_op = [nn_opt.apply_gradients(nn_pairs),
                emb_opt.apply_gradients(emb_pairs)]
    return train_op
# 评论列表 / 文章目录 ("comment list" / "article table of contents") —
# stray blog-page footer text carried over when this snippet was copied;
# commented out so the module remains importable.