def build_model(self):
    # Assumes `import tensorflow as tf` (TensorFlow 1.x) at module level.
    self.build_memory()

    # Output projection: map the final memory-hop state to vocabulary logits.
    self.W = tf.Variable(tf.random_normal([self.edim, self.nwords], stddev=self.init_std))
    z = tf.matmul(self.hid[-1], self.W)

    # Softmax cross-entropy against the target word; TF 1.x requires
    # keyword arguments here, not positional ones.
    self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=z, labels=self.target)

    self.lr = tf.Variable(self.current_lr)
    self.opt = tf.train.GradientDescentOptimizer(self.lr)

    # Clip each parameter's gradient to max_grad_norm before applying it.
    params = [self.A, self.B, self.C, self.T_A, self.T_B, self.W]
    grads_and_vars = self.opt.compute_gradients(self.loss, params)
    clipped_grads_and_vars = [(tf.clip_by_norm(gv[0], self.max_grad_norm), gv[1])
                              for gv in grads_and_vars]

    # Increment global_step as a side effect of every training step.
    inc = self.global_step.assign_add(1)
    with tf.control_dependencies([inc]):
        self.optim = self.opt.apply_gradients(clipped_grads_and_vars)

    # Renamed from the deprecated tf.initialize_all_variables().
    tf.global_variables_initializer().run()
    self.saver = tf.train.Saver()