# Assumes module-level imports: `import tensorflow as tf` and `import tfplus`
# (TF1-style graph-mode API).
def build_loss_grad(self, inp, output):
    """Builds the cross-entropy loss, the Adam training op, and accuracy."""
    y_gt = inp['y_gt']
    y_out = output['y_out']
    # Per-example cross entropy, summed and divided by the batch size
    # (i.e. the batch mean).
    ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
    num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
    ce = tf.reduce_sum(ce) / num_ex_f
    self.add_loss(ce)
    total_loss = self.get_loss()
    self.register_var('loss', total_loss)

    # Adam optimizer with hyperparameters read from the model options.
    learn_rate = self.get_option('learn_rate')
    eps = self.get_option('adam_eps')
    optimizer = tf.train.AdamOptimizer(learn_rate, epsilon=eps)
    # Mark the step counter non-trainable so the optimizer does not try to
    # compute a gradient for it; `minimize` increments it once per step.
    global_step = tf.Variable(0.0, trainable=False)
    self.register_var('step', global_step)
    train_step = optimizer.minimize(total_loss, global_step=global_step)
    self.register_var('train_step', train_step)

    # Classification accuracy, averaged over the batch.
    correct = tf.equal(tf.argmax(y_gt, 1), tf.argmax(y_out, 1))
    acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
    self.register_var('acc', acc)
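
For context, here is a minimal sketch of a training loop around this method. How the registered variables are fetched back out depends on the tfplus model class, so the `model.get_var` accessor, the `x` / `y_gt` placeholders, and `data_iterator` below are assumptions for illustration, not confirmed tfplus API:

import tensorflow as tf

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for batch_x, batch_y in data_iterator:  # hypothetical data source
    # One optimization step; also fetch the batch-mean loss and accuracy
    # that build_loss_grad registered above.
    _, loss_val, acc_val = sess.run(
        [model.get_var('train_step'),   # assumed accessor for registered vars
         model.get_var('loss'),
         model.get_var('acc')],
        feed_dict={x: batch_x, y_gt: batch_y})  # hypothetical placeholders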