def get_training_tensors(self, learning_rate=0.001, grad_clip=5):
    #-----------------------------------------------------------------------
    # Build a loss function
    #-----------------------------------------------------------------------
    with tf.name_scope('targets-encode'):
        # One-hot encode the integer targets and reshape them to match
        # the shape of the logits tensor
        y_one_hot = tf.one_hot(self.targets, self.n_classes)
        y_reshaped = tf.reshape(y_one_hot, self.logits.get_shape())

    with tf.name_scope('loss'):
        # Per-step softmax cross-entropy, averaged over the batch
        loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,
                                                       labels=y_reshaped)
        loss = tf.reduce_mean(loss)
        tf.summary.scalar('loss', loss)

    #-----------------------------------------------------------------------
    # Build the optimizer
    #-----------------------------------------------------------------------
    with tf.name_scope('optimizer'):
        # Clip gradients by their global norm to keep RNN training stable,
        # then apply the clipped gradients with Adam
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                          grad_clip)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.apply_gradients(zip(grads, tvars))

    return loss, train_op
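For context, here is a minimal sketch of how the two returned tensors might be consumed in a TF 1.x training loop. The `model` object, its `inputs`/`targets` placeholders, the `batches` iterable, and `n_epochs` are assumptions for illustration; only `loss` and `train_op` come from the function above.

# Hypothetical usage sketch -- assumes a `model` exposing `inputs` and
# `targets` placeholders, plus a `batches` iterable of (x, y) arrays
loss_t, train_op = model.get_training_tensors(learning_rate=0.001,
                                              grad_clip=5)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epochs):
        for x, y in batches:
            # Running train_op applies one clipped-gradient Adam step;
            # loss_t is fetched alongside it for monitoring
            batch_loss, _ = sess.run([loss_t, train_op],
                                     feed_dict={model.inputs: x,
                                                model.targets: y})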