def loss(logit_tensor, targets_pl, one_hot_labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Targets Placeholder. 1-D tensor of shape [batch_size]
Returns:
Loss tensor of type float.
"""
    # Calculate the average cross-entropy loss across the batch.
    if one_hot_labels:
        # Dense one-hot labels must be floats, so do not cast them to int64.
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=logit_tensor, labels=tf.to_float(targets_pl),
            name='cross_entropy_per_example')
    else:
        # Sparse labels must be integer class indices.
        targets = tf.to_int64(targets_pl)
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logit_tensor, labels=targets,
            name='cross_entropy_per_example_sparse')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
    tf.add_to_collection('losses', cross_entropy_mean)
    tf.summary.scalar('loss', cross_entropy_mean)
    return cross_entropy_mean
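
# Minimal usage sketch (assumptions, not from the source: TF 1.x graph mode;
# tf.layers.dense stands in for the inference() network; shapes are
# illustrative only).
import tensorflow as tf

batch_size, num_classes = 32, 10
images_pl = tf.placeholder(tf.float32, shape=[batch_size, 784])
targets_pl = tf.placeholder(tf.int32, shape=[batch_size])  # sparse class indices

logits = tf.layers.dense(images_pl, num_classes)  # hypothetical inference()
total_loss = loss(logits, targets_pl, one_hot_labels=False)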