def loss(logits, labels):
    """Calculate the mean softmax cross-entropy loss from logits and labels.

    Args:
        logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size], with values in
            [0, NUM_CLASSES).

    Returns:
        loss: Scalar loss tensor of type float (mean cross-entropy over
            the batch). Also emits a 'summary/loss' scalar summary as a
            side effect.
    """
    # The sparse variant consumes integer class indices directly, so there
    # is no need to materialize dense one-hot vectors with the deprecated
    # tf.sparse_to_dense / tf.concat index construction; the result is
    # mathematically identical and more numerically careful.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='loss')
    tf.summary.scalar('summary/loss', loss)
    return loss
# NOTE(review): removed web-scrape residue ("评论列表" / "文章目录" — blog
# "comment list" / "article TOC" links); as bare identifiers they would
# raise NameError on import.