import tensorflow as tf  # TF1-style graph code (use tf.compat.v1 on TF2)


def get_eval_ops(logits, labels, one_hot=False, scope='', calc_accuracy=True):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size], with values in the
range [0, NUM_CLASSES).
Returns:
A scalar int32 tensor with the number of examples (out of batch_size)
that were predicted correctly.
"""
print('Evaluation Ops..')
with tf.name_scope(scope):
        # For a classifier model, we can use the in_top_k Op.
        # It returns a bool tensor with shape [batch_size] that is true
        # for the examples where the label was in the top k (here k=1)
        # of all logits for that example.
# labels = tf.cast(labels, tf.int64)
if one_hot:
labels = tf.argmax(labels, 1)
top_1_op = tf.nn.in_top_k(logits, labels, 1)
num_correct = tf.reduce_sum(tf.cast(top_1_op, tf.float32))
if calc_accuracy:
            # Use the dynamic batch size so this also works when the
            # batch dimension is unknown at graph-construction time.
            acc_percent = tf.divide(num_correct,
                                    tf.cast(tf.shape(labels)[0], tf.float32))
else:
acc_percent = tf.constant(0.0)
        # Count the valid examples, i.e. labels greater than -1 (a label
        # of -1 can be used to mark padded or ignored entries).
        y_const = tf.constant(-1, dtype=labels.dtype)
        y_greater = tf.greater(labels, y_const)
        n_all = tf.reduce_sum(tf.cast(y_greater, tf.float32))
return top_1_op, acc_percent * 100.0, num_correct, n_all, labels
########################################################################
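# A minimal usage sketch (assumptions: TF1 graph mode, a 10-class
# classifier, and randomly generated NumPy batches; the placeholder
# names below are hypothetical and not part of the original code).
if __name__ == '__main__':
    import numpy as np

    logits_ph = tf.placeholder(tf.float32, shape=[None, 10])
    labels_ph = tf.placeholder(tf.int32, shape=[None])
    top_1, acc, n_correct, n_all, _ = get_eval_ops(
        logits_ph, labels_ph, one_hot=False, scope='eval')

    with tf.Session() as sess:
        batch_logits = np.random.randn(8, 10).astype(np.float32)
        batch_labels = np.random.randint(0, 10, size=8).astype(np.int32)
        acc_val, correct_val = sess.run(
            [acc, n_correct],
            feed_dict={logits_ph: batch_logits, labels_ph: batch_labels})
        print('accuracy: %.2f%%  correct: %d' % (acc_val, correct_val))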