def f1_score(logits, targets_pl, one_hot=False):
    """Build TF ops computing accuracy, precision, recall, and F1 for binary labels.

    Also attaches `tf.summary.scalar` summaries for all four metrics.

    Args:
        logits: Tensor of per-class scores, shape (batch, num_classes);
            the predicted class is `argmax` over axis 1.
        targets_pl: Ground-truth labels. Either integer class labels of
            shape (batch,) when `one_hot=False`, or one-hot encoded labels
            of shape (batch, num_classes) when `one_hot=True`.
            NOTE(review): the tp/tn/fp/fn arithmetic below assumes labels
            are binary (0/1) — confirm with callers.
        one_hot: If True, `targets_pl` is one-hot and is reduced with argmax.

    Returns:
        Scalar float32 tensor holding the F1 score, named
        'f1_score_reduce_mean' (name preserved for existing consumers).
    """
    targets = tf.to_int64(targets_pl)
    # tf.argmax replaces the deprecated tf.arg_max alias.
    y_predicted = tf.argmax(logits, 1)
    if one_hot:
        y_true = tf.argmax(targets, 1)
    else:
        # BUG FIX: the original assigned `logits` here, comparing predictions
        # against the model's own outputs. Ground truth must come from the
        # int64-converted targets.
        y_true = targets
    # True positives: product is nonzero only where both prediction and label are 1.
    tp = tf.count_nonzero(y_predicted * y_true)
    # True negatives: same trick on the inverted (x - 1) labels.
    tn = tf.count_nonzero((y_predicted - 1) * (y_true - 1))
    fp = tf.count_nonzero(y_predicted * (y_true - 1))
    fn = tf.count_nonzero((y_predicted - 1) * y_true)
    # Python `/` on int64 tensors maps to tf.truediv, yielding float results.
    accuracy = (tp + tn) / (tp + fp + fn + tn)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    # Renamed from `f1_score` to avoid shadowing the enclosing function's name.
    f1 = (2 * precision * recall) / (precision + recall)
    tf.summary.scalar('accuracy', accuracy)
    tf.summary.scalar('precision', precision)
    tf.summary.scalar('recall', recall)
    tf.summary.scalar('f1-score', f1)
    # reduce_mean on a scalar is a no-op; kept solely to preserve the op
    # name 'f1_score_reduce_mean' and the float32 cast for existing callers.
    f1 = tf.reduce_mean(tf.cast(f1, 'float32'), name='f1_score_reduce_mean')
    return f1
# NOTE(review): web-scrape artifacts removed from code flow — the original
# lines read "评论列表" (comment list) and "文章目录" (article table of
# contents), i.e. blog-page chrome, not program text.