def precision_recall_at_x_proportion(test_labels, test_predictions, x_proportion=0.01,
                                     return_cutoff=False):
    """Compute precision, recall, and F1 for a specified fraction of the test set.

    The highest-scoring ``x_proportion`` of predictions are flagged as
    positive, and standard classification metrics are reported for the
    positive class (label 1).

    :param list test_labels: true binary labels (0/1) on the test set
    :param list test_predictions: predicted probabilities on the test set
    :param float x_proportion: proportion of the test set to flag
    :param bool return_cutoff: if True, also return the cutoff probability
    :returns float precision: fraction of flagged items that are truly positive
    :returns float recall: fraction of the positive class recovered
    :returns float f1: harmonic mean of precision and recall
    :returns float cutoff_probability: score threshold used (only if return_cutoff)
    """
    # Index of the score that marks the flagging threshold; clamp so that
    # x_proportion >= 1.0 (or rounding) cannot index past the end.
    cutoff_index = int(len(test_predictions) * x_proportion)
    cutoff_index = min(cutoff_index, len(test_predictions) - 1)
    sorted_by_probability = np.sort(test_predictions)[::-1]
    cutoff_probability = sorted_by_probability[cutoff_index]
    # Strictly-greater comparison: ties at the cutoff score are NOT flagged,
    # so slightly fewer than x_proportion items may end up selected.
    test_predictions_binary = [1 if x > cutoff_probability else 0 for x in test_predictions]
    # labels=[0, 1] guarantees per-class arrays of length 2 even when the
    # binarized predictions (or the true labels) contain only one class;
    # without it the [1] indexing below can raise IndexError.
    precision, recall, f1, _ = metrics.precision_recall_fscore_support(
        test_labels, test_predictions_binary, labels=[0, 1])
    # Only interested in metrics for the positive class (label 1).
    precision, recall, f1 = precision[1], recall[1], f1[1]
    if return_cutoff:
        return precision, recall, f1, cutoff_probability
    else:
        return precision, recall, f1
evaluation.py 文件源码
python
阅读 29
收藏 0
点赞 0
评论 0
评论列表
文章目录