def display_evaluation_metrics(true_labels, predicted_labels, positive_class=1):
    """Print accuracy, precision, recall, and F1 for a binary classifier.

    Each metric is computed with scikit-learn, rounded to 2 decimal
    places, and printed on its own line; nothing is returned.

    Parameters
    ----------
    true_labels : array-like
        Ground-truth class labels.
    predicted_labels : array-like
        Labels predicted by the classifier; same length as ``true_labels``.
    positive_class : int or str, optional
        Label treated as the positive class for precision/recall/F1
        (passed as ``pos_label``); defaults to 1.
    """
    # Fix: the original used Python 2 `print` statements, which are a
    # SyntaxError on Python 3. Behavior (output format) is unchanged.
    print('Accuracy:', np.round(
        metrics.accuracy_score(true_labels,
                               predicted_labels),
        2))
    print('Precision:', np.round(
        metrics.precision_score(true_labels,
                                predicted_labels,
                                pos_label=positive_class,
                                average='binary'),
        2))
    print('Recall:', np.round(
        metrics.recall_score(true_labels,
                             predicted_labels,
                             pos_label=positive_class,
                             average='binary'),
        2))
    print('F1 Score:', np.round(
        metrics.f1_score(true_labels,
                         predicted_labels,
                         pos_label=positive_class,
                         average='binary'),
        2))
# NOTE(review): removed stray web-page text ("评论列表" / "文章目录" — blog
# comment-list / table-of-contents labels) accidentally pasted here; it was
# not Python and broke parsing.