def score(self, y_predicted, y_target, y_prob=None):
""" Compute metrics on classifier predictions
Parameters
----------
y_predicted : np.array [n_samples]
Predicted class labels
y_target : np.array [n_samples]
Target class labels
    y_prob : np.array [n_samples] or None, default=None
        Predicted probabilities of the positive class. If None, the AUC
        score is not computed.
Returns
-------
scores : dict
        Dictionary of scores for the following metrics: accuracy, Matthews
        correlation coefficient, precision, recall, f1, support, confusion
        matrix, and AUC score.
"""
labels = set(y_target)
labels.update(y_predicted)
is_binary = len(labels) <= 2
scores = {}
scores['accuracy'] = metrics.accuracy_score(y_target, y_predicted)
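    # The Matthews correlation coefficient is only reported for binary problems here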
if is_binary:
scores['mcc'] = metrics.matthews_corrcoef(y_target, y_predicted)
else:
scores['mcc'] = None
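    # Per-class precision / recall / F1 / support arrays (one entry per label)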
(scores['precision'],
scores['recall'],
scores['f1'],
scores['support']) = metrics.precision_recall_fscore_support(
y_target, y_predicted
)
    # Sort the labels so the confusion matrix rows/columns have a stable order
    scores['confusion matrix'] = metrics.confusion_matrix(
        y_target, y_predicted, labels=sorted(labels)
    )
    if y_prob is not None:
        # Weighted-average ROC AUC computed directly from the predicted probabilities
        scores['auc score'] = metrics.roc_auc_score(
            y_target, y_prob, average='weighted'
        )
    else:
        scores['auc score'] = None
return scores
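
# A minimal usage sketch. Assumptions (not part of the original code): the
# method above belongs to an evaluator class, named `ClassifierEvaluator`
# here only for illustration, and `from sklearn import metrics` is present
# at module level.
#
#     import numpy as np
#
#     evaluator = ClassifierEvaluator()
#     y_target = np.array([0, 1, 1, 0, 1])
#     y_predicted = np.array([0, 1, 0, 0, 1])
#     y_prob = np.array([0.2, 0.9, 0.4, 0.1, 0.8])  # probability of class 1
#     scores = evaluator.score(y_predicted, y_target, y_prob=y_prob)
#     print(scores['accuracy'], scores['mcc'], scores['auc score'])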