def auc_metric(solution, prediction, task='binary.classification'):
    ''' Normalized Area Under the ROC Curve (AUC).
    Returns the Gini index = 2*AUC-1 for binary classification problems.
    Works for a "solution" vector of binary labels 0/1 (or -1/1) and any
    real-valued discriminant scores as predictions. If solution and prediction
    are matrices rather than vectors, the AUC of each column is computed and
    the results averaged (with no weighting). The same code runs for all
    classification problems, but it only treats binary and multilabel
    classification properly.'''
    # auc = metrics.roc_auc_score(solution, prediction, average=None)
    # Not used because of a bug in metrics.roc_auc_score:
    # auc([1,0,0],[1e-10,0,0]) returns an incorrect value
    label_num = solution.shape[1]
    auc = np.empty(label_num)
    for k in range(label_num):
        # Rank the scores for label k; ties receive their average rank
        r_ = tiedrank(prediction[:, k])
        s_ = solution[:, k]
        if sum(s_) == 0:
            print('WARNING: no positive class example in class {}'.format(k + 1))
        npos = sum(s_ == 1)
        nneg = sum(s_ < 1)
        # AUC via the Mann-Whitney U statistic: the rank sum of the positives,
        # minus its minimum possible value, over the number of pos/neg pairs
        auc[k] = (sum(r_[s_ == 1]) - npos * (npos + 1) / 2) / (nneg * npos)
    return 2 * mvmean(auc) - 1
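
# A minimal usage sketch (illustrative only; it assumes numpy is imported
# as np and that the tiedrank/mvmean helpers are defined elsewhere in this
# module). Each column below is perfectly ranked, so per-column AUC is 1
# and the normalized score 2*AUC-1 is 1.0:
#
#     sol  = np.array([[1, 0], [0, 1], [1, 0], [0, 1]])
#     pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3], [0.4, 0.6]])
#     auc_metric(sol, pred)  # -> 1.0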
### END CLASSIFICATION METRICS
# ======= Specialized scores ========
# We run all of them for all tasks even though they don't make sense for some tasks