import numpy as np
from sklearn import metrics


def eval_metrics_on(predictions, labels, regression=True):
    '''
    Returns evaluation scores for the given predictions/targets as a dictionary;
    the entry 'main_metric' duplicates the score used for model selection.

    If regression=True (labels are continuous-valued floats):
        r2, mean_abs_error, mse, rmse, median_absolute_error,
        explained_variance_score (main_metric: rmse)
    Otherwise (binary classification with one-hot labels):
        auc, accuracy (main_metric: accuracy)
    '''
    if isinstance(labels, list) and len(labels[0]) == 2:
        # labels given as a list of (data, labels) batch pairs
        labels = np.concatenate([l[1] for l in labels])
    if regression:
        predictions = predictions[:, 0]  # first output column holds the point estimate
        r2 = metrics.r2_score(labels, predictions)
        mean_abs_error = np.abs(predictions - labels).mean()
        mse = ((predictions - labels) ** 2).mean()
        rmse = np.sqrt(mse)
        median_absolute_error = metrics.median_absolute_error(labels, predictions)  # robust to outliers
        explained_variance_score = metrics.explained_variance_score(labels, predictions)  # best score = 1, lower is worse
        return {'r2': r2, 'mean_abs_error': mean_abs_error, 'mse': mse, 'rmse': rmse,
                'median_absolute_error': median_absolute_error,
                'explained_variance_score': explained_variance_score,
                'main_metric': rmse}
    else:
        predictions = predictions[:, 1]  # predicted probability of the positive class
        if labels.max() != 1:
            raise ValueError('only binary classification with one-hot labels is supported')
        # ROC AUC of the positive-class scores against the one-hot labels
        auc = metrics.roc_auc_score(labels[:, 1], predictions)
        accuracy = np.mean((predictions > 0.5) == labels[:, 1])
        return {'auc': auc, 'accuracy': accuracy, 'main_metric': accuracy}
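

# A minimal usage sketch with synthetic data (the arrays below are invented for
# illustration, not taken from any real experiment). Regression predictions are
# passed as an (N, 1) column; classification predictions as (N, 2) class
# probabilities alongside one-hot labels.
if __name__ == '__main__':
    rng = np.random.RandomState(0)

    # regression: predictions[:, 0] is the point estimate
    y_true = rng.rand(100)
    y_pred = (y_true + 0.1 * rng.randn(100)).reshape(-1, 1)
    print(eval_metrics_on(y_pred, y_true))

    # binary classification: predictions[:, 1] is P(class 1), labels one-hot
    onehot = np.eye(2)[rng.randint(0, 2, size=100)]
    pos = np.clip(onehot[:, 1:2] * 0.8 + 0.1 * rng.rand(100, 1), 0.0, 1.0)
    probs = np.hstack([1.0 - pos, pos])
    print(eval_metrics_on(probs, onehot, regression=False))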