import sys

import numpy as np
from sklearn import metrics

# accuracy / precision / recall / f_score are assumed to be defined
# elsewhere in this module, each taking (pred_label, true_label).

def print_evaluation_result(clf, bags_test, args):
    # Score each test bag with the trained classifier; a non-negative
    # score is predicted as label +1, a negative score as label -1.
    pred_score = np.array([clf(B.data()) for B in bags_test])
    pred_label = np.array([1 if score >= 0 else -1 for score in pred_score])
    true_label = np.array([B.y for B in bags_test])
    a = accuracy (pred_label, true_label)  # accuracy
    p = precision(pred_label, true_label)  # precision
    r = recall   (pred_label, true_label)  # recall
    f = f_score  (pred_label, true_label)  # F-score
    # roc_auc_score expects labels in {0, 1}, so map {-1, +1} -> {0, 1}.
    auc = metrics.roc_auc_score((true_label + 1) / 2, pred_score)
    if not args.aucplot:
        # Plain CSV output: a commented header line followed by the values.
        sys.stdout.write("# accuracy,precision,recall,f-score,ROC-AUC\n"
                         "{:.3f},{:.3f},{:.3f},{:.3f},{:.3f}\n".format(a, p, r, f, auc))
        sys.stdout.flush()
    else:
        # AUC-plot mode: comment out the metrics line as well, then dump the
        # raw (score, true label) pairs so an ROC curve can be plotted later.
        sys.stdout.write("# accuracy,precision,recall,f-score,ROC-AUC\n"
                         "# {:.3f},{:.3f},{:.3f},{:.3f},{:.3f}\n".format(a, p, r, f, auc))
        sys.stdout.flush()
        np.savetxt(sys.stdout.buffer, np.c_[pred_score, true_label])
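For reference, a minimal sketch of the interface this function expects. Everything here is a hypothetical stand-in, not part of the original code: the `Bag` class mimics the `B.data()` / `B.y` protocol, the metric helpers are thin sklearn wrappers filling in for the module's own definitions, and `clf` is a toy decision function.

import argparse

import numpy as np
from sklearn import metrics as _m

# Stand-ins for the module's metric helpers (assumed (pred, true) order).
def accuracy(pred, true):  return _m.accuracy_score(true, pred)
def precision(pred, true): return _m.precision_score(true, pred)
def recall(pred, true):    return _m.recall_score(true, pred)
def f_score(pred, true):   return _m.f1_score(true, pred)

class Bag:
    """Hypothetical bag container exposing the B.data() / B.y interface."""
    def __init__(self, instances, y):
        self._instances = np.asarray(instances)
        self.y = y  # bag label in {-1, +1}

    def data(self):
        return self._instances

# Toy decision function: mean of the first feature over the bag's instances.
clf = lambda X: float(X[:, 0].mean())

# Synthetic test bags whose instances are shifted by the bag label.
rng = np.random.default_rng(0)
bags_test = [Bag(rng.normal(y, 1.0, size=(5, 3)), y)
             for y in rng.choice([-1, 1], size=20)]

args = argparse.Namespace(aucplot=False)
print_evaluation_result(clf, bags_test, args)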