import pandas as pd
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
# `util` (argument checking) and `quadratic_weighted_kappa` are assumed to be
# project-local helpers imported elsewhere in this module.


def get_metrics(actual_labels_file, predict_labels_file):
    util.check_required_program_args([actual_labels_file, predict_labels_file])
    actual_labels_df = pd.read_csv(actual_labels_file, names=['image', 'label'], header=0)
    predict_labels_df = pd.read_csv(predict_labels_file, names=['image', 'label'], header=0)
    # Assumes an equal number of items in both files.
    assert actual_labels_df['image'].count() == predict_labels_df['image'].count()
    # Sort both frames by image name so that y_true and y_pred are aligned row by row.
    actual_labels_df = actual_labels_df.sort_values(by=['image'])
    predict_labels_df = predict_labels_df.sort_values(by=['image'])
    assert list(actual_labels_df['image'].values) == list(predict_labels_df['image'].values)
    y_labels = actual_labels_df['image'].values
    y_true = actual_labels_df['label'].values
    y_pred = predict_labels_df['label'].values
    print("Confusion matrix:")
    print(confusion_matrix(y_true, y_pred))
    print("")
    print("Classification report:")
    print(classification_report(y_true, y_pred))
    accuracy = accuracy_score(y_true, y_pred)
    kappa = quadratic_weighted_kappa(y_true, y_pred)
    print('Accuracy: %.4f' % accuracy)
    print('Kappa: %.4f' % kappa)
    print("")