def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    '''Plot a confusion matrix as a heatmap.

    Args:
        cm -- confusion matrix
        title -- plot title
        cmap -- matplotlib colormap for the heatmap
    '''
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
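A minimal usage sketch for the helper above. The classifier and the train/test split are illustrative choices, not part of the original snippet; note that the function reads `iris.target_names` from an `iris` global, so the dataset must be loaded before calling it.

# Usage sketch (assumptions: sklearn's iris dataset and a LogisticRegression stand-in).
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np

iris = datasets.load_iris()  # the helper above uses iris.target_names as a global
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
cm = confusion_matrix(y_test, clf.predict(X_test))
plot_confusion_matrix(cm, title='Iris confusion matrix')
plt.show()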
def print_metrics_regression(y_true, predictions, verbose=1):
predictions = np.array(predictions)
predictions = np.maximum(predictions, 0).flatten()
y_true = np.array(y_true)
y_true_bins = [get_bin_custom(x, CustomBins.nbins) for x in y_true]
prediction_bins = [get_bin_custom(x, CustomBins.nbins) for x in predictions]
cf = metrics.confusion_matrix(y_true_bins, prediction_bins)
if verbose:
        print("Custom bins confusion matrix:")
        print(cf)
kappa = metrics.cohen_kappa_score(y_true_bins, prediction_bins,
weights='linear')
mad = metrics.mean_absolute_error(y_true, predictions)
mse = metrics.mean_squared_error(y_true, predictions)
mape = mean_absolute_percentage_error(y_true, predictions)
if verbose:
        print("Mean absolute deviation (MAD) =", mad)
        print("Mean squared error (MSE) =", mse)
        print("Mean absolute percentage error (MAPE) =", mape)
        print("Cohen kappa score =", kappa)
return {"mad": mad,
"mse": mse,
"mape": mape,
"kappa": kappa}
def print_metrics_log_bins(y_true, predictions, verbose=1):
y_true_bins = [get_bin_log(x, LogBins.nbins) for x in y_true]
prediction_bins = [get_bin_log(x, LogBins.nbins) for x in predictions]
cf = metrics.confusion_matrix(y_true_bins, prediction_bins)
if verbose:
        print("LogBins confusion matrix:")
        print(cf)
return print_metrics_regression(y_true, predictions, verbose)
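Both helpers above lean directly on scikit-learn's metrics module; a toy call with hand-made bins (illustrative numbers only, not from the original code) shows the pieces involved:

# Toy illustration of the scikit-learn calls used above (all numbers are made up).
from sklearn import metrics

y_true_bins = [0, 1, 2, 2, 1]
prediction_bins = [0, 2, 2, 1, 1]
print(metrics.confusion_matrix(y_true_bins, prediction_bins))
print(metrics.cohen_kappa_score(y_true_bins, prediction_bins, weights='linear'))
print(metrics.mean_absolute_error([10.0, 25.0, 60.0], [12.0, 20.0, 55.0]))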
def test_data_cnn_rnn(data, target, groups, cnn, rnn, layername='fc1', cropsize=2800, verbose=1, only_lstm=False):
    """
    Take two already trained models (cnn + rnn),
    run them on the input data and return accuracy + F1 for both, plus the RNN confusion matrix.
    """
if target.ndim==2: target = np.argmax(target,1)
if cropsize != 0:
diff = (data.shape[1] - cropsize)//2
data = data[:,diff:-diff:,:]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if only_lstm == False:
cnn_pred = cnn.predict_classes(data, 1024,verbose=0)
else:
cnn_pred = target
        features = get_activations(cnn, data, layername, verbose=verbose)
cnn_acc = accuracy_score(target, cnn_pred)
cnn_f1 = f1_score(target, cnn_pred, average='macro')
seqlen = rnn.input_shape[1]
features_seq, target_seq, groups_seq = tools.to_sequences(features, target, seqlen=seqlen, groups=groups)
new_targ_seq = np.roll(target_seq, 4)
rnn_pred = rnn.predict_classes(features_seq, 1024, verbose=0)
rnn_acc = accuracy_score(new_targ_seq, rnn_pred)
rnn_f1 = f1_score(new_targ_seq,rnn_pred, average='macro')
confmat = confusion_matrix(new_targ_seq, rnn_pred)
return [cnn_acc, cnn_f1, rnn_acc, rnn_f1, confmat, (rnn_pred, target_seq, groups_seq)]
#%%
def run_model(model):
'''Train model'''
# Call global variables
x_train, x_test, y_train, y_test = X_TRAIN, X_TEST, Y_TRAIN, Y_TEST
model.fit(x_train, y_train)
# make predictions for test data
y_pred = model.predict(x_test)
# Accuracy
acc = metrics.accuracy_score(y_test, y_pred)
print('Accuracy: %.2f%%' % (acc * 100.0))
# F1_score
# f1_score = metrics.f1_score(y_test, y_pred)
# print("F1_score: %.2f%%" % (f1_score * 100.0))
# AUC of ROC
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
auc = metrics.auc(fpr, tpr)
print('AUC: %.3f' % (auc))
# Logs for each fold
crossvalidation_acc.append(acc)
crossvalidation_auc.append(auc)
if ARGS.m:
cnf_matrix = confusion_matrix(y_test, y_pred)
print(cnf_matrix)
np.set_printoptions(precision=2)
if ARGS.t == '2':
            classes = np.asarray(['Spliced', 'Non-spliced'])
plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True)
elif ARGS.t == '3':
classes = np.asarray(['Low', 'Medium', 'High'])
plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True)
plt.show()
if ARGS.f:
feature_selection(imp=IMP, model=model)
print()
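One caveat about the snippet above (an observation, not part of the original code): metrics.roc_curve is fed hard class predictions, which collapses the ROC to a single threshold; feeding predicted probabilities gives the usual AUC. A hedged sketch of that variant, using a RandomForestClassifier and synthetic data purely as stand-ins:

# Sketch: AUC from predicted probabilities rather than hard labels
# (assumptions: binary task; the dataset and model below are illustrative only).
from sklearn import metrics
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, random_state=0)
x_tr, x_te, y_tr, y_te = train_test_split(X, y, random_state=0)
clf = RandomForestClassifier(n_estimators=50, random_state=0).fit(x_tr, y_tr)
proba = clf.predict_proba(x_te)[:, 1]              # positive-class probability
fpr, tpr, _ = metrics.roc_curve(y_te, proba)
print('AUC (probabilities): %.3f' % metrics.auc(fpr, tpr))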
def rf_test(X,y):
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=10)
rf_model = RandomForestClassifier(n_estimators = 100, n_jobs=-1)
rf_model.fit(X_train, y_train)
y_pred = rf_model.predict(X_test)
    print(metrics.accuracy_score(y_test, y_pred))
def mean_class_accuracy(scores, labels):
pred = np.argmax(scores, axis=1)
cf = confusion_matrix(labels, pred).astype(float)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
return np.mean(cls_hit/cls_cnt)
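A tiny worked example of the helper above (toy numbers, just to show the per-class averaging; it assumes numpy and sklearn's confusion_matrix are imported, as the function itself requires):

# Toy check of mean_class_accuracy: class 0 is 2/3 correct, class 1 is 1/2 correct,
# so the mean class accuracy is (2/3 + 1/2) / 2 = 0.5833...
import numpy as np

scores = np.array([[0.9, 0.1],   # true 0 -> predicted 0
                   [0.8, 0.2],   # true 0 -> predicted 0
                   [0.4, 0.6],   # true 0 -> predicted 1
                   [0.3, 0.7],   # true 1 -> predicted 1
                   [0.6, 0.4]])  # true 1 -> predicted 0
labels = np.array([0, 0, 0, 1, 1])
print(mean_class_accuracy(scores, labels))  # ~0.5833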
def classifier_accuracy_report(true_y, prediction):
auc = roc_auc_score(true_y.astype(float), prediction.astype(float))
conf = confusion_matrix(true_y, prediction)
lines = ['AUC: %.3f' % auc,
'Confusion matrix: \n\t%s' % str(conf).replace('\n','\n\t')]
return '\n'.join(lines) + '\n'
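A minimal hedged call of classifier_accuracy_report (binary labels only; the values are made up, and the function's own imports of roc_auc_score and confusion_matrix are assumed to be in place):

# Toy invocation of classifier_accuracy_report.
import numpy as np

true_y = np.array([0, 1, 1, 0, 1])
prediction = np.array([0, 1, 0, 0, 1])
print(classifier_accuracy_report(true_y, prediction))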
def leave_one_out_report(combined_results):
""" Evaluate leave-one-out CV results from different methods.
Arguments:
combined_results: list of tuples of the form
(method_name, true_y_vector, predicted_probabilities_vector)
Note the vectors really do need to be numpy arrays.
Returns: formatted report as string
"""
###
# Unfortunate code duplication with tabulate_metrics here,
# to be resolved later
probability_metrics = [
('AUC', roc_auc_score),
('AP', metrics.average_precision_score)
]
binary_metrics = [
('F1', metrics.f1_score),
('MCC', metrics.matthews_corrcoef),
('precision', metrics.precision_score),
('recall', metrics.recall_score)
]
metric_results = {label: [] for label, _ in
probability_metrics + binary_metrics}
metric_results.update({'tn': [], 'fp': [], 'fn': [], 'tp': []})
    for label, metric in probability_metrics:
        for method, y_true, probabilities in combined_results:
            metric_results[label].append(metric(y_true, probabilities))
for method, y_true, probabilities in combined_results:
y_pred = probabilities > 0.5
for label, metric in binary_metrics:
metric_results[label].append(metric(y_true, y_pred))
conf = zip(
('tn', 'fp', 'fn', 'tp'),
metrics.confusion_matrix(y_true, y_pred).flat
)
for label, n in conf:
metric_results[label].append(n)
    index = [t[0] for t in combined_results]
table = pd.DataFrame(data=metric_results,
index=index)
report = table.to_string(float_format=lambda x: '%.3g' % x)
return report
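A hedged usage sketch for the report function above (two made-up "methods" with toy probabilities; numbers are illustrative only, and the function's own numpy/pandas/sklearn imports are assumed):

# Toy invocation of leave_one_out_report.
import numpy as np

y_true = np.array([0, 1, 1, 0, 1])
combined_results = [
    ('method_a', y_true, np.array([0.2, 0.8, 0.7, 0.4, 0.9])),
    ('method_b', y_true, np.array([0.3, 0.6, 0.4, 0.1, 0.7])),
]
print(leave_one_out_report(combined_results))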
def true_positives(_, predictions_binary, labels, parameters):
return int(confusion_matrix(labels, predictions_binary)[1, 1])
def false_positives(_, predictions_binary, labels, parameters):
return int(confusion_matrix(labels, predictions_binary)[0, 1])
def true_negatives(_, predictions_binary, labels, parameters):
return int(confusion_matrix(labels, predictions_binary)[0, 0])
def false_negatives(_, predictions_binary, labels, parameters):
return int(confusion_matrix(labels, predictions_binary)[1, 0])
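The four helpers above pick individual cells out of the 2x2 confusion matrix; for a binary problem the same numbers can be unpacked in one call, a scikit-learn idiom shown here as an alternative rather than as part of the original code:

# Equivalent one-liner: confusion_matrix(...).ravel() yields (tn, fp, fn, tp)
# in that order for binary labels.
from sklearn.metrics import confusion_matrix

labels             = [0, 1, 1, 0, 1, 0]
predictions_binary = [0, 1, 0, 1, 1, 0]
tn, fp, fn, tp = confusion_matrix(labels, predictions_binary).ravel()
print(tn, fp, fn, tp)  # 2 1 1 2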
main.py (project: Video-Classification-Action-Recognition, author: qijiezhao)
def get_top1(label_sum, out_sum):
    label_sum = label_sum.numpy()
    out_sum = out_sum.numpy().argmax(1)
    assert len(label_sum) == len(out_sum)
    cf = confusion_matrix(label_sum, out_sum).astype(float)
    cls_cnt = cf.sum(axis=1)
    cls_hit = np.diag(cf)
#accuracy=sum([1 for i in range(len(label_sum)) if label_sum[i]==out_sum[i]])/float(len(label_sum))
#return accuracy
return np.mean(cls_hit/cls_cnt)
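A toy call of get_top1 with CPU tensors (values are illustrative; torch, numpy and sklearn's confusion_matrix are assumed importable, as the function above already requires):

# Class 0: 1 of 2 correct, class 1: 2 of 2 correct -> mean class accuracy 0.75.
import torch

labels = torch.tensor([0, 0, 1, 1])
outputs = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.3, 0.7], [0.4, 0.6]])
print(get_top1(labels, outputs))  # 0.75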
def plot_conf_matrix(y_actual, y_predict, labels):
    # In recent scikit-learn versions `labels` is keyword-only, so pass it by name.
    cm = confusion_matrix(y_actual, y_predict, labels=labels)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(cm)
    plt.title('confusion matrix')
    fig.colorbar(cax)
    # The leading '' keeps the tick labels aligned with matshow's default ticks.
    ax.set_xticklabels([''] + labels)
    ax.set_yticklabels([''] + labels)
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.show()
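A hedged usage sketch for plot_conf_matrix (toy string labels only). Newer scikit-learn also ships ConfusionMatrixDisplay, which draws the same kind of plot without the manual tick handling:

# Toy call of the helper above (labels are illustrative).
y_actual  = ['cat', 'dog', 'dog', 'cat', 'bird']
y_predict = ['cat', 'dog', 'cat', 'cat', 'bird']
plot_conf_matrix(y_actual, y_predict, labels=['bird', 'cat', 'dog'])

# Alternative (scikit-learn >= 1.0): let sklearn render the matrix directly.
# from sklearn.metrics import ConfusionMatrixDisplay
# ConfusionMatrixDisplay.from_predictions(y_actual, y_predict)
# plt.show()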