import numpy as np
from collections import namedtuple


def get_relevance_scores(matched_predictions, positive_feedback, not_rated_penalty):
    users_num = matched_predictions.shape[0]
    reldata = get_relevance_data(matched_predictions, positive_feedback, not_rated_penalty)
    true_pos, false_pos = reldata.tp, reldata.fp
    true_neg, false_neg = reldata.tn, reldata.fn
    with np.errstate(invalid='ignore'):
        # positive predictive value
        precision = true_pos / (true_pos + false_pos)
        # sensitivity (true positive rate)
        recall = true_pos / (true_pos + false_neg)
        # false positive rate
        fallout = false_pos / (false_pos + true_neg)
        # true negative rate
        specifity = true_neg / (false_pos + true_neg)
        # false negative rate
        miss_rate = false_neg / (false_neg + true_pos)
    # average over all users
    precision = unmask(np.nansum(precision) / users_num)
    recall = unmask(np.nansum(recall) / users_num)
    fallout = unmask(np.nansum(fallout) / users_num)
    specifity = unmask(np.nansum(specifity) / users_num)
    miss_rate = unmask(np.nansum(miss_rate) / users_num)

    Relevance = namedtuple('Relevance', ['precision', 'recall', 'fallout', 'specifity', 'miss_rate'])
    return Relevance(precision, recall, fallout, specifity, miss_rate)
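
The averaging step relies on np.nansum: a user whose metric is undefined (for example, precision is 0/0 when that user received no matched predictions) yields NaN and contributes nothing to the sum, yet the division is still by the total number of users. Below is a minimal, self-contained sketch of that behaviour with made-up per-user counts; the arrays are illustrative only and are not produced by get_relevance_data.

import numpy as np

# hypothetical per-user counts for 3 users; user 2 has no matched predictions,
# so its precision is 0/0 -> NaN
true_pos = np.array([3, 0, 2])
false_pos = np.array([1, 0, 3])
false_neg = np.array([2, 4, 1])

users_num = true_pos.shape[0]
with np.errstate(invalid='ignore'):
    precision = true_pos / (true_pos + false_pos)  # [0.75, nan, 0.4]
    recall = true_pos / (true_pos + false_neg)     # [0.6, 0.0, 0.667]

# NaN entries are treated as zero, but the denominator stays users_num
avg_precision = np.nansum(precision) / users_num   # (0.75 + 0.4) / 3 ~ 0.383
avg_recall = np.nansum(recall) / users_num         # ~ 0.422
print(avg_precision, avg_recall)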