evaluation.py source code

python

Project: polara    Author: Evfro
from collections import namedtuple

import numpy as np

# get_relevance_data and unmask are helpers defined elsewhere in this module (evaluation.py)
def get_relevance_scores(matched_predictions, positive_feedback, not_rated_penalty):
    users_num = matched_predictions.shape[0]
    reldata = get_relevance_data(matched_predictions, positive_feedback, not_rated_penalty)
    true_pos, false_pos = reldata.tp, reldata.fp
    true_neg, false_neg = reldata.tn, reldata.fn

    with np.errstate(invalid='ignore'):
        # positive predictive value
        precision = true_pos / (true_pos + false_pos)
        # sensitivity
        recall = true_pos / (true_pos + false_neg)
        # false positive rate
        fallout = false_pos / (false_pos + true_neg)
        # true negative rate
        specifity = true_neg / (false_pos + true_neg)
        # false negative rate
        miss_rate = false_neg / (false_neg + true_pos)

    #average over all users
    precision = unmask(np.nansum(precision) / users_num)
    recall = unmask(np.nansum(recall) / users_num)
    fallout = unmask(np.nansum(fallout) / users_num)
    specifity = unmask(np.nansum(specifity) / users_num)
    miss_rate = unmask(np.nansum(miss_rate) / users_num)

    scores = namedtuple('Relevance', ['precision', 'recall', 'fallout', 'specifity', 'miss_rate'])
    scores = scores._make([precision, recall, fallout, specifity, miss_rate])
    return scores
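
Below is a minimal, self-contained sketch of the per-user averaging trick used above: ratios that are undefined for a given user (0/0) become NaN under np.errstate(invalid='ignore'), and np.nansum then drops them from the numerator while users_num still counts those users in the denominator. The per-user confusion counts here are made-up illustrations, not output of polara's get_relevance_data helper.

import numpy as np

# hypothetical per-user confusion counts (one entry per user)
true_pos = np.array([3, 0, 2], dtype=float)
false_pos = np.array([1, 0, 1], dtype=float)
false_neg = np.array([2, 4, 0], dtype=float)

users_num = true_pos.shape[0]

with np.errstate(invalid='ignore'):
    # user 2 has no predictions at all, so 0/0 yields NaN here
    precision = true_pos / (true_pos + false_pos)
    recall = true_pos / (true_pos + false_neg)

# nansum treats NaN users as contributing 0 to the sum,
# while users_num keeps them in the denominator
avg_precision = np.nansum(precision) / users_num
avg_recall = np.nansum(recall) / users_num

print(avg_precision, avg_recall)  # ~0.472  ~0.533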