def evaluate(self, ranking=None, labels=None, ranked_labels=None, scales=None):
    '''
    Evaluate the NDCG metric on the specified ranked list of document
    relevance scores.

    The input can be either a ranked list of relevance labels
    (`ranked_labels`), which is the most convenient from a computational
    point of view, or a ranking of documents (`ranking`) together with
    their relevance scores (`labels`), from which the ranked relevance
    labels are derived.

    Parameters
    ----------
    ranking: array, shape = (n_documents,)
        Indices of the ranked documents, i.e. `ranking[i]` is the index
        (into `labels`) of the document placed at rank position `i`.
    labels: array, shape = (n_documents,)
        Relevance score of each document.
    ranked_labels: array, shape = (n_documents,)
        Relevance scores of the ranked documents. If given, `ranking`
        and `labels` are ignored.
    scales: float, optional (default is None)
        The ideal DCG value on the given documents. If None is given
        it will be computed from the document relevance scores.
        NOTE(review): accepted for interface compatibility but not used
        by the visible code path -- confirm against
        `get_score_from_labels_list`.

    Raises
    ------
    ValueError
        If `ranking` and `labels` differ in length, or if neither
        `ranked_labels` nor both `ranking` and `labels` are given.
    '''
    if ranked_labels is not None:
        return self.get_score_from_labels_list(ranked_labels)

    if ranking is None or labels is None:
        # Previously this fell through and silently returned None;
        # fail loudly instead of producing a bogus "score".
        raise ValueError('either ranked_labels or both ranking and labels must be given')

    ranking = np.asarray(ranking)
    labels = np.asarray(labels)

    if ranking.shape[0] != labels.shape[0]:
        raise ValueError('number of ranked documents != number of relevance labels (%d, %d)' \
                         % (ranking.shape[0], labels.shape[0]))

    # Gather each ranked document's label via fancy indexing. The previous
    # implementation sorted `labels` with a dict keyed on *label values*
    # (dict(zip(labels, ranking))), which silently collapses duplicate
    # relevance grades -- the common case -- and yields a wrong ordering.
    ranked_labels = labels[ranking].astype(np.intc)
    return self.get_score_from_labels_list(ranked_labels)