def my_custom_log_loss_func(ground_truth, p_predicitons, penalty=None, eps=1e-15):
    """Multi-class logarithmic loss with an optional per-class penalty.

    As a general rule for custom scorers, the first parameter is the actual
    answer (``ground_truth``) and the second is the predicted probabilities
    (``p_predicitons``, one column per class, classes in sorted label order).

    Parameters
    ----------
    ground_truth : array-like of shape (n_samples,)
        True class labels. NOTE(review): when ``penalty`` is used, the labels
        are assumed to be integers 0..n_classes-1, because penalty entries are
        used directly as column indices -- confirm against callers.
    p_predicitons : array-like of shape (n_samples, n_classes)
        Predicted class probabilities. (Parameter name keeps the original
        spelling for backward compatibility with keyword callers.)
    penalty : sequence of int, optional
        Class indices whose loss contribution is doubled. Defaults to no
        penalty. (Previously a mutable default argument ``list()``.)
    eps : float
        Probabilities are clipped to ``[eps, 1 - eps]`` to avoid ``log(0)``.

    Returns
    -------
    float
        The (penalized) log loss, averaged over samples.
    """
    if penalty is None:
        penalty = []
    # Clip predicted probabilities away from 0 and 1 so log() stays finite.
    adj_p = np.clip(p_predicitons, eps, 1 - eps)
    # One-hot encode the ground truth over the sorted set of observed labels.
    # Equivalent to sklearn's LabelBinarizer for binary and multiclass input,
    # but avoids the extra dependency.
    ground_truth = np.asarray(ground_truth)
    classes = np.unique(ground_truth)
    g = (ground_truth[:, None] == classes).astype(float)
    if g.shape[1] == 1:
        # Degenerate single-class input: expand to two columns [1 - g, g],
        # mirroring the original binary-output handling.
        g = np.append(1 - g, g, axis=1)
    if len(penalty):
        # Double the weight of the penalized class columns, e.g. for highly
        # confusable classes.
        g[:, penalty] = g[:, penalty] * 2
    summation = np.sum(g * np.log(adj_p))
    return summation * (-1.0 / len(ground_truth))
# my_custom_scorer = make_scorer(my_custom_log_loss_func, greater_is_better=False, needs_proba=True, penalty=[4,9]) # here we set the penalty on for highly confusable numbers 4 and 9 (can change it or even leave it empty to check whether the resulting loss will be the same as that of the previous experiment with the sklearn.metrics.log_loss function)
# This new loss function will double log_loss when evaluating the results of the classes of number 4 and 9
evaluation_custom-scoring-function-grid-search-runtime.py -- source file
python
Reads: 29
Favorites: 0
Likes: 0
Comments: 0
Comment list
Table of contents