import numpy as np


def log_loss_value_from_scores(weights, total_weights, scores):
"""
computes the logistic loss value from a vector of scores in a numerically stable way
where scores = Z.dot(rho)
see also: http://stackoverflow.com/questions/20085768/
this function is used for heuristics (discrete_descent, sequential_rounding).
to save computation when running the heuristics, we store the scores and
call this function to compute the loss directly from the scores
this reduces the need to recompute the dot product.
Parameters
----------
scores numpy.array of scores = Z.dot(rho)
total_weights numpy.sum(total_weights) (only included to reduce computation)
weights numpy.array of sample weights with shape (n_rows,)
Returns
-------
loss_value scalar = 1/n_rows * sum(log( 1 .+ exp(-Z*rho))
"""
    # split on the sign of each score so that log(1 + exp(-score)) is evaluated
    # without overflowing exp() for large-magnitude scores
    pos_idx = scores > 0
    loss_value = np.empty_like(scores)
    loss_value[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
    loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))

    # weighted average of the per-sample losses
    loss_value = loss_value.dot(weights) / total_weights
    return loss_value
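

# Illustrative usage sketch (not part of the original module): the data below is
# hypothetical and only shows how the heuristics precompute scores = Z.dot(rho)
# once and then pass the stored scores to this function instead of redoing the
# dot product on every evaluation.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    Z = rng.normal(size=(100, 5))               # rows of Z already fold in the labels (Z = y[:, None] * X)
    rho = np.array([1.0, -2.0, 0.5, 0.0, 3.0])  # candidate coefficient vector
    weights = np.ones(Z.shape[0])               # uniform sample weights
    total_weights = np.sum(weights)

    scores = Z.dot(rho)                         # computed once, reused across heuristic calls
    print(log_loss_value_from_scores(weights, total_weights, scores))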