import numpy as np


def _obj_func(self, w, X, Y, out):
"""
Computes primal value end gradient
Parameters
----------
w : {array-like} - hyperplane normal vector
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Y : array-like, shape = [n_samples]
Target vector relative to X
out: loss function values
Returns
-------
(obj,grad) : tuple, obj - function value, grad - gradient
"""
    l2reg = self.l2reg
    # Remember the bias so it can be restored after the gradient
    # computation; the bias term itself is not penalized.
    bias = w[-1]
    w[-1] = 0
    # Squared hinge loss: only positive margin values contribute.
    max_out = np.fmax(0, out)
    obj = np.sum(max_out ** 2) / 2 + l2reg * w.dot(w) / 2
    # Gradient: regularization term minus the data term; the last entry
    # is the derivative with respect to the bias.
    grad = l2reg * w - np.append(np.dot(max_out * Y, X), np.sum(max_out * Y))
    w[-1] = bias
    return (obj, grad)
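

# What follows is a minimal, self-contained usage sketch, not part of the
# original code: it builds an augmented weight vector w = [w_1, ..., w_d, b],
# forms the per-sample margins out_i = 1 - y_i * (x_i . w + b) that the
# squared hinge loss is assumed to expect, and checks the analytic gradient
# returned by _obj_func against a central finite-difference approximation.
# The `_Dummy` stand-in for the enclosing estimator and the `margins` helper
# are hypothetical; only the l2reg attribute is actually used.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    n_samples, n_features = 20, 5
    X = rng.randn(n_samples, n_features)
    Y = rng.choice([-1.0, 1.0], size=n_samples)
    w = rng.randn(n_features + 1)  # last entry is the bias b

    def margins(v):
        # Assumed margin form: out_i = 1 - y_i * (x_i . w + b).
        return 1 - Y * (X.dot(v[:-1]) + v[-1])

    class _Dummy:  # hypothetical holder for the regularization strength
        l2reg = 1.0

    obj, grad = _obj_func(_Dummy(), w, X, Y, margins(w))

    # Central finite-difference check of the analytic gradient.
    eps = 1e-6
    num_grad = np.zeros_like(w)
    for i in range(w.size):
        w_hi, w_lo = w.copy(), w.copy()
        w_hi[i] += eps
        w_lo[i] -= eps
        obj_hi, _ = _obj_func(_Dummy(), w_hi, X, Y, margins(w_hi))
        obj_lo, _ = _obj_func(_Dummy(), w_lo, X, Y, margins(w_lo))
        num_grad[i] = (obj_hi - obj_lo) / (2 * eps)
    print("max |grad - num_grad| =", np.abs(grad - num_grad).max())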