# Module-level imports assumed by this method (the body calls these
# numpy helpers unqualified, so they must be imported at module scope).
from numpy import inner, where
from numpy.linalg import norm
from numpy.ma import masked_invalid


def functional_grad_val(self, coefs, ground_truth, Gram,
                        weight=None, zeronan=None):
    """Compute the value and gradient of the empirical OVK ridge risk.

    Parameters
    ----------
    coefs : {vector-like}, shape = [n_samples1 * n_targets]
        Coefficients to optimise.

    ground_truth : {vector-like}
        Target samples; missing targets are encoded as NaN.

    Gram : {LinearOperator}
        Gram matrix acting on the coefs.

    weight : {LinearOperator}, optional
        Weight operator acting as the identity on the supervised samples
        and as lbda_m times the graph Laplacian of the data with missing
        targets on the unsupervised samples (semi-supervised learning).

    zeronan : {LinearOperator}, optional
        Mask operator that zeroes the entries corresponding to missing
        (NaN) targets.

    Returns
    -------
    Tuple{float, vector-like}
        Empirical OVK ridge risk and its gradient, returned as a tuple.
    """
    n_samples = ground_truth.size  # avoid shadowing the usual numpy alias
    pred = Gram * coefs
    vgt = masked_invalid(ground_truth)
    # Impute missing (NaN) targets with the current predictions so they
    # do not contribute to the supervised part of the loss.
    vgt[where(vgt.mask)] = pred[where(vgt.mask)]
    reg = inner(coefs, pred)  # squared RKHS norm of the model
    if weight is None or zeronan is None:
        res = pred - vgt
        obj = norm(res) ** 2 / (2 * n_samples)
    else:
        wpred = weight * pred  # sup x identity | unsup x lbda_m x L
        res = wpred - zeronan * vgt
        wip = wpred - zeronan * wpred  # only unsup part of wpred
        lap = inner(wip, pred)  # Laplacian part x lbda_m
        obj = norm(zeronan * res) ** 2 / (2 * n_samples)  # data-fitting loss
        obj += lap / (2 * n_samples)  # Laplacian regularization
    obj += self.lbda * reg / (2 * n_samples)  # ridge regularization
    return obj, Gram * res / n_samples + self.lbda * pred / n_samples
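

# Usage sketch (illustrative, not part of the source): a hedged example of
# calling functional_grad_val in the fully supervised case.  The Risk
# stand-in class, the Gaussian Gram matrix and all names below are
# assumptions for demonstration; only the method above comes from the source.
import numpy
from scipy.sparse.linalg import aslinearoperator


class Risk(object):
    lbda = 0.1  # ridge parameter, read by the method as self.lbda
    functional_grad_val = functional_grad_val  # reuse the method above


numpy.random.seed(0)
X = numpy.random.randn(20, 3)  # 20 input points in R^3
sq_dists = numpy.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)
gram = aslinearoperator(numpy.exp(-0.5 * sq_dists))  # Gaussian Gram matrix
y = numpy.sin(X[:, 0])  # 20 scalar targets, no missing values
coefs = numpy.zeros(20)

obj, grad = Risk().functional_grad_val(coefs, y, gram)

# Finite-difference check of the first gradient coordinate.
eps = 1e-6
e_0 = numpy.eye(20)[0]
obj_eps, _ = Risk().functional_grad_val(coefs + eps * e_0, y, gram)
assert abs((obj_eps - obj) / eps - grad[0]) < 1e-4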