def _uwham_obj_grad(self, f_i):
"""Return the log-likelihood (scalar objective function) and its
gradient (wrt the free energies) for a given value of the free
energies. Note that this expects one less free energy than there are
states, since we always solve under the constraint that f_1 = 0.
"""
_f_i = hstack((zeros(1), asarray(f_i)))
# For numerical stability, use log quantities.
logQ_nj = _f_i - self._u_nj_m
logNorm_n = logsumexp(logQ_nj, 1, self.PIsdiag[newaxis, :])
W_nj = exp(logQ_nj - logNorm_n[:, newaxis])
# Compute matrix products and sums (equivalent to multiplying by
# appropriate vector of ones). Note that using dot() with ndarrays is
# _much_ faster than multiplying matrix objects.
PIW = self.PIs.dot(W_nj.T)
WPI = W_nj.dot(self.PIs)
g = PIW.mean(axis=1) # used in gradient and Hessian computation
kappa = logNorm_n.mean() - (self.PIsdiag.dot(_f_i)).sum()
grad = (g - self.PIsdiag)[1:]
self._hess = (diagflat(g) - PIW.dot(WPI) / self.total_samples)[1:, 1:]
return kappa, grad
# (removed stray page-navigation text left over from a web scrape)