def velb(self):
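"""
Compute the variational evidence lower bound (ELBO) of the model.
The five accumulated terms are E[log p(pi | alpha)] (E[log p(v | alpha)] in the
infinite / stick-breaking mode), E[log p(Z | pi)], E[log p(A | sigma_a)],
E[log p(X | Z, A, sigma_x)], and the entropy of the variational distribution q.
"""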
log_likelihood = numpy.zeros(5)
psi_tau = scipy.special.psi(self._tau)
assert(psi_tau.shape == (2, self._K))
psi_sum_tau = scipy.special.psi(numpy.sum(self._tau, axis=0)[numpy.newaxis, :])
assert(psi_sum_tau.shape == (1, self._K))
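# for a Beta(tau[0, k], tau[1, k]) factor (over pi_k in the finite mode, or the stick
# weight v_k in the infinite mode):
#   E_q[log pi_k]      = psi(tau[0, k]) - psi(tau[0, k] + tau[1, k]) = psi_tau[0, k] - psi_sum_tau[0, k]
#   E_q[log(1 - pi_k)] = psi(tau[1, k]) - psi(tau[0, k] + tau[1, k]) = psi_tau[1, k] - psi_sum_tau[0, k]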
if self._finite_mode:
# expected log prior over the feature probabilities, E[log p(pi | alpha)], with pi_k ~ Beta(alpha / K, 1)
log_likelihood[0] = self._K * numpy.log(self._alpha / self._K) + (self._alpha / self._K - 1.) * numpy.sum(psi_tau[0, :] - psi_sum_tau)
# expected log likelihood of the feature assignments, E[log p(Z | pi)], with z_nk ~ Bernoulli(pi_k)
log_likelihood[1] = numpy.sum(self._nu * psi_tau[0, :]) + numpy.sum((1. - self._nu) * psi_tau[1, :]) - self._N * numpy.sum(psi_sum_tau)
else:
# expected log prior over the stick-breaking weights, E[log p(v | alpha)], with v_k ~ Beta(alpha, 1)
log_likelihood[0] = self._K * numpy.log(self._alpha) + (self._alpha - 1.) * numpy.sum(psi_tau[0, :] - psi_sum_tau)
# expected log likelihood of the feature assignments, E[log p(Z | v)], with pi_k = prod_{j <= k} v_j
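# E_q[log pi_k] = sum_{j <= k} E_q[log v_j]; the term E_q[log(1 - prod_{j <= k} v_j)] has no
# closed form, so (as its name suggests) compute_expected_pzk0_qjensen(k) is assumed to return
# a Jensen-style lower bound on it, making this term a lower bound on E[log p(Z | v)].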
for k in range(self._K):
log_likelihood[1] += numpy.sum(self._nu[:, k]) * numpy.sum(psi_tau[0, :k + 1] - psi_sum_tau[0, :k + 1])
log_likelihood[1] += (self._N - numpy.sum(self._nu[:, k])) * self.compute_expected_pzk0_qjensen(k)
# expected log prior over the feature matrix, E[log p(A | sigma_a)], an isotropic Gaussian on the K x D matrix A
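# E_q[log p(A)] = -(K * D / 2) * log(2 * pi * sigma_a^2) - (1 / (2 * sigma_a^2)) * sum_k (trace(Phi_k) + ||phi_k||^2)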
log_likelihood[2] = -0.5 * self._K * self._D * numpy.log(2 * numpy.pi * self._sigma_a * self._sigma_a)
log_likelihood[2] -= 0.5 / (self._sigma_a * self._sigma_a) * (numpy.sum(self._phi_cov) + numpy.sum(self._phi_mean * self._phi_mean))
# expected log likelihood of the data, E[log p(X | Z, A, sigma_x)]
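# E_q[||x_n - A' z_n||^2] = ||x_n||^2 - 2 * sum_k nu_nk * phi_k . x_n
#   + sum_k nu_nk * (trace(Phi_k) + ||phi_k||^2)                (k = k' terms, E[z_nk^2] = nu_nk)
#   + ||sum_k nu_nk * phi_k||^2 - sum_k nu_nk^2 * ||phi_k||^2   (k != k' cross terms)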
tmp_log_likelihood = numpy.sum(self._X * self._X) - 2 * numpy.sum(self._nu * numpy.dot(self._X, self._phi_mean.transpose()))
tmp_1 = numpy.sum(self._phi_cov + self._phi_mean ** 2, axis=1)
tmp_log_likelihood += numpy.sum(self._nu * tmp_1)
tmp_1 = numpy.dot(self._nu, self._phi_mean)
tmp_2 = numpy.sum(numpy.dot(self._nu ** 2, self._phi_mean ** 2))
tmp_log_likelihood += numpy.sum(tmp_1 * tmp_1) - tmp_2
log_likelihood[3] = -0.5 * self._N * self._D * numpy.log(2 * numpy.pi * self._sigma_x * self._sigma_x)
log_likelihood[3] -= 0.5 / (self._sigma_x * self._sigma_x) * tmp_log_likelihood
# entropy of the variational (proposed) distribution q
lngamma_tau = scipy.special.gammaln(self._tau)
assert(lngamma_tau.shape == (2, self._K))
lngamma_sum_tau = scipy.special.gammaln(numpy.sum(self._tau, axis=0)[numpy.newaxis, :])
assert(lngamma_sum_tau.shape == (1, self._K))
# entropy of the Beta factors q(pi_k) (or q(v_k) in the infinite mode)
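# H[Beta(a, b)] = lnGamma(a) + lnGamma(b) - lnGamma(a + b) - (a - 1) * psi(a) - (b - 1) * psi(b) + (a + b - 2) * psi(a + b)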
log_likelihood[4] = numpy.sum(lngamma_tau[0, :] + lngamma_tau[1, :] - lngamma_sum_tau)
log_likelihood[4] -= numpy.sum((self._tau[0, :] - 1) * psi_tau[0, :] + (self._tau[1, :] - 1) * psi_tau[1, :])
log_likelihood[4] += numpy.sum((self._tau[0, :] + self._tau[1, :] - 2) * psi_sum_tau)
assert(numpy.all(self._phi_cov > 0))
assert(numpy.all(self._nu >= 0) and numpy.all(self._nu <= 1))
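# entropy of q(A): K * D independent Gaussians with variances phi_cov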
log_likelihood[4] += 0.5 * self._K * self._D * numpy.log(2 * numpy.pi * numpy.e)
log_likelihood[4] += 0.5 * numpy.sum(numpy.log(self._phi_cov))
#log_likelihood[4] += 0.5 * numpy.log(numpy.sqrt(numpy.sum(self._phi_cov * self._phi_cov, axis=1)))
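# entropy of q(Z): independent Bernoulli(nu_nk); nu must lie strictly inside (0, 1) or log(0) below produces NaN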
log_likelihood[4] -= numpy.sum(self._nu * numpy.log(self._nu) + (1. - self._nu) * numpy.log(1. - self._nu))
return numpy.sum(log_likelihood)
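# A minimal sketch of how this bound is typically used to monitor coordinate-ascent
# updates; the model construction and update_parameters() call below are illustrative
# placeholders, not part of this class:
#
#     model = ...                      # construct the model (hypothetical)
#     last_elbo = -numpy.inf
#     for _ in range(max_iterations):
#         model.update_parameters()    # one round of variational updates (hypothetical name)
#         elbo = model.velb()
#         if elbo - last_elbo < tolerance:
#             break
#         last_elbo = elbo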