def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
    # Variational E-step for a single document: optimise gamma (the
    # per-document topic proportions) given the current topics `beta`.
    gamma = np.ones(len(alpha))
    expElogtheta = np.exp(dirichlet_expectation(gamma))
    betad = beta[:, doc_word_ids]
    phinorm = np.dot(expElogtheta, betad) + 1e-100
    counts = np.array(doc_word_counts)
    for _ in xrange(max_iter):
        lastgamma = gamma
        gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
        Elogtheta = dirichlet_expectation(gamma)
        expElogtheta = np.exp(Elogtheta)
        phinorm = np.dot(expElogtheta, betad) + 1e-100
        meanchange = np.mean(abs(gamma - lastgamma))
        if meanchange < meanchangethresh:  # meanchangethresh: module-level convergence tolerance
            break
    likelihood = np.sum(counts * np.log(phinorm))
    likelihood += np.sum((alpha - gamma) * Elogtheta)
    likelihood += np.sum(sp.gammaln(gamma) - sp.gammaln(alpha))
    likelihood += sp.gammaln(np.sum(alpha)) - sp.gammaln(np.sum(gamma))
    return (likelihood, gamma)
Example source code for Python's gammaln()
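The snippets on this page are fragments lifted from larger modules, so their imports and some module-level helpers are omitted. A minimal sketch of what they assume is below; the exact aliases (np, numpy, sp, special) and the helper definitions are assumptions based on common usage, not code taken from the original projects.

import math
import numpy
import numpy as np
import scipy.linalg
from scipy import special
import scipy.special as sp                     # assumed alias: sp.gammaln(...)
from scipy.special import gammaln, digamma, psi

# Helper assumed by lda_e_step above (standard variational-LDA form):
# E[log theta | gamma] for a Dirichlet parameter vector or matrix.
def dirichlet_expectation(alpha):
    if len(alpha.shape) == 1:
        return psi(alpha) - psi(np.sum(alpha))
    return psi(alpha) - psi(np.sum(alpha, axis=1))[:, np.newaxis]

# Convergence tolerance referenced (but not defined) by lda_e_step;
# 0.001 is a typical choice, not necessarily the original value.
meanchangethresh = 0.001

Other names that appear below (pylab, sampling.slice, OROB, lognormalize, c_Phi, mu2phi, logJacobian_mu, LOGPI, LOGTWO, LOGTWOPI, ...) are project-specific and are not reproduced here.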
def test_value(self):
    with self.test_session(use_gpu=True):
        def _test_value(given, temperature, logits):
            given = np.array(given, np.float32)
            logits = np.array(logits, np.float32)
            n = logits.shape[-1]
            t = temperature
            target_log_p = special.gammaln(n) + (n - 1) * np.log(t) + \
                (logits - t * given).sum(axis=-1) - \
                n * np.log(np.exp(logits - t * given).sum(axis=-1))
            con = ExpConcrete(temperature, logits=logits)
            log_p = con.log_prob(given)
            self.assertAllClose(log_p.eval(), target_log_p)
            p = con.prob(given)
            self.assertAllClose(p.eval(), np.exp(target_log_p))

        _test_value([np.log(0.25), np.log(0.25), np.log(0.5)],
                    0.1,
                    [1., 1., 1.2])
        _test_value([[np.log(0.25), np.log(0.25), np.log(0.5)],
                     [np.log(0.1), np.log(0.5), np.log(0.4)]],
                    0.5,
                    [[1., 1., 1.], [.5, .5, .4]])
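For reference, target_log_p above is the log-density of the ExpConcrete distribution (a Concrete/Gumbel-Softmax variable mapped to log-space), evaluated at a point y with exp(y) on the simplex, for n categories, temperature \lambda and logits \alpha_k:

$$\log p(y) = \log\Gamma(n) + (n-1)\log\lambda + \sum_{k=1}^{n}(\alpha_k - \lambda y_k) - n\log\sum_{k=1}^{n}\exp(\alpha_k - \lambda y_k)$$

where \log\Gamma(n) = \log (n-1)! is what special.gammaln(n) supplies.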
def test_value(self):
    with self.test_session(use_gpu=True):
        def _test_value(given, temperature, logits):
            given = np.array(given, np.float32)
            logits = np.array(logits, np.float32)
            n = logits.shape[-1]
            t = temperature
            target_log_p = special.gammaln(n) + (n - 1) * np.log(t) + \
                (logits - (t + 1) * np.log(given)).sum(axis=-1) - \
                n * np.log(np.exp(logits - t * np.log(given)).sum(axis=-1))
            con = Concrete(temperature, logits=logits)
            log_p = con.log_prob(given)
            self.assertAllClose(log_p.eval(), target_log_p)
            p = con.prob(given)
            self.assertAllClose(p.eval(), np.exp(target_log_p))

        _test_value([0.25, 0.25, 0.5],
                    0.1,
                    [1., 1., 1.2])
        _test_value([[0.25, 0.25, 0.5],
                     [0.1, 0.5, 0.4]],
                    0.5,
                    [[1., 1., 1.], [.5, .5, .4]])
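The second test encodes the corresponding density of the Concrete distribution on the simplex itself, evaluated at a point x:

$$\log p(x) = \log\Gamma(n) + (n-1)\log\lambda + \sum_{k=1}^{n}\big(\alpha_k - (\lambda+1)\log x_k\big) - n\log\sum_{k=1}^{n}\exp(\alpha_k - \lambda\log x_k)$$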
def neg_loglikelihood(y, mean, scale, shape, skewness):
    """ Negative loglikelihood function

    Parameters
    ----------
    y : np.ndarray
        univariate time series

    mean : np.ndarray
        array of location (rate) parameters for the Poisson distribution

    scale : float
        scale parameter (not used by the Poisson family)

    shape : float
        tail thickness parameter (not used by the Poisson family)

    skewness : float
        skewness parameter (not used by the Poisson family)

    Returns
    ----------
    - Negative loglikelihood of the Poisson family
    """
    return -np.sum(-mean + np.log(mean)*y - sp.gammaln(y + 1))
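Written out, the quantity returned is the negative Poisson log-likelihood, with rate \lambda_t = mean[t] and \log\Gamma(y_t+1) = \log(y_t!):

$$-\ell(y;\lambda) = -\sum_t\big(-\lambda_t + y_t\log\lambda_t - \log\Gamma(y_t+1)\big)$$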
def sample_gamma_posterior(data, shape0, scale0):
    S = numpy.sum(data)
    Slog = numpy.sum(map(numpy.log, data))
    N = len(data)
    logP = 1.0 + Slog
    q = 1.0 + S
    r = 1.0 + N
    s = 1.0 + N
    # lnf_shape = lambda al: (al-1)*logP + special.gammaln(s*al+1) - (1-al*s)*numpy.log(q) - r*special.gammaln(al)
    lnf_shape = lambda al: (al-1)*logP - q/scale0 - r*special.gammaln(al) - (al*s)*numpy.log(scale0)
    shape_new = sampling.slice(lnf_shape, shape0, thin=5, N=1, lower=0, upper=numpy.inf)[0]
    # print "Sfoo", shape0, lnf_shape(shape0)
    # print "....", shape_new, lnf_shape(shape_new)
    # lnf_rate = lambda beta: -beta*q + s*shape_new*numpy.log(beta)
    # rate_new = sampling.slice(lnf_rate, 1.0/scale0, thin=5, N=1, lower=0, upper=numpy.inf)[0]
    lnf_scale = lambda beta: (shape_new-1)*logP - q/beta - r*special.gammaln(shape_new) - (shape_new*s)*numpy.log(beta)
    scale_new = sampling.slice(lnf_scale, scale0, thin=5, N=1, lower=0, upper=numpy.inf)[0]
    # print "Rfoo", 1.0/scale0, lnf_scale(scale0)
    # print "...", scale_new, lnf_scale(scale_new)
    return [shape_new, scale_new]
def gamma_transition_kernel(data, shape0, scale0, shape1, scale1):
    S = numpy.sum(data)
    Slog = numpy.sum(map(numpy.log, data))
    N = len(data)
    logP = 1.0 + Slog
    q = 1.0 + S
    r = 1.0 + N
    s = 1.0 + N
    lnf_shape = lambda al: (al-1)*logP - q/scale0 - r*special.gammaln(al) - (al*s)*numpy.log(scale0)
    p_shape = lnf_shape(shape1)
    lnf_scale = lambda beta: (shape1-1)*logP - q/beta - r*special.gammaln(shape1) - (shape1*s)*numpy.log(beta)
    p_scale = lnf_scale(scale1)
    return p_shape + p_scale
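Both sample_gamma_posterior above and gamma_transition_kernel are built around the log-likelihood of data x_1,...,x_N under a Gamma distribution with shape \alpha and scale \theta (the `1.0 +` offsets appear to add weak prior pseudo-counts on top of it):

$$\log L(\alpha,\theta) = (\alpha-1)\sum_i\log x_i - \frac{1}{\theta}\sum_i x_i - N\log\Gamma(\alpha) - N\alpha\log\theta$$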
def log_marg_for_copy(self, copy_components):
    """Return log marginal of data and component assignments: p(X, z)"""
    # Log probability of component assignment P(z|alpha)
    # Equation (10) in Wood and Black, 2008
    # Use \Gamma(n) = (n - 1)!
    facts_ = gammaln(copy_components.counts[:copy_components.K])
    facts_[copy_components.counts[:copy_components.K] == 0] = 0  # definition of log(0!)
    log_prob_z = (
        (copy_components.K - 1)*math.log(self.alpha) + gammaln(self.alpha)
        - gammaln(np.sum(copy_components.counts[:copy_components.K])
                  + self.alpha) + np.sum(facts_)
        )
    log_prob_X_given_z = copy_components.log_marg()
    return log_prob_z + log_prob_X_given_z
def log_marg(self):
    """Return log marginal of data and component assignments: p(X, z)"""
    # Log probability of component assignment P(z|alpha)
    # Equation (10) in Wood and Black, 2008
    # Use \Gamma(n) = (n - 1)!
    facts_ = gammaln(self.components.counts[:self.components.K])
    facts_[self.components.counts[:self.components.K] == 0] = 0  # definition of log(0!)
    log_prob_z = (
        (self.components.K - 1)*math.log(self.alpha) + gammaln(self.alpha)
        - gammaln(np.sum(self.components.counts[:self.components.K])
                  + self.alpha) + np.sum(facts_)
        )
    log_prob_X_given_z = self.components.log_marg()
    return log_prob_z + log_prob_X_given_z
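Both log_marg_for_copy and log_marg evaluate the same assignment prior; in math form (Eq. 10 of Wood and Black, 2008, using \Gamma(n) = (n-1)!), with N_k the count of component k and N = \sum_k N_k:

$$\log p(z\mid\alpha) = (K-1)\log\alpha + \log\Gamma(\alpha) - \log\Gamma(N+\alpha) + \sum_{k=1}^{K}\log\Gamma(N_k)$$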
def log_marg(self):
    """Return log marginal of data and component assignments: p(X, z)"""
    # Log probability of component assignment, (24.24) in Murphy, p. 842
    log_prob_z = (
        gammaln(self.kalpha)
        - gammaln(self.kalpha + np.sum(self.components.counts))
        + np.sum(
            gammaln(
                self.components.counts
                + float(self.kalpha)/self.components.K_max
                )
            - gammaln(self.kalpha/self.components.K_max)
            )
        )
    # Log probability of data in each component
    log_prob_X_given_z = self.components.log_marg()
    return log_prob_z + log_prob_X_given_z
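The finite-mixture variant instead uses the marginal of the assignments under a symmetric Dirichlet prior on the mixing weights (Murphy, 2012, Eq. 24.24), with \alpha = kalpha:

$$\log p(z\mid\alpha) = \log\Gamma(\alpha) - \log\Gamma(\alpha+N) + \sum_{k=1}^{K_{\max}}\Big(\log\Gamma\big(N_k + \tfrac{\alpha}{K_{\max}}\big) - \log\Gamma\big(\tfrac{\alpha}{K_{\max}}\big)\Big)$$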
def _ll(self, x):
    result = 0.0
    # P(w|z)
    result += self.K * special.gammaln(self.beta * self.K)
    result += -np.sum(special.gammaln(np.sum(self.n_z_w, axis=1)))
    result += np.sum(special.gammaln(self.n_z_w))
    result += -self.V * special.gammaln(self.beta)
    # P(z|Lambda)
    alpha = self.get_alpha(x)
    result += np.sum(special.gammaln(np.sum(alpha, axis=1)))
    result += -np.sum(special.gammaln(np.sum(self.n_m_z + alpha, axis=1)))
    result += np.sum(special.gammaln(self.n_m_z + alpha))
    result += -np.sum(special.gammaln(alpha))
    # P(Lambda)
    result += -self.K / 2.0 * np.log(2.0 * np.pi * (self.sigma ** 2))
    result += -1.0 / (2.0 * (self.sigma ** 2)) * np.sum(x ** 2)
    result = -result
    return result
def visit_fn(self, temperature):
    factor1 = np.exp(np.log(temperature) / (self.qv - 1.0))
    factor2 = np.exp((4.0 - self.qv) * np.log(self.qv - 1.0))
    factor3 = np.exp((2.0 - self.qv) * np.log(2.0) / (self.qv - 1.0))
    factor4 = np.sqrt(np.pi) * factor1 * factor2 / (factor3 * (
        3.0 - self.qv))
    factor5 = 1.0 / (self.qv - 1.0) - 0.5
    d1 = 2.0 - factor5
    factor6 = np.pi * (1.0 - factor5) / np.sin(
        np.pi * (1.0 - factor5)) / np.exp(gammaln(d1))
    sigmax = np.exp(-(self.qv - 1.0) * np.log(
        factor6 / factor4) / (3.0 - self.qv))
    x = sigmax * self.gaussian_fn(1)
    y = self.gaussian_fn(0)
    den = np.exp(
        (self.qv - 1.0) * np.log((np.fabs(y))) / (3.0 - self.qv))
    return x / den
def prob_jk(self, j, k):
    # -1 because table of current sample topic jk, is not conditioned on
    njdotk = self.count_k_by_j[j, k]
    if njdotk == 1:
        return np.ones(1)
    possible_ms = np.arange(1, njdotk)  # +1-1
    log_alpha_beta_k = self.get_log_alpha_beta(k)
    alpha_beta_k = np.exp(log_alpha_beta_k)
    normalizer = gammaln(alpha_beta_k) - gammaln(alpha_beta_k + njdotk)
    log_stir = self.stirling_mat(njdotk, possible_ms)
    params = normalizer + log_stir + possible_ms*log_alpha_beta_k
    return lognormalize(params)
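prob_jk normalises, over m, the log of the Antoniak distribution for the number of tables m serving topic k in restaurant j (Teh et al., 2006); assuming stirling_mat returns the logs of the unsigned Stirling numbers of the first kind s(n, m), the unnormalised mass is

$$p(m\mid n_{jk}, \alpha\beta_k) \;\propto\; \frac{\Gamma(\alpha\beta_k)}{\Gamma(\alpha\beta_k + n_{jk})}\, s(n_{jk}, m)\,(\alpha\beta_k)^m$$

evaluated here for m = 1, ..., n_{jk} - 1 (the current table is excluded, per the comment in the code).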
def init_with_data(cls, lda_data, nhaps=3, haplotypes=None, use_ll_offset=False):
    V = len(lda_data.alphabets)
    status = cls(lda_data)
    zero = 1e-30  # value for zero count
    status.nhaps = nhaps
    status.ll_offset = gammaln(nhaps + 1) if use_ll_offset else 0.
    status.alpha = np.array([1. for _ in xrange(nhaps)])  # Uniform distribution
    #status.alpha = np.array([1. / nhaps for _ in xrange(nhaps)])
    if haplotypes:
        status.log_beta = np.log(np.array([[[1 if a == haplotypes[n][m] else zero for n in xrange(nhaps)] for a in lda_data.alphabets] for m in xrange(lda_data.nsites)]))
    else:
        status.log_beta = np.array([[[np.random.random() for _ in xrange(nhaps)] for _ in lda_data.alphabets] for _ in xrange(lda_data.nsites)])
        status.log_beta = log_normalize(status.log_beta, axis=1)
    # adhoc initialization
    status.log_psi = status.log_beta[np.newaxis, :, :, :]
    status.gamma = status.alpha[np.newaxis, :] + np.exp(lda_data.log_d[:, :, :, np.newaxis] + status.log_psi).sum(axis=1).sum(axis=1)
    status.ll = status.calc_lower_ll()
    #(status.gamma, status.log_psi) = _calc_gamma_psi(log_d, status.alpha, status.log_beta, gamma, log_psi)
    return status
def log_p_kmer_table(kmer_counts, p_kmers):
    """log_p_kmer_table calculates the log probability of an observed kmer
    table, treated as a multinomial draw, given the multinomial
    probabilities. The kmer table is preferably collapsed, i.e. if a given
    sequence is present, its reverse complement is not.

    Args:
        kmer_counts: the kmer table
        p_kmers: the multinomial probabilities

    Returns:
        the total log probability as a float
    """
    total = 0
    log_p = 0
    total_p = sum(p_kmers.values())
    for kmer in p_kmers.keys():
        p_kmers[kmer] /= total_p
    for kmer, count in kmer_counts.iteritems():
        log_p += count*np.log(p_kmers[kmer]) - special.gammaln(count+1)
        total += count
    log_p += special.gammaln(total+1)
    return log_p
def log_p_multinomial(X, p):
    """log_p_multinomial returns the probability of drawing a vector of
    counts from a multinomial parameterized by p.

    Args:
        X: np.array of counts representing a multinomial draw.
        p: np.array of probabilities, corresponding to X

    Returns:
        the log probability of the multinomial draw.
    """
    if sum(X) == 0:
        return 0.0
    # check that input is valid
    assert len(X) == len(p)
    eps = 0.0001
    assert abs(sum(p) - 1.0) < eps
    # calculate log prob.
    log_n_choices = special.gammaln(sum(X)+1) - sum([special.gammaln(x_i+1)
                                                     for x_i in X])
    log_p_items = sum(x_i*np.log(p_i) for (x_i, p_i) in zip(X, p))
    return log_n_choices + log_p_items
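Both log_p_kmer_table and log_p_multinomial compute the multinomial log-pmf, using gammaln for the log-factorials in the multinomial coefficient:

$$\log p(X\mid p) = \log\Gamma\Big(\sum_i x_i + 1\Big) - \sum_i\log\Gamma(x_i+1) + \sum_i x_i\log p_i$$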
def log_prob_given_alpha(self):
    """log_prob_given_alpha returns the probability of the mixing
    proportions, gamma, under the CRP prior.

    Returns:
        the log probability as a float.
    """
    K = len(self.component_counts.keys())  # number of seen components
    N = sum(self.component_counts.values())
    log_numerator = K*np.log(self.alpha_crp) + sum(
        special.gammaln(N_k) for N_k in
        self.component_counts.values()
        )
    log_denom = (special.gammaln(N + self.alpha_crp) -
                 special.gammaln(self.alpha_crp))
    return log_numerator - log_denom
def makePlot_pdf_Phi(
        nu=0, tau=0, phi_grid=None,
        ngrid=1000, min_phi=-100, max_phi=100):
    label = 'nu=%7.2f' % (nu)
    cPrior = - gammaln(nu) + gammaln(nu-tau) + gammaln(tau)
    if phi_grid is None:
        phi_grid = np.linspace(min_phi, max_phi, ngrid)
    logpdf_grid = tau * phi_grid - nu * c_Phi(phi_grid) - cPrior
    pdf_grid = np.exp(logpdf_grid)
    IntegralVal = np.trapz(pdf_grid, phi_grid)
    mu_grid = phi2mu(phi_grid)
    ExpectedPhiVal = np.trapz(pdf_grid * phi_grid, phi_grid)
    ExpectedMuVal = np.trapz(pdf_grid * mu_grid, phi_grid)
    print '%s Integral=%.4f E[phi]=%6.3f E[mu]=%.4f' % (
        label, IntegralVal, ExpectedPhiVal, ExpectedMuVal)
    pylab.plot(phi_grid, pdf_grid, '-', label=label)
    pylab.xlabel('phi (log odds ratio)')
    pylab.ylabel('density p(phi)')
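Here cPrior equals \log B(\tau, \nu-\tau), so the curve is a Beta(\tau, \nu-\tau) prior written in its log-odds (natural) parameter \phi; assuming c_Phi(\phi) = \log(1+e^{\phi}) (the usual log-partition for this parameterisation, not shown in the snippet), the plotted log-density is

$$\log p(\phi) = \tau\phi - \nu\log(1+e^{\phi}) - \log B(\tau,\,\nu-\tau)$$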
def makePlot_pdf_Mu(
        nu=0, tau=0, phi_grid=None,
        ngrid=1000, min_phi=-100, max_phi=100):
    label = 'nu=%7.2f' % (nu,)
    cPrior = - gammaln(nu) + gammaln(nu-tau) + gammaln(tau)
    mu_grid = np.linspace(1e-15, 1-1e-15, ngrid)
    phi_grid = mu2phi(mu_grid)
    logpdf_grid = tau * phi_grid - nu * c_Phi(phi_grid) - cPrior
    logJacobian_grid = logJacobian_mu(mu_grid)
    pdf_grid = np.exp(logpdf_grid + logJacobian_grid)
    IntegralVal = np.trapz(pdf_grid, mu_grid)
    ExpectedMuVal = np.trapz(pdf_grid * mu_grid, mu_grid)
    ExpectedPhiVal = np.trapz(pdf_grid * phi_grid, mu_grid)
    print '%s Integral=%.4f E[phi]=%6.3f E[mu]=%.4f' % (
        label, IntegralVal, ExpectedMuVal, ExpectedMuVal)
    pylab.plot(mu_grid, pdf_grid, '-', label=label)
    pylab.xlabel('mu')
    pylab.ylabel('density p(mu)')
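makePlot_pdf_Mu plots the same density back on the mean scale. Assuming mu2phi is the logit map \phi = \log\frac{\mu}{1-\mu} (consistent with the 'log odds ratio' axis label above), logJacobian_mu presumably supplies the change-of-variables term:

$$p(\mu) = p\big(\phi(\mu)\big)\,\Big|\frac{d\phi}{d\mu}\Big| = \frac{p\big(\phi(\mu)\big)}{\mu(1-\mu)}$$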
def L_hdp(beta, omega, Tvec, alpha):
    ''' Compute top-tier of hdp bound.
    '''
    # nDoc and gamma are assumed to come from the enclosing module scope;
    # they are not arguments of this function.
    K = omega.size
    assert K == beta.size
    assert K == Tvec.size
    rho = OROB.beta2rho(beta, K)
    eta1 = rho * omega
    eta0 = (1 - rho) * omega
    digamma_omega = digamma(omega)
    digamma_eta1 = digamma(eta1)
    digamma_eta0 = digamma(eta0)
    ElogU = digamma_eta1 - digamma_omega
    Elog1mU = digamma_eta0 - digamma_omega
    Lscore = \
        np.sum(gammaln(eta1) + gammaln(eta0) - gammaln(omega)) \
        + np.inner(nDoc + 1 - eta1, ElogU) \
        + np.inner(nDoc * OROB.kvec(K) + gamma - eta0, Elog1mU) \
        + alpha * np.inner(beta, Tvec)
    return Lscore
def c_Func(nu, logdetB, M, logdetV):
    ''' Evaluate cumulant function c at given parameters.

    c is the cumulant of the MatrixNormal-Wishart, using common params.

    Returns
    --------
    c : scalar real value of cumulant function at provided args
    '''
    if logdetB.ndim >= 2:
        logdetB = np.log(np.linalg.det(logdetB))
    if logdetV.ndim >= 2:
        logdetV = np.log(np.linalg.det(logdetV))
    D, E = M.shape
    dvec = np.arange(1, D + 1, dtype=np.float)
    return - 0.25 * D * (D - 1) * LOGPI \
        - 0.5 * D * LOGTWO * nu \
        - np.sum(gammaln(0.5 * (nu + 1 - dvec))) \
        + 0.5 * nu * logdetB \
        - 0.5 * D * E * LOGTWOPI \
        + 0.5 * E * logdetV
def c_Diff(nu1, logdetB1, D, nu2, logdetB2):
    ''' Evaluate difference of cumulant functions c(params1) - c(params2)

    May be more numerically stable than directly using c_Func
    to find the difference.

    Returns
    -------
    diff : scalar real value of the difference in cumulant functions
    '''
    if logdetB1.ndim >= 2:
        assert D == logdetB1.shape[-1]
        logdetB1 = np.log(np.linalg.det(logdetB1))
        logdetB2 = np.log(np.linalg.det(logdetB2))
    dvec = np.arange(1, D + 1, dtype=np.float)
    return - 0.5 * D * LOGTWO * (nu1 - nu2) \
        - np.sum(gammaln(0.5 * (nu1 + 1 - dvec))) \
        + np.sum(gammaln(0.5 * (nu2 + 1 - dvec))) \
        + 0.5 * (nu1 * logdetB1 - nu2 * logdetB2)
def _calcPredProbVec_Fast(self, SS, x):
    p = np.zeros(SS.K)
    nu, beta, m, kappa = self.calcPostParams(SS)
    kbeta = beta
    kbeta *= ((kappa + 1) / kappa)[:, np.newaxis]
    base = np.square(x - m)
    base /= kbeta
    base += 1
    # logp : 2D array, size K x D
    logp = (-0.5 * (nu + 1))[:, np.newaxis] * np.log(base)
    logp += (gammaln(0.5 * (nu + 1)) - gammaln(0.5 * nu))[:, np.newaxis]
    logp -= 0.5 * np.log(kbeta)
    # p : 1D array, size K
    p = np.sum(logp, axis=1)
    p -= np.max(p)
    np.exp(p, out=p)
    return p
def c_Diff(nu1, beta1, m1, kappa1,
           nu2, beta2, m2, kappa2):
    ''' Evaluate difference of cumulant functions c(params1) - c(params2)

    May be more numerically stable than directly using c_Func
    to find the difference.

    Returns
    -------
    diff : scalar real value of the difference in cumulant functions
    '''
    cDiff = - 0.5 * LOGTWO * (nu1 - nu2) \
        - gammaln(0.5 * nu1) + gammaln(0.5 * nu2) \
        + 0.5 * (np.log(kappa1) - np.log(kappa2)) \
        + 0.5 * (nu1 * np.log(beta1) - nu2 * np.log(beta2))
    return np.sum(cDiff)
def _calcPredProbVec_Fast(self, SS, x):
    nu, B, m, kappa = self.calcPostParams(SS)
    kB = B
    kB *= ((kappa + 1) / kappa)[:, np.newaxis, np.newaxis]
    logp = np.zeros(SS.K)
    p = logp  # Rename so it's not confusing what we're returning
    for k in xrange(SS.K):
        cholKB = scipy.linalg.cholesky(kB[k], lower=1)
        logdetKB = 2 * np.sum(np.log(np.diag(cholKB)))
        mVec = np.linalg.solve(cholKB, x - m[k])
        mDist_k = np.inner(mVec, mVec)
        logp[k] = -0.5 * logdetKB - 0.5 * \
            (nu[k] + 1) * np.log(1.0 + mDist_k)
    logp += gammaln(0.5 * (nu + 1)) - gammaln(0.5 * (nu + 1 - self.D))
    logp -= np.max(logp)
    np.exp(logp, out=p)
    return p
def c_Func(nu, logdetB, m, kappa):
    ''' Evaluate cumulant function at given params.

    Returns
    --------
    c : scalar real value of cumulant function at provided args
    '''
    if logdetB.ndim >= 2:
        logdetB = np.log(np.linalg.det(logdetB))
    D = m.size
    dvec = np.arange(1, D + 1, dtype=np.float)
    return - 0.5 * D * LOGTWOPI \
        - 0.25 * D * (D - 1) * LOGPI \
        - 0.5 * D * LOGTWO * nu \
        - np.sum(gammaln(0.5 * (nu + 1 - dvec))) \
        + 0.5 * D * np.log(kappa) \
        + 0.5 * nu * logdetB
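Written out, the cumulant (log-normaliser) this c_Func evaluates for Normal-Wishart parameters (\nu, B, m, \kappa), with LOGTWOPI = \log 2\pi, LOGPI = \log\pi, LOGTWO = \log 2 and D the dimension, is

$$c(\nu, B, m, \kappa) = -\tfrac{D}{2}\log 2\pi - \tfrac{D(D-1)}{4}\log\pi - \tfrac{D\nu}{2}\log 2 - \sum_{d=1}^{D}\log\Gamma\Big(\tfrac{\nu+1-d}{2}\Big) + \tfrac{D}{2}\log\kappa + \tfrac{\nu}{2}\log\det B$$

and c_Diff below computes the difference of two such values while letting the constant terms cancel.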
def c_Diff(nu1, logdetB1, m1, kappa1,
           nu2, logdetB2, m2, kappa2):
    ''' Evaluate difference of cumulant functions c(params1) - c(params2)

    May be more numerically stable than directly using c_Func
    to find the difference.

    Returns
    -------
    diff : scalar real value of the difference in cumulant functions
    '''
    if logdetB1.ndim >= 2:
        logdetB1 = np.log(np.linalg.det(logdetB1))
    if logdetB2.ndim >= 2:
        logdetB2 = np.log(np.linalg.det(logdetB2))
    D = m1.size
    dvec = np.arange(1, D + 1, dtype=np.float)
    return - 0.5 * D * LOGTWO * (nu1 - nu2) \
        - np.sum(gammaln(0.5 * (nu1 + 1 - dvec))) \
        + np.sum(gammaln(0.5 * (nu2 + 1 - dvec))) \
        + 0.5 * D * (np.log(kappa1) - np.log(kappa2)) \
        + 0.5 * (nu1 * logdetB1 - nu2 * logdetB2)