import numpy as np
from scipy.special import gammaln


def c_Beta(g1, g0):
    r''' Calculate cumulant function of the Beta distribution.

    Inputs can be vectors, in which case we compute the sum of the
    cumulant functions of the K independent distributions:
        \prod_k Beta(g1[k], g0[k])

    Args
    ----
    g1 : 1D array, size K
        first parameter of a Beta distribution
    g0 : 1D array, size K
        second parameter of a Beta distribution

    Returns
    -------
    c : scalar sum of the cumulants defined by the provided parameters
    '''
    return np.sum(gammaln(g1 + g0) - gammaln(g1) - gammaln(g0))
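# A minimal usage sketch (not from the original project; uses the numpy and
# gammaln imports above): the vectorized call should equal the sum of scalar
# calls over the K components.
g1_demo = np.array([1.0, 2.0, 0.5])
g0_demo = np.array([3.0, 4.0, 0.5])
assert np.isclose(c_Beta(g1_demo, g0_demo),
                  sum(c_Beta(a, b) for a, b in zip(g1_demo, g0_demo)))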
def calcELBO_SingleDocFromSparseResp(
spResp_d, ElogLik_d, wc_d, alphaEbeta):
''' Calculate single document contribution to the ELBO objective.
This isolates all ELBO terms that depend on local parameters of this doc.
Returns
-------
L : scalar float
value of ELBO objective, up to additive constant.
This constant is independent of any local parameter attached to doc d.
'''
    # DocTopicCount_d[k] = sum_n wc_d[n] * resp[n, k] (sparse vector-matrix product)
    DocTopicCount_d = wc_d * spResp_d
    theta_d = DocTopicCount_d + alphaEbeta
    R_d = spResp_d.toarray()
    wR_d = wc_d[:, np.newaxis] * R_d
    L_alloc = np.sum(gammaln(theta_d))
    L_data = np.sum(wR_d * ElogLik_d)
    # Entropy term; the 1e-100 guards against log(0) for zero responsibilities.
    RlogR = np.sum(wR_d * np.log(R_d + 1e-100))
    return L_alloc + L_data - RlogR
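# Hypothetical call, just to pin down the expected shapes (N tokens, K topics);
# all values and names below are illustrative, not from the original project.
import numpy as np
import scipy.sparse
rng = np.random.RandomState(0)
N, K = 5, 3
resp = rng.dirichlet(np.ones(K), size=N)                # dense N x K responsibilities
spResp_d = scipy.sparse.csr_matrix(resp)                # sparse responsibilities
ElogLik_d = np.log(rng.dirichlet(np.ones(K), size=N))   # N x K expected log-likelihoods (placeholder values)
wc_d = rng.randint(1, 5, size=N).astype(float)          # per-token word counts
alphaEbeta = 0.5 * np.ones(K)                           # alpha * E[beta], one entry per topic
L_d = calcELBO_SingleDocFromSparseResp(spResp_d, ElogLik_d, wc_d, alphaEbeta)  # scalar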
def log_prob_z(self):
"""
Return the log marginal probability of component assignment P(z).
See (24.24) in Murphy, p. 842.
"""
log_prob_z = (
gammaln(self.alpha)
- gammaln(self.alpha + np.sum(self.components.counts))
+ np.sum(
gammaln(
self.components.counts
+ float(self.alpha)/self.components.K_max
)
- gammaln(self.alpha/self.components.K_max)
)
)
return log_prob_z
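# For reference, the quantity computed above is the assignment marginal under a
# symmetric Dirichlet(alpha/K_max) prior, (24.24) in Murphy:
#   P(z) = Gamma(alpha) / Gamma(alpha + N) * prod_k Gamma(N_k + alpha/K_max) / Gamma(alpha/K_max)
# where N_k = self.components.counts[k] and N = sum_k N_k.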
def log_marg(self):
"""Return log marginal of data and component assignments: p(X, z)"""
log_prob_z = self.log_prob_z()
log_prob_X_given_z = self.log_prob_X_given_z()
    # The two terms are (24.24) in Murphy, p. 842 (the assignment marginal,
    # computed by log_prob_z) and the per-component data marginals, computed by
    # log_prob_X_given_z, which wraps self.components.log_marg().
return log_prob_z + log_prob_X_given_z
# @profile
def _vlb(self):
vlb = 0.
vlb += sum(l.get_vlb() for l in self.labels_list)
vlb += self.weights.get_vlb()
vlb += sum(c.get_vlb() for c in self.components)
for l in self.labels_list:
vlb += np.sum([r.dot(c.expected_log_likelihood(l.data))
for c,r in zip(self.components, l.r.T)])
    # add in symmetry factor log(K!) (only when the components are
    # exchangeable, i.e. they all share the same type)
    if len(set(type(c) for c in self.components)) == 1:
        vlb += special.gammaln(len(self.components)+1)
return vlb
### SVI
def get_vlb(self):
# E[ln p(mu) / q(mu)] part
h_0, J_0, h_mf, J_mf = self.h_0, self.J_0, self.h_mf, self.J_mf
Emu, Emu2 = self._E_mu
p_mu_avgengy = -1./2*J_0*Emu2 + h_0*Emu \
- 1./2*(h_0**2/J_0) + 1./2*np.log(J_0) - 1./2*np.log(2*np.pi)
q_mu_entropy = 1./2*np.log(2*np.pi*np.e/J_mf)
# E[ln p(sigmasq) / q(sigmasq)] part
alpha_0, beta_0, alpha_mf, beta_mf = \
self.alpha_0, self.beta_0, self.alpha_mf, self.beta_mf
(Esigmasqinv, Elnsigmasq) = self._E_sigmasq
p_sigmasq_avgengy = (-alpha_0-1)*Elnsigmasq + (-beta_0)*Esigmasqinv \
- (special.gammaln(alpha_0) - alpha_0*np.log(beta_0))
q_sigmasq_entropy = alpha_mf + np.log(beta_mf) + special.gammaln(alpha_mf) \
- (1+alpha_mf)*special.digamma(alpha_mf)
return p_mu_avgengy + q_mu_entropy + p_sigmasq_avgengy + q_sigmasq_entropy
def get_vlb(self):
# E[ln p(mu) / q(mu)] part
h_0, J_0, h_mf, J_mf = self.h_0, self.J_0, self.h_mf, self.J_mf
Emu, Emu2 = self._E_mu
p_mu_avgengy = -1./2*J_0*Emu2 + h_0*Emu \
- 1./2*(h_0**2/J_0) + 1./2*np.log(J_0) - 1./2*np.log(2*np.pi)
q_mu_entropy = 1./2*np.log(2*np.pi*np.e/J_mf)
# E[ln p(sigmasq) / q(sigmasq)] part
alpha_0, beta_0, alpha_mf, beta_mf = \
self.alpha_0, self.beta_0, self.alpha_mf, self.beta_mf
(Esigmasqinv, Elnsigmasq) = self._E_sigmasq
p_sigmasq_avgengy = (-alpha_0-1)*Elnsigmasq + (-beta_0)*Esigmasqinv \
- (special.gammaln(alpha_0) - alpha_0*np.log(beta_0))
q_sigmasq_entropy = alpha_mf + np.log(beta_mf) + special.gammaln(alpha_mf) \
- (1+alpha_mf)*special.digamma(alpha_mf)
return np.sum(p_mu_avgengy + q_mu_entropy + p_sigmasq_avgengy + q_sigmasq_entropy)
def log_likelihood(self, x, r=None, p=None):
    r = r if r is not None else self.r
    p = p if p is not None else self.p
    x_is_array = isinstance(x, np.ndarray)
    x = np.array(x, ndmin=1)
    if p > 0:
        xnn = x[x >= 0]
        raw = np.empty(x.shape)
        raw[x >= 0] = special.gammaln(r + xnn) - special.gammaln(r) \
            - special.gammaln(xnn + 1) + r*np.log(1-p) + xnn*np.log(p)
        raw[x < 0] = -np.inf
    else:
        # degenerate case p == 0: all probability mass at x == 0
        raw = np.full(x.shape, -np.inf)
        raw[x == 0] = 0.
    return raw if x_is_array else raw[0]
def _get_statistics(self,data):
# NOTE: since this isn't really in exponential family, this method needs
# to look at hyperparameters. form posterior hyperparameters for the p
# parameters here so we can integrate them out and get the r statistics
n, tot = super(NegativeBinomialIntegerR,self)._get_statistics(data)
if n > 0:
alpha_n, betas_n = self.alpha_0 + tot, self.beta_0 + self.r_support*n
data = flattendata(data)
log_marg_likelihoods = \
special.betaln(alpha_n, betas_n) \
- special.betaln(self.alpha_0, self.beta_0) \
+ (special.gammaln(data[:,na]+self.r_support)
- special.gammaln(data[:,na]+1) \
- special.gammaln(self.r_support)).sum(0)
else:
log_marg_likelihoods = np.zeros_like(self.r_support)
return n, tot, log_marg_likelihoods
# Source: vector_student_t_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def log_prob(self, x):
def _compute(df, loc, scale_tril, x):
k = scale_tril.shape[-1]
ildj = np.sum(np.log(np.abs(np.diag(scale_tril))), axis=-1)
logz = ildj + k * (0.5 * np.log(df) +
0.5 * np.log(np.pi) +
special.gammaln(0.5 * df) -
special.gammaln(0.5 * (df + 1.)))
y = linalg.solve_triangular(scale_tril, np.matrix(x - loc).T,
lower=True, overwrite_b=True)
logs = -0.5 * (df + 1.) * np.sum(np.log1p(y**2. / df), axis=-2)
return logs - logz
if not self._df.shape:
return _compute(self._df, self._loc, self._scale_tril, x)
return np.concatenate([
[_compute(self._df[i], self._loc[i], self._scale_tril[i], x[:, i, :])]
for i in range(len(self._df))]).T
def logprob_dc(counts, prior, axis=None):
"""Non-normalized log probability of a Dirichlet-Categorical distribution.
See https://en.wikipedia.org/wiki/Dirichlet-multinomial_distribution
"""
# Note that this excludes the factorial(counts) term, since we explicitly
# track permutations in assignments.
return gammaln(np.add(counts, prior, dtype=np.float32)).sum(axis)
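# Small illustrative call (hypothetical values): `counts` can be any array of
# nonnegative counts, `prior` a scalar or broadcastable array of Dirichlet
# concentrations, and `axis` controls which dimensions are summed.
counts_demo = np.array([[2, 0], [1, 3]])
print(logprob_dc(counts_demo, 0.5))           # scalar: summed over all entries
print(logprob_dc(counts_demo, 0.5, axis=0))   # one value per column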
def neg_bin_log_pmf(k, mu, phi):
""" Log(PMF) of negative binomial distribution with mean mu and dispersion phi,
conveniently parameterized.
Args:
k (int) - NB random variable
mu (float) - mean
phi (float) - dispersion
Returns:
The log of the pmf at k. """
r = 1.0/phi
return gammaln(r+k) - (gammaln(r) + gammaln(k+1)) + \
k * np.log(mu/(r+mu)) + \
r * np.log(r/(r+mu))
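# Sanity check (a sketch, assuming scipy.stats is available): with r = 1/phi and
# success probability r/(r+mu), this matches scipy's built-in parameterization.
from scipy import stats
k_demo, mu_demo, phi_demo = 4, 2.5, 0.3
r_demo = 1.0 / phi_demo
assert np.isclose(neg_bin_log_pmf(k_demo, mu_demo, phi_demo),
                  stats.nbinom.logpmf(k_demo, r_demo, r_demo / (r_demo + mu_demo)))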
def trunc_logLik(data, mu, alpha):
    # `data` maps each observed count i >= 1 to its frequency; every i in
    # 1..max(data) must be present (use 0 for unobserved counts).
    log_1_plus_a_mu = np.log(1 + alpha*mu)
    log_1_minus_prob_zero = np.log(1.0 - np.exp(-np.log(1.0 + alpha*mu)/alpha))
    alpha_inv = 1.0/alpha
    lim = int(max(data.keys()))
    holding_val = 0.0
    log_L = 0.0
    for i in range(1, lim+1):
        holding_val += np.log(1 + alpha*(i-1))
        log_L += data[i] * (holding_val - special.gammaln(i) + i*np.log(mu)
                            - (i+alpha_inv)*log_1_plus_a_mu - log_1_minus_prob_zero)
    return log_L
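# Illustrative call (hypothetical data): keys are observed counts i >= 1,
# values are how many times each count was seen; include zeros for gaps.
data_demo = {1: 12, 2: 7, 3: 3, 4: 0, 5: 1}
ll_demo = trunc_logLik(data_demo, mu=1.4, alpha=0.8)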
def log_multinomial_coefficient(list_of_primitive_rules):
    """
    Count copies of each primitive and calculate a log multinomial coefficient.
    """
    list_of_tuples = [p.as_tuple() for p in list_of_primitive_rules]
    counts = dict.fromkeys(set(list_of_tuples), 0)
    for t in list_of_tuples:
        counts[t] += 1
    N = len(list_of_primitive_rules)
    v = np.array(list(counts.values()))  # list() needed under Python 3
    return gammaln(N+1) - np.sum(gammaln(v+1))
def log_multinomial_coefficient(list_of_tuples):
    counts = dict.fromkeys(set(list_of_tuples), 0)
    for t in list_of_tuples:
        counts[t] += 1
    N = len(list_of_tuples)
    v = np.array(list(counts.values()))  # list() needed under Python 3
    return gammaln(N+1) - np.sum(gammaln(v+1))
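# Quick worked check for the tuple-based variant above: three items with one
# duplicate give log(3!/(2! * 1!)) = log(3).
tuples_demo = [('a',), ('a',), ('b',)]
assert np.isclose(log_multinomial_coefficient(tuples_demo), np.log(3.0))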
####
# Parameters (this should be made user-configurable later)
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
def estimate_with_fixed_beta(self, data, beta):
mu = median(data)
v = mean((data - mu) ** 2)
alpha = sqrt(v * exp(gammaln(1. / beta) - gammaln(3. / beta)))
return mu, alpha
def log_prob(self, x):
mu = self.mu
alpha = self.alpha
beta = self.beta
return log(beta / (2.0 * alpha)) - gammaln(1. / beta) - power(fabs(x - mu) / alpha, beta)
def log_prob(self, x):
a, b = self['alpha'], self['beta']
return a * log(b) - gammaln(clip(a, 1e-308, 1e308)) + \
(a - 1) * log(clip(x, 1e-308, 1e308)) - b * x