def updateGamma(self):
    task_matrices = np.zeros((self.n_tasks, self.num_feats, self.num_feats))
    for m in range(self.n_tasks):
        rho_vector = rhoFunction(np.array(self.xi[m]))
        rho_vector = rho_vector.reshape((1, -1))  # make rho vector 2D
        task_X = self.task_dict[m]['X']
        # Note that the transposing doesn't exactly match the paper
        # because our data format is slightly different.
        rho_matrix = abs(rho_vector) * task_X.T
        task_matrices[m, :, :] = np.dot(rho_matrix, task_X)

    for k in range(self.K):
        inner_sum = np.zeros((self.num_feats, self.num_feats))
        for m in range(self.n_tasks):
            inner_sum = inner_sum + self.phi[m, k] * task_matrices[m, :, :]
        self.gamma[k] = la.inv(la.inv(self.sigma) + 2 * inner_sum)
        if self.debug:
            print("gamma computation {0}".format(k),
                  la.det(la.inv(self.sigma) + 2 * inner_sum))
def build_gradient_matrix(self, x):
    """
    :param x: grid index
    :return: 2x2 nabla (finite-difference) matrix at grid point x
    """
    if x == 0 or x == self.d - 1:
        raise ValueError("Boundary conditions not treated here")
    nabla_matrix = np.zeros((2, 2))
    nabla_minus = self.grid[x - 1] - self.grid[x]
    nabla_plus = self.grid[x + 1] - self.grid[x]
    nabla_matrix[0, 0] = nabla_minus
    nabla_matrix[0, 1] = nabla_plus
    nabla_matrix[1, 0] = nabla_minus * nabla_minus
    nabla_matrix[1, 1] = nabla_plus * nabla_plus
    if linalg.det(nabla_matrix) == 0:
        raise ValueError("Nabla is not invertible")
    return nabla_matrix
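# A minimal usage sketch for build_gradient_matrix, assuming it is defined
# as a plain function; the host object only needs `grid` and `d` attributes,
# stubbed here with types.SimpleNamespace.
import types
import numpy as np
from scipy import linalg

obj = types.SimpleNamespace(grid=np.array([0.0, 0.4, 1.0, 1.9]), d=4)
print(build_gradient_matrix(obj, 2))
# [[-0.6   0.9 ]
#  [ 0.36  0.81]]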
def normal_density_at_zero(m, c):
    """ Compute the normal density with given mean and covariance at zero.

    Parameters
    ----------
    m : vector
        Mean.
    c : ndarray
        Covariance matrix. Assumption: c is square matrix and its size is
        compatible with that of m.

    Returns
    -------
    g : float
        Computed density value.

    """
    dim = len(m)
    g = 1 / ((2 * pi)**(dim / 2) * sqrt(absolute(det(c)))) * \
        exp(-1/2 * dot(dot(m, inv(c)), m))
    return g
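# A minimal check of normal_density_at_zero (a sketch; assumes the numpy
# names pi, sqrt, absolute, exp, dot, det, inv used above are in scope,
# e.g. via the imports below).
import numpy as np
from numpy import pi, sqrt, absolute, exp, dot
from numpy.linalg import det, inv
from scipy.stats import multivariate_normal

m = np.array([0.5, -0.3])
c = np.array([[2.0, 0.4], [0.4, 1.0]])
print(normal_density_at_zero(m, c))
print(multivariate_normal(mean=m, cov=c).pdf(np.zeros(2)))  # same value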
def factor(self):
    """ Factorize the camera matrix into K, R, t as P = K[R|t]. """
    # factor first 3x3 part
    K, R = linalg.rq(self.P[:, :3])
    # make diagonal of K positive
    T = diag(sign(diag(K)))
    if linalg.det(T) < 0:
        T[1, 1] *= -1
    self.K = dot(K, T)
    self.R = dot(T, R)  # T is its own inverse
    self.t = dot(linalg.inv(self.K), self.P[:, 3])
    return self.K, self.R, self.t
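# A hedged round-trip check for factor(): build P = K[R|t] from known
# ingredients and recover them. The host object only needs a 3x4 array
# attribute P, stubbed here with types.SimpleNamespace.
import types
import numpy as np
from numpy import diag, sign, dot
from scipy import linalg

K = np.array([[1000.0, 0, 320], [0, 1000.0, 240], [0, 0, 1]])
R = np.eye(3)
t = np.array([[0.0], [0.0], [5.0]])
cam = types.SimpleNamespace(P=dot(K, np.hstack((R, t))))

K2, R2, t2 = factor(cam)
print(np.allclose(K2, K), np.allclose(R2, R), np.allclose(t2, t.ravel()))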
def bic(arr1, arr2, epsilon=1e-50):
    """Bayes Information Criterion"""
    # Notes: In the seminal paper "Speakers, environment and channel
    # change detection and clustering via the Bayesian Information
    # Criterion" by Chen and Gopalakrishnan, they use a growing window
    # approach, so it's not directly comparable when using a fixed
    # sliding window.
    arr = np.concatenate((arr1, arr2))
    detS1 = det(np.cov(arr1, rowvar=0))
    detS2 = det(np.cov(arr2, rowvar=0))
    N1 = arr1.shape[0]
    N2 = arr2.shape[0]
    N = arr.shape[0]
    detS = det(np.cov(arr, rowvar=0))
    d = 0.5 * N * np.log(detS) - 0.5 * N1 * np.log(detS1) \
        - 0.5 * N2 * np.log(detS2)
    p = arr.shape[1]
    # args.lambdac is a module-level CLI argument in the original script
    corr = args.lambdac * 0.5 * (p + 0.5 * p * (p + 1)) * np.log(N)
    d -= corr
    return d
def glr(arr1, arr2):
    """Generalized Likelihood Ratio"""
    S1 = np.cov(arr1, rowvar=0)
    S2 = np.cov(arr2, rowvar=0)
    N1 = arr1.shape[0]
    N2 = arr2.shape[0]
    N = float(N1 + N2)
    # This is the COV-only version, not optimized (revise) but more robust
    # to environment noise conditions.
    # See Ulpu's thesis, pages 30-31, also Gish et al. "Segregation of
    # Speakers for Speech Recognition and Speaker Identification".
    d = -(N / 2.0) * ((N1 / N) * np.log(det(S1)) + (N2 / N) * np.log(det(S2))
                      - np.log(det((N1 / N) * S1 + (N2 / N) * S2)))
    # Ulpu version:
    # Includes the mean, theoretically less robust:
    # arr = features[start:start+2*winsize]
    # S = cov(arr, rowvar=0)
    # d = -0.5*(N1*log(det(S1))+N2*log(det(S2))-N*log(det(S)))
    return d
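# A hedged demo of bic() and glr() on synthetic two-segment data. Both
# functions read a module-level `args.lambdac` (a CLI option in the
# original script), so it is stubbed here with types.SimpleNamespace.
import types
import numpy as np
from numpy.linalg import det

args = types.SimpleNamespace(lambdac=1.0)

rng = np.random.default_rng(0)
seg1 = rng.normal(0.0, 1.0, size=(200, 5))  # e.g. speaker-1 features
seg2 = rng.normal(0.5, 1.5, size=(200, 5))  # e.g. speaker-2 features
print("BIC:", bic(seg1, seg2))  # large value suggests a change point
print("GLR:", glr(seg1, seg2))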
def bic(arr1, arr2):
    """Bayes Information Criterion."""
    # Notes: In the seminal paper "Speakers, environment and channel
    # change detection and clustering via the Bayesian Information
    # Criterion" by Chen and Gopalakrishnan, they use a growing window
    # approach, so it's not directly comparable when using a fixed
    # sliding window.
    arr = np.concatenate((arr1, arr2))
    N1 = arr1.shape[0]
    N2 = arr2.shape[0]
    S1 = np.cov(arr1, rowvar=0)
    S2 = np.cov(arr2, rowvar=0)
    N = arr.shape[0]
    S = np.cov(arr, rowvar=0)
    d = 0.5 * N * np.log(det(S)) - 0.5 * N1 * np.log(det(S1)) \
        - 0.5 * N2 * np.log(det(S2))
    p = arr.shape[1]
    corr = args.lambdac * 0.5 * (p + 0.5 * p * (p + 1)) * np.log(N)
    d -= corr
    return d
def glr(arr1, arr2):
    """Generalized Likelihood Ratio"""
    N1 = arr1.shape[0]
    N2 = arr2.shape[0]
    S1 = np.cov(arr1, rowvar=0)
    S2 = np.cov(arr2, rowvar=0)
    N = float(N1 + N2)
    # This is the COV-only version, not optimized (revise) but more robust
    # to environment noise conditions.
    # See Ulpu's thesis, pages 30-31, also Gish et al. "Segregation of
    # Speakers for Speech Recognition and Speaker Identification".
    d = -(N / 2.0) * ((N1 / N) * np.log(det(S1)) + (N2 / N) * np.log(det(S2))
                      - np.log(det((N1 / N) * S1 + (N2 / N) * S2)))
    # Ulpu version:
    # Includes the mean, theoretically less robust:
    # arr = features[start:start+2*winsize]
    # S = cov(arr, rowvar=0)
    # d = -0.5*(N1*log(det(S1))+N2*log(det(S2))-N*log(det(S)))
    return d
def estimation(self, y, ds):
    """ Estimate KGV.

    Parameters
    ----------
    y : (number of samples, dimension)-ndarray
        One row of y corresponds to one sample.
    ds : int vector
        Dimensions of the individual subspaces in y; ds[i] = i^th
        subspace dimension.

    Returns
    -------
    i : float
        Estimated value of KGV.

    References
    ----------
    Francis Bach, Michael I. Jordan. Kernel Independent Component
    Analysis. Journal of Machine Learning Research, 3:1-48, 2002.

    Francis Bach, Michael I. Jordan. Learning graphical models with
    Mercer kernels. International Conference on Neural Information
    Processing Systems (NIPS), pages 1033-1040, 2002.

    Examples
    --------
    i = co.estimation(y,ds)

    """
    # verification:
    self.verification_compatible_subspace_dimensions(y, ds)

    num_of_samples = y.shape[0]
    tol = num_of_samples * self.eta
    r = compute_matrix_r_kcca_kgv(y, ds, self.kernel, tol, self.kappa)
    i = -log(det(r)) / 2
    return i
# Source: x_analytical_values.py -- project: adversarial-variational-bayes, author: gdikov
def analytical_value_h_shannon(distr, par):
    """ Analytical value of the Shannon entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    par : dictionary
        Parameters of the distribution. If distr = 'uniform': par["a"],
        par["b"], par["l"] <- lxU[a,b]. If distr = 'normal': par["cov"]
        is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Shannon entropy.

    """
    if distr == 'uniform':
        # par = {"a": a, "b": b, "l": l}
        h = log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"])))
    elif distr == 'normal':
        # par = {"cov": c}
        dim = par["cov"].shape[0]  # = c.shape[1]
        h = 1/2 * log((2 * pi * exp(1))**dim * det(par["cov"]))
        # = 1/2 * log(det(c)) + dim / 2 * log(2*pi) + dim / 2
    else:
        raise Exception('Distribution=?')

    return h
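# A quick consistency sketch for the 'normal' branch: compare against
# scipy.stats.multivariate_normal.entropy (assumes analytical_value_h_shannon
# above is in scope along with its numpy imports).
import numpy as np
from scipy.stats import multivariate_normal

c = np.array([[2.0, 0.3], [0.3, 1.0]])
print(analytical_value_h_shannon('normal', {"cov": c}))
print(multivariate_normal(mean=np.zeros(2), cov=c).entropy())  # same value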
# Source: x_analytical_values.py -- project: adversarial-variational-bayes, author: gdikov
def analytical_value_c_cross_entropy(distr1, distr2, par1, par2):
    """ Analytical value of the cross-entropy for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Names of the distributions.
    par1, par2 : dictionaries
        Parameters of the distributions. If distr1 = distr2 =
        'normal': par1["mean"], par1["cov"] and par2["mean"],
        par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    c : float
        Analytical value of the cross-entropy.

    """
    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']

        dim = len(m1)
        invc2 = inv(c2)
        diffm = m1 - m2
        c = 1/2 * (dim * log(2*pi) + log(det(c2)) + trace(dot(invc2, c1)) +
                   dot(diffm, dot(invc2, diffm)))
    else:
        raise Exception('Distribution=?')

    return c
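# Sketch: the cross-entropy of a normal with itself equals its Shannon
# entropy, which gives a cheap consistency check against
# analytical_value_h_shannon above (assumed in scope).
import numpy as np

par = {"mean": np.array([1.0, -2.0]),
       "cov": np.array([[1.5, 0.2], [0.2, 0.8]])}
print(analytical_value_c_cross_entropy('normal', 'normal', par, par))
print(analytical_value_h_shannon('normal', par))  # same value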
# Source: x_analytical_values.py -- project: adversarial-variational-bayes, author: gdikov
def analytical_value_i_shannon(distr, par):
    """ Analytical value of mutual information for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    par : dictionary
        Parameters of the distribution. If distr = 'normal': par["ds"],
        par["cov"] are the vector of component dimensions and the (joint)
        covariance matrix.

    Returns
    -------
    i : float
        Analytical value of the Shannon mutual information.

    """
    if distr == 'normal':
        c, ds = par["cov"], par["ds"]
        # 0, d_1, d_1+d_2, ..., d_1+...+d_{M-1}; starting indices of the
        # subspaces:
        cum_ds = cumsum(hstack((0, ds[:-1])))
        i = 1
        for m in range(len(ds)):
            idx = range(cum_ds[m], cum_ds[m] + ds[m])
            i *= det(c[ix_(idx, idx)])

        i = log(i / det(c)) / 2
    else:
        raise Exception('Distribution=?')

    return i
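# Sketch: for a block-diagonal joint covariance the subspaces are
# independent, so the Shannon mutual information should be ~0 (assumes the
# function above is in scope with its numpy imports).
import numpy as np

c = np.block([[np.array([[1.0, 0.4], [0.4, 1.0]]), np.zeros((2, 1))],
              [np.zeros((1, 2)), np.array([[2.0]])]])
print(analytical_value_i_shannon('normal', {"cov": c, "ds": np.array([2, 1])}))
# ~0.0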
# Source: x_analytical_values.py -- project: adversarial-variational-bayes, author: gdikov
def analytical_value_h_renyi(distr, alpha, par):
    """ Analytical value of the Renyi entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    alpha : float, alpha \ne 1
        Parameter of the Renyi entropy.
    par : dictionary
        Parameters of the distribution. If distr = 'uniform': par["a"],
        par["b"], par["l"] <- lxU[a,b]. If distr = 'normal': par["cov"]
        is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Renyi entropy.

    References
    ----------
    Kai-Sheng Song. Renyi information, loglikelihood and an intrinsic
    distribution measure. Journal of Statistical Planning and Inference,
    93:51-69, 2001.

    """
    if distr == 'uniform':
        # par = {"a": a, "b": b, "l": l}
        # We also apply the transformation rule of the Renyi entropy in
        # case of linear transformations:
        h = log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"])))
    elif distr == 'normal':
        # par = {"cov": c}
        dim = par["cov"].shape[0]  # = c.shape[1]
        h = log((2*pi)**(dim / 2) * sqrt(absolute(det(par["cov"])))) - \
            dim * log(alpha) / 2 / (1 - alpha)
    else:
        raise Exception('Distribution=?')

    return h
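# Sketch: as alpha -> 1 the Renyi entropy approaches the Shannon entropy,
# so the two analytical values above should nearly agree for alpha ~ 1.
import numpy as np

c = np.array([[1.0, 0.2], [0.2, 0.5]])
print(analytical_value_h_renyi('normal', 0.999, {"cov": c}))
print(analytical_value_h_shannon('normal', {"cov": c}))  # close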
# Source: x_analytical_values.py -- project: adversarial-variational-bayes, author: gdikov
def analytical_value_h_sharma_mittal(distr, alpha, beta, par):
    """ Analytical value of the Sharma-Mittal entropy.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    alpha : float, 0 < alpha \ne 1
        Parameter of the Sharma-Mittal entropy.
    beta : float, beta \ne 1
        Parameter of the Sharma-Mittal entropy.
    par : dictionary
        Parameters of the distribution. If distr = 'normal': par["cov"]
        = covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Sharma-Mittal entropy.

    References
    ----------
    Frank Nielsen and Richard Nock. A closed-form expression for the
    Sharma-Mittal entropy of exponential families. Journal of Physics A:
    Mathematical and Theoretical, 45:032003, 2012.

    """
    if distr == 'normal':
        # par = {"cov": c}
        c = par['cov']
        dim = c.shape[0]  # = c.shape[1]
        h = (((2*pi)**(dim / 2) * sqrt(absolute(det(c))))**(1 - beta) /
             alpha**(dim * (1 - beta) / (2 * (1 - alpha))) - 1) / \
            (1 - beta)
    else:
        raise Exception('Distribution=?')

    return h
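# Sketch: in the alpha -> 1, beta -> 1 limit the Sharma-Mittal entropy
# recovers the Shannon entropy, which we can probe numerically (the values
# agree only approximately at 0.999).
import numpy as np

c = np.array([[1.0, 0.5], [0.5, 2.0]])
print(analytical_value_h_sharma_mittal('normal', 0.999, 0.999, {"cov": c}))
print(analytical_value_h_shannon('normal', {"cov": c}))  # close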
# Source: x_analytical_values.py -- project: adversarial-variational-bayes, author: gdikov
def analytical_value_i_renyi(distr, alpha, par):
    """ Analytical value of the Renyi mutual information.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    alpha : float
        Parameter of the Renyi mutual information.
    par : dictionary
        Parameters of the distribution. If distr = 'normal': par["cov"]
        is the covariance matrix.

    Returns
    -------
    i : float
        Analytical value of the Renyi mutual information.

    """
    if distr == 'normal':
        c = par["cov"]
        t1 = -alpha / 2 * log(det(c))
        t2 = -(1 - alpha) / 2 * log(prod(diag(c)))
        t3 = log(det(alpha * inv(c) + (1 - alpha) * diag(1 / diag(c)))) / 2
        i = 1 / (alpha - 1) * (t1 + t2 - t3)
    else:
        raise Exception('Distribution=?')

    return i
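# Sketch: for a diagonal covariance the coordinates are independent, so the
# Renyi mutual information should vanish (up to floating-point error).
import numpy as np

c = np.diag([1.0, 2.0, 0.5])
print(analytical_value_i_renyi('normal', 0.7, {"cov": c}))  # ~0.0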
# Source: x_analytical_values.py -- project: adversarial-variational-bayes, author: gdikov
def analytical_value_d_hellinger(distr1, distr2, par1, par2):
    """ Analytical value of Hellinger distance for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 =
        'normal': par1["mean"], par1["cov"] and par2["mean"],
        par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    d : float
        Analytical value of the Hellinger distance.

    """
    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']

        # see "https://en.wikipedia.org/wiki/Hellinger_distance", Examples:
        diffm = m1 - m2
        avgc = (c1 + c2) / 2
        inv_avgc = inv(avgc)
        d = 1 - det(c1)**(1/4) * det(c2)**(1/4) / sqrt(det(avgc)) * \
            exp(-dot(diffm, dot(inv_avgc, diffm)) / 8)  # D^2
        d = sqrt(d)
    else:
        raise Exception('Distribution=?')

    return d
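# Sketch: the Hellinger distance is 0 between identical normals and tends
# to 1 for far-apart ones, giving two easy spot checks.
import numpy as np

near = {"mean": np.zeros(2), "cov": np.eye(2)}
far = {"mean": 50 * np.ones(2), "cov": np.eye(2)}
print(analytical_value_d_hellinger('normal', 'normal', near, near))  # 0.0
print(analytical_value_d_hellinger('normal', 'normal', near, far))   # ~1.0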
def logsqrtdet(self):
    # Sigma = L^T L -- but be careful: only the lower triangle and
    # diagonal of L are actually initialized; the upper triangle is
    # garbage.
    L, _lower = self._cholesky
    # det Sigma = det L^T L = det L^T det L = (det L)^2. Since L is
    # triangular, its determinant is the product of its diagonal. To
    # compute log sqrt(det Sigma) = log det L, we sum the logs of its
    # diagonal.
    return np.sum(np.log(np.diag(L)))
def logsqrtdet(self):
    return (1/2) * np.log(la.det(self._Sigma))
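# A sketch contrasting the two logsqrtdet strategies above: summing the log
# of the Cholesky diagonal stays finite where det() can under/overflow.
import numpy as np
from scipy.linalg import cho_factor

Sigma = np.array([[4.0, 1.0], [1.0, 3.0]])
L, _lower = cho_factor(Sigma, lower=True)
print(np.sum(np.log(np.diag(L))))          # Cholesky route
print(0.5 * np.log(np.linalg.det(Sigma)))  # direct route, same value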
def logpdf(X, Mu, Sigma):
    """Multivariate normal log pdf."""
    # This is the multivariate normal log pdf for an array X of n
    # outputs, an array Mu of n means, and an n-by-n positive-definite
    # covariance matrix Sigma. The direct-space density is:
    #
    #   P(X | Mu, Sigma)
    #     = ((2 pi)^n det Sigma)^(-1/2)
    #       exp((-1/2) (X - Mu)^T Sigma^-1 (X - Mu)).
    #
    # We want this in log-space, so we have
    #
    #   log P(X | Mu, Sigma)
    #     = (-1/2) (X - Mu)^T Sigma^-1 (X - Mu)
    #       - log ((2 pi)^n det Sigma)^(1/2)
    #     = (-1/2) (X - Mu)^T Sigma^-1 (X - Mu)
    #       - (n/2) log (2 pi) - (1/2) log det Sigma.
    #
    n = len(X)
    assert X.shape == (n,)
    assert Mu.shape == (n,)
    assert Sigma.shape == (n, n)
    assert np.all(np.isfinite(X))
    assert np.all(np.isfinite(Mu))
    assert np.all(np.isfinite(Sigma))

    X_ = X - Mu
    covf = _covariance_factor(Sigma)
    logp = -np.dot(X_.T, covf.solve(X_)/2.)
    logp -= (n/2.)*np.log(2*np.pi)
    logp -= covf.logsqrtdet()
    # Convert 1x1 matrix to float.
    return float(logp)
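# Sketch: cross-check logpdf() against scipy.stats.multivariate_normal
# (assumes the source's _covariance_factor helper is available in scope).
import numpy as np
from scipy.stats import multivariate_normal

X = np.array([0.3, -1.2])
Mu = np.zeros(2)
Sigma = np.array([[2.0, 0.5], [0.5, 1.0]])
print(logpdf(X, Mu, Sigma))
print(multivariate_normal(mean=Mu, cov=Sigma).logpdf(X))  # same value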
def loglikelihood_path(self, x, y):
    A, B, D, F = self.A, self.B, self.D, self.F
    k, T = y.shape
    FF = F @ F.T
    FFinv = la.inv(FF)
    temp = y[:, 1:] - y[:, :-1] - D @ x[:, :-1]
    obs = temp * FFinv * temp
    obssum = np.cumsum(obs)
    scalar = (np.log(la.det(FF)) + k * np.log(2 * np.pi)) * np.arange(1, T)
    return -(.5) * (obssum + scalar)
def factor(self):
    # factor first 3x3 part
    K, R = linalg.rq(self.P[:, :3])
    # make diagonal of K positive
    T = diag(sign(diag(K)))
    if linalg.det(T) < 0:
        T[1, 1] *= -1
    self.K = dot(K, T)
    self.R = dot(T, R)  # T is its own inverse
    self.t = dot(linalg.inv(self.K), self.P[:, 3])
    return self.K, self.R, self.t
def bic(arr1, arr2, arr, i=0, saved={}):
    """Bayes Information Criterion

    Notes: In the seminal paper "Speakers, environment and channel change
    detection and clustering via the Bayesian Information Criterion" by Chen
    and Gopalakrishnan, they use a growing window approach, so it's not
    directly comparable when using a fixed sliding window.

    In BIC, we can save the first matrix calculations since in growing windows
    these keep repeating all the time, and we are saving just one float so
    it's also memory efficient and saves a lot of time (Antonio)"""
    if i in saved:
        c1 = saved[i]
    else:
        S1 = np.cov(arr1, rowvar=0)
        N1 = arr1.shape[0]
        c1 = 0.5 * N1 * np.log(det(S1))
        saved[i] = c1
    S2 = np.cov(arr2, rowvar=0)
    N2 = arr2.shape[0]
    N = arr.shape[0]
    S = np.cov(arr, rowvar=0)
    d = 0.5 * N * np.log(det(S)) - c1 \
        - 0.5 * N2 * np.log(det(S2))
    p = arr.shape[1]
    corr = args.lambdac * 0.5 * (p + 0.5 * p * (p + 1)) * np.log(N)
    d -= corr
    return d
def matching_point(en, rot, V, R, mu):
    """ Estimate the matching-point position for the inward and outward
    solutions, based on the determinant of the R-matrix.

    Parameters
    ----------
    en : float
        potential energy of the solution
    rot : int
        rotational quantum number J
    V : numpy 3d array
        potential curve and couplings matrix
    R : numpy 1d array
        internuclear distance grid
    mu : float
        reduced mass in kg

    Returns
    -------
    mx : int
        matching point grid index
    """
    oo, n, m = V.shape
    Vm = min([V[-1][j][j].min() for j in range(n)])  # lowest dissociation energy
    if en > Vm:
        return oo - 1
    else:
        Vnn = np.transpose(V)[-1][-1]  # -1 -1 highest PEC?
        mx = np.abs(Vnn - en).argmin()
        WI = WImat(en, rot, V, R, mu)
        Rm = RImat(WI, mx)
        while linalg.det(Rm[mx]) > 1:
            mx -= 1
        return mx
def eigen(energy, rot, mx, V, R, mu):
    """ Determine the eigen energy solution, based on the R-matrix
    matching condition.

    Parameters
    ----------
    energy : float
        energy (eV) of the attempted solution
    rot : int
        rotational quantum number
    mx : int
        matching point index, for inward and outward solutions
    V : numpy 3d array
        potential energy curve and coupling matrix
    R : numpy 1d array
        internuclear distance grid
    mu : float
        reduced mass in kg

    Returns
    -------
    eigenvalue : float
        energy of the solution
    """
    WI = WImat(energy, rot, V, R, mu)
    RI = RImat(WI, mx)
    # | R_mx - R^-1_mx+1 |
    return linalg.det(linalg.inv(RI[mx]) - RI[mx + 1])
def compute_phi_prior(self):
    phi = 0
    for d in range(self.Dout):
        # s, a = npalg.slogdet(self.Kuu[d])
        a = np.log(npalg.det(self.Kuu[d]))
        phi += 0.5 * a
    return phi
def compute_phi_posterior(self):
    phi = 0
    for d in range(self.Dout):
        mud_val = self.mu[d].get_value()
        Sud_val = self.Su[d].get_value()
        # s, a = npalg.slogdet(Sud_val)
        a = np.log(npalg.det(Sud_val))
        phi += 0.5 * np.dot(mud_val.T, np.dot(npalg.inv(Sud_val), mud_val))[0, 0]
        phi += 0.5 * a
    return phi
def compute_phi_cavity(self):
    phi = 0
    for d in range(self.Dout):
        muhatd_val = self.muhat[d].get_value()
        Suhatd_val = self.Suhat[d].get_value()
        # s, a = npalg.slogdet(Sud_val)
        a = np.log(npalg.det(Suhatd_val))
        phi += 0.5 * np.dot(muhatd_val.T, np.dot(npalg.inv(Suhatd_val), muhatd_val))[0, 0]
        phi += 0.5 * a
    return phi
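# The commented-out slogdet calls above point at the numerically safer
# alternative: np.linalg.slogdet avoids the under/overflow that det() hits
# on large matrices. A minimal illustration:
import numpy as np

Kuu = 1e-3 * np.eye(500)
sign, logdet = np.linalg.slogdet(Kuu)
print(np.log(np.linalg.det(Kuu)))  # -inf: det() underflows to 0.0
print(logdet)                      # approx -3453.9, the correct value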
def projection(X, Y):
    rankX = rank(X)
    rankY = rank(Y)
    # rank, or dimension, of the original space
    rankO = rankX + rankY
    # check if the two subspaces have the same shapes
    if X.shape != Y.shape:
        raise Exception('The two subspaces do not have the same shapes')
    # check if O is singular
    if la.det(np.hstack((X, Y))) == 0:
        raise Exception('X + Y is not the direct sum of the original space')
    # check whether each subspace is of full column/row rank
    if rankX < min(X.shape):
        raise Exception('subspace X is not of full rank')
    elif rankY < min(Y.shape):
        raise Exception('subspace Y is not of full rank')
    # X and Y are of full column rank
    # (note: `and`, not `&` -- bitwise & binds tighter than `==`)
    elif rankX == X.shape[1] and rankY == Y.shape[1]:
        return np.hstack((X, np.zeros((X.shape[0], rankO - rankX)))).dot(
            la.inv(np.hstack((X, Y))))
    # X and Y are of full row rank
    elif rankX == X.shape[0] and rankY == Y.shape[0]:
        return np.vstack((X, np.zeros((rankO - rankX, X.shape[1])))).dot(
            la.inv(np.vstack((X, Y))))
    # orthogonal projection matrix
def estimation(self, y):
    """ Estimate Shannon entropy.

    Parameters
    ----------
    y : (number of samples, dimension)-ndarray
        One row of y corresponds to one sample.

    Returns
    -------
    h : float
        Estimated Shannon entropy.

    References
    ----------
    Quing Wang, Sanjeev R. Kulkarni, and Sergio Verdu. Universal
    estimation of information measures for analog sources. Foundations
    and Trends in Communications and Information Theory, 5:265-353,
    2009.

    Examples
    --------
    h = co.estimation(y)

    """
    num_of_samples, dim = y.shape  # number of samples, dimension
    # estimate the mean and the covariance of y:
    m = mean(y, axis=0)
    c = cov(y, rowvar=False)  # 'rowvar=False': 1 row = 1 observation

    # entropy of N(m,c):
    if dim == 1:
        det_c = c  # det(): 'expected square matrix' exception
        # multivariate_normal(): 'cov must be 2 dimensional and square'
        # exception:
        c = array([[c]])
    else:
        det_c = det(c)

    h_normal = 1/2 * log((2*pi*exp(1))**dim * det_c)

    # generate samples from N(m,c):
    y_normal = multivariate_normal(m, c, num_of_samples)

    h = h_normal - self.kl_co.estimation(y, y_normal)
    return h
# Source: x_analytical_values.py -- project: adversarial-variational-bayes, author: gdikov
def analytical_value_k_prob_product(distr1, distr2, rho, par1, par2):
    """ Analytical value of the probability product kernel.

    Parameters
    ----------
    distr1, distr2 : str
        Names of the distributions.
    rho : float, >0
        Parameter of the probability product kernel.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 =
        'normal': par1["mean"], par1["cov"] and par2["mean"],
        par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    k : float
        Analytical value of the probability product kernel.

    """
    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        dim = len(m1)

        # inv1, inv2, inv12:
        inv1, inv2 = inv(c1), inv(c2)
        inv12 = inv(inv1 + inv2)

        m12 = dot(inv1, m1) + dot(inv2, m2)
        exp_arg = \
            dot(m1, dot(inv1, m1)) + dot(m2, dot(inv2, m2)) - \
            dot(m12, dot(inv12, m12))

        k = (2 * pi)**((1 - 2 * rho) * dim / 2) * rho**(-dim / 2) * \
            absolute(det(inv12))**(1 / 2) * \
            absolute(det(c1))**(-rho / 2) * \
            absolute(det(c2))**(-rho / 2) * exp(-rho / 2 * exp_arg)
    else:
        raise Exception('Distribution=?')

    return k
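# Sketch: at rho = 1/2 the probability product kernel is the Bhattacharyya
# coefficient, so 1 - k should equal the squared Hellinger distance from
# analytical_value_d_hellinger above (assumed in scope).
import numpy as np

par1 = {"mean": np.zeros(2), "cov": np.eye(2)}
par2 = {"mean": np.array([1.0, -1.0]), "cov": 2 * np.eye(2)}
k = analytical_value_k_prob_product('normal', 'normal', 0.5, par1, par2)
d = analytical_value_d_hellinger('normal', 'normal', par1, par2)
print(1 - k, d**2)  # should agree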