def compute_cavity(self, idxs, alpha):
# deletion
p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :]
k_i = self.Kfu[idxs, :]
k_ii = self.Kff_diag[idxs][:, np.newaxis]
gamma = self.gamma
beta = self.beta
h_si = p_i - np.einsum('dab,nb->nda', beta, k_i)
variance_i = self.variances[idxs, :]
mean_i = self.means[idxs, :]
dlogZd_dmi2 = 1.0 / (variance_i/alpha -
np.sum(k_i[:, np.newaxis, :] * h_si, axis=2))
dlogZd_dmi = -dlogZd_dmi2 * (mean_i -
np.sum(k_i[:, np.newaxis, :] * gamma, axis=2))
hd1 = h_si * dlogZd_dmi[:, :, np.newaxis]
hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis]
gamma_si = gamma + hd1
beta_si = beta - hd2h
# projection
h = p_i - np.einsum('ndab,nb->nda', beta_si, k_i)
m_si_i = np.einsum('na,nda->nd', k_i, gamma_si)
v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i)
return m_si_i, v_si_ii, [h, beta_si, gamma_si]
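The batched contractions above ('dab,nb->nda' and 'ndab,nb->nda') are per-point matrix-vector products. A minimal standalone sketch with made-up shapes, not taken from the original project:

import numpy as np

# Hypothetical sizes: n data points, d outputs, a latent dimensions.
n, d, a = 4, 3, 2
beta_si = np.random.randn(n, d, a, a)
k_i = np.random.randn(n, a)

# 'ndab,nb->nda' contracts each (a, b) block with the matching vector k_i[n].
h = np.einsum('ndab,nb->nda', beta_si, k_i)
h_loop = np.stack([beta_si[i] @ k_i[i] for i in range(n)])
assert np.allclose(h, h_loop)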
def project3Dto2D(self, Li, idxs):
"""
Project 3D point to 2D
:param Li: joints in normalized 3D
:param idxs: frames specified by subset
:return: 2D points, in normalized 2D coordinates
"""
if not isinstance(idxs, numpy.ndarray):
idxs = numpy.asarray([idxs])
# 3D -> 2D projection also shift by M to cropped window
Li_glob3D = (numpy.reshape(Li, (len(idxs), self.numJoints, 3))*self.Di_scale[idxs][:, None, None]+self.Di_off3D[idxs][:, None, :]).reshape((len(idxs)*self.numJoints, 3))
Li_glob3D_hom = numpy.concatenate([Li_glob3D, numpy.ones((len(idxs)*self.numJoints, 1), dtype='float32')], axis=1)
Li_glob2D_hom = numpy.dot(Li_glob3D_hom, self.cam_proj.T)
Li_glob2D = (Li_glob2D_hom[:, 0:3] / Li_glob2D_hom[:, 3][:, None]).reshape((len(idxs), self.numJoints, 3))
Li_img2D_hom = numpy.einsum('ijk,ikl->ijl', Li_glob2D, self.Di_trans2D[idxs])
Li_img2D = (Li_img2D_hom[:, :, 0:2] / Li_img2D_hom[:, :, 2][:, :, None]).reshape((len(idxs), self.numJoints*2))
Li_img2Dcrop = (Li_img2D - (self.Di.shape[3]/2.)) / (self.Di.shape[3]/2.)
return Li_img2Dcrop
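The 'ijk,ikl->ijl' signature used for Li_img2D_hom is a batched matrix product. A small, self-contained check with made-up sizes:

import numpy

pts = numpy.random.randn(5, 14, 3)    # hypothetical (frames, joints, 3) homogeneous points
trans = numpy.random.randn(5, 3, 3)   # one 2D transform per frame
out = numpy.einsum('ijk,ikl->ijl', pts, trans)
assert numpy.allclose(out, numpy.matmul(pts, trans))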
def compute_dr_wrt(self, wrt):
if wrt is not self.v:
return None
v = self.v.r.reshape(-1, 3)
blocks = -np.einsum('ij,ik->ijk', v, v) * (self.ss**(-3./2.)).reshape((-1, 1, 1))
for i in range(3):
blocks[:, i, i] += self.s_inv
if True: # pylint: disable=using-constant-test
data = blocks.ravel()
indptr = np.arange(0, (self.v.r.size+1)*3, 3)
indices = col(np.arange(0, self.v.r.size))
indices = np.hstack([indices, indices, indices])
indices = indices.reshape((-1, 3, 3))
indices = indices.transpose((0, 2, 1)).ravel()
result = sp.csc_matrix((data, indices, indptr), shape=(self.v.r.size, self.v.r.size))
return result
else:
matvec = lambda x: np.einsum('ijk,ik->ij', blocks, x.reshape((blocks.shape[0], 3))).ravel()
return sp.linalg.LinearOperator((self.v.r.size, self.v.r.size), matvec=matvec)
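The 'ij,ik->ijk' term in compute_dr_wrt builds one 3x3 outer product per vertex. A quick sketch with random data, not from the original project:

import numpy as np

v = np.random.randn(6, 3)               # hypothetical vertex array
blocks = np.einsum('ij,ik->ijk', v, v)  # blocks[i] == outer(v[i], v[i])
assert np.allclose(blocks[2], np.outer(v[2], v[2]))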
def test_einsum_misc(self):
# This call used to crash because of a bug in
# PyArray_AssignZero
a = np.ones((1, 2))
b = np.ones((2, 2, 1))
assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
# The iterator had an issue with buffering this reduction
a = np.ones((5, 12, 4, 2, 3), np.int64)
b = np.ones((5, 12, 11), np.int64)
assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
np.einsum('ijklm,ijn->', a, b))
# Issue #2027, was a problem in the contiguous 3-argument
# inner loop implementation
a = np.arange(1, 3)
b = np.arange(1, 5).reshape(2, 2)
c = np.arange(1, 9).reshape(4, 2)
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
[[[1, 3], [3, 9], [5, 15], [7, 21]],
[[8, 16], [16, 32], [24, 48], [32, 64]]])
def test_einsum_all_contig_non_contig_output(self):
# Issue gh-5907, tests that the all contiguous special case
# actually checks the contiguity of the output
x = np.ones((5, 5))
out = np.ones(10)[::2]
correct_base = np.ones(10)
correct_base[::2] = 5
# Always worked (inner iteration is done with 0-stride):
np.einsum('mi,mi,mi->m', x, x, x, out=out)
assert_array_equal(out.base, correct_base)
# Example 1:
out = np.ones(10)[::2]
np.einsum('im,im,im->m', x, x, x, out=out)
assert_array_equal(out.base, correct_base)
# Example 2, buffering causes x to be contiguous but
# special cases do not catch the operation before:
out = np.ones((2, 2, 2))[..., 0]
correct_base = np.ones((2, 2, 2))
correct_base[..., 0] = 2
x = np.ones((2, 2), np.float32)
np.einsum('ij,jk->ik', x, x, out=out)
assert_array_equal(out.base, correct_base)
def test_exKxz_pairwise(self):
covall = np.array([self.Xcov, self.Xcovc])
for k in self.kernels:
with self.test_context():
if isinstance(k, ekernels.Linear):
continue
k.compile()
exKxz = k.compute_exKxz_pairwise(self.Z, self.Xmu, covall)
Kxz = k.compute_K(self.Xmu[:-1, :], self.Z) # NxM
xKxz = np.einsum('nm,nd->nmd', Kxz, self.Xmu[1:, :])
self.assertTrue(np.allclose(xKxz, exKxz))
# def test_exKxz(self):
# for k in self.kernels:
# with self.test_session():
# if isinstance(k, ekernels.Linear):
# continue
# k.compile()
# exKxz = k.compute_exKxz(self.Z, self.Xmu, self.Xcov)
# Kxz = k.compute_K(self.Xmu, self.Z) # NxM
# xKxz = np.einsum('nm,nd->nmd', Kxz, self.Xmu)
# self.assertTrue(np.allclose(xKxz, exKxz))
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice)
if 'm' in self.params or 'c' in self.params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in self.params:
if self.covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self.covariance_type in ('tied', 'full'):
# posteriors: (nt, nc); obs: (nt, nf); obs: (nt, nf)
# -> (nc, nf, nf)
stats['obs*obs.T'] += np.einsum(
'ij,ik,il->jkl', posteriors, obs, obs)
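The 'ij,ik,il->jkl' accumulation sums, over time steps, the outer product of each observation with itself weighted by the per-component posterior. A minimal check with made-up sizes:

import numpy as np

nt, nc, nf = 10, 3, 4                   # hypothetical time steps, components, features
posteriors = np.random.rand(nt, nc)
obs = np.random.randn(nt, nf)
acc = np.einsum('ij,ik,il->jkl', posteriors, obs, obs)
ref = np.zeros((nc, nf, nf))
for t in range(nt):
    for c in range(nc):
        ref[c] += posteriors[t, c] * np.outer(obs[t], obs[t])
assert np.allclose(acc, ref)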
def __init__(self, matrix, w):
W = np.sum(w)
self.w = w
self.X = matrix
self.left = None
self.right = None
self.mu = np.einsum('ij,i->j', self.X, w)/W
diff = self.X - np.tile(self.mu, [np.shape(self.X)[0], 1])
t = np.einsum('ij,i->ij', diff, np.sqrt(w))
self.cov = (t.T @ t)/W + 1e-5*np.eye(3)
self.N = self.X.shape[0]
    V, D = np.linalg.eig(self.cov)
    self.lmbda = np.max(np.abs(V))
    # np.linalg.eig returns eigenvectors as the columns of its second output,
    # so select the column that matches the dominant eigenvalue.
    self.e = D[:, np.argmax(np.abs(V))]
# S is measurements vector - dim nxd
# w is weights vector - dim n
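The weighted statistics above rely on two signatures: 'ij,i->j' (a weighted column sum) and 'ij,i->ij' (row-wise scaling). A short sketch with random data:

import numpy as np

X = np.random.randn(8, 3)               # hypothetical measurements
w = np.random.rand(8)                   # hypothetical weights
mu = np.einsum('ij,i->j', X, w) / w.sum()
assert np.allclose(mu, (X * w[:, None]).sum(axis=0) / w.sum())
t = np.einsum('ij,i->ij', X - mu, np.sqrt(w))
assert np.allclose(t, (X - mu) * np.sqrt(w)[:, None])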
def _solve_2D2(self, X, Z):
"""Solves :math:`Z^T N^{-1}X`, where :math:`X`
and :math:`Z` are 2-d arrays.
"""
ZNX = np.dot(Z.T / self._nvec, X)
for slc, jv in zip(self._slices, self._jvec):
if slc.stop - slc.start > 1:
Zblock = Z[slc, :]
Xblock = X[slc, :]
niblock = 1 / self._nvec[slc]
beta = 1.0 / (np.einsum('i->', niblock)+1.0/jv)
zn = np.dot(niblock, Zblock)
xn = np.dot(niblock, Xblock)
ZNX -= beta * np.outer(zn.T, xn)
return ZNX
def ss_framerotate(mjd, planet, x, y, z, dz,
offset=None, equatorial=False):
"""
Rotate planet trajectory given as (n,3) tensor,
by ecliptic Euler angles x, y, z, and by z rate
dz. The rate has units of rad/year, and is referred
to offset 2010/1/1. dates must be given in MJD.
"""
if equatorial:
planet = eq2ecl_vec(planet)
E = euler_vec(z + dz * (mjd - t_offset) / 365.25, y, x,
planet.shape[0])
planet = np.einsum('ijk,ik->ij', E, planet)
if offset is not None:
planet = np.array(offset) + planet
if equatorial:
planet = ecl2eq_vec(planet)
return planet
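The 'ijk,ik->ij' pattern rotates every 3-vector by its own 3x3 matrix. A standalone check with random matrices, not an actual ephemeris:

import numpy as np

E = np.random.randn(7, 3, 3)            # hypothetical per-epoch rotation matrices
planet = np.random.randn(7, 3)
rotated = np.einsum('ijk,ik->ij', E, planet)
assert np.allclose(rotated, np.stack([E[i] @ planet[i] for i in range(7)]))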
def _log_prod_students_t(self, i, mu, log_prod_var, inv_var, v):
"""
Return the value of the log of the product of the univariate Student's
t PDFs at `X[i]`.
"""
delta = self.X[i, :] - mu
return (
self.D * (
self._cached_gammaln_by_2[v + 1] - self._cached_gammaln_by_2[v]
- 0.5*self._cached_log_v[v] - 0.5*self._cached_log_pi
)
- 0.5*log_prod_var
- (v + 1.)/2. * (np.log(1. + 1./v * np.square(delta) * inv_var)).sum()
)
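A standalone sanity check of the formula, written against scipy rather than the class caches (which are assumed to hold gammaln(v/2), log(v) and log(pi)); delta, inv_var and v below are made up:

import numpy as np
from scipy.special import gammaln
from scipy.stats import t as student_t

delta = np.array([0.3, -1.2, 0.5])      # hypothetical deviations from the mean
inv_var = np.array([1.0, 0.5, 2.0])     # hypothetical 1 / sigma^2 per dimension
v = 4                                   # degrees of freedom
D = delta.size
log_prod_var = -np.sum(np.log(inv_var))
ours = (D * (gammaln((v + 1) / 2.) - gammaln(v / 2.) - 0.5 * np.log(v) - 0.5 * np.log(np.pi))
        - 0.5 * log_prod_var
        - (v + 1.) / 2. * np.sum(np.log(1. + delta ** 2 * inv_var / v)))
ref = np.sum(student_t.logpdf(delta, df=v, scale=1. / np.sqrt(inv_var)))
assert np.allclose(ours, ref)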
#-----------------------------------------------------------------------------#
# UTILITY FUNCTIONS #
#-----------------------------------------------------------------------------#
# Below is slightly faster than np.sum, see http://stackoverflow.com/questions/
# 18365073/why-is-numpys-einsum-faster-than-numpys-built-in-functions
def setUp(self):
self.f = links.Bilinear(
self.in_shape[0], self.in_shape[1], self.out_size)
self.f.W.data[...] = _uniform(*self.f.W.data.shape)
self.f.V1.data[...] = _uniform(*self.f.V1.data.shape)
self.f.V2.data[...] = _uniform(*self.f.V2.data.shape)
self.f.b.data[...] = _uniform(*self.f.b.data.shape)
self.f.zerograds()
self.W = self.f.W.data.copy()
self.V1 = self.f.V1.data.copy()
self.V2 = self.f.V2.data.copy()
self.b = self.f.b.data.copy()
self.e1 = _uniform(self.batch_size, self.in_shape[0])
self.e2 = _uniform(self.batch_size, self.in_shape[1])
self.gy = _uniform(self.batch_size, self.out_size)
self.y = (
numpy.einsum('ij,ik,jkl->il', self.e1, self.e2, self.W) +
self.e1.dot(self.V1) + self.e2.dot(self.V2) + self.b)
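The 'ij,ik,jkl->il' signature is the bilinear form y[i, l] = sum_{j,k} e1[i, j] * e2[i, k] * W[j, k, l]. A small check against tensordot with made-up sizes:

import numpy

e1 = numpy.random.randn(2, 3)
e2 = numpy.random.randn(2, 4)
W = numpy.random.randn(3, 4, 5)
y = numpy.einsum('ij,ik,jkl->il', e1, e2, W)
y_ref = numpy.stack([e1[i] @ numpy.tensordot(W, e2[i], axes=(1, 0)) for i in range(2)])
assert numpy.allclose(y, y_ref)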
def inside_triangle(point, triangles):
    # Barycentric-coordinate test: the point lies inside a triangle
    # iff u >= 0, v >= 0 and u + v <= 1.
    v0 = triangles[:, 2] - triangles[:, 0]
    v1 = triangles[:, 1] - triangles[:, 0]
    v2 = point - triangles[:, 0]
    dot00 = np.einsum('ij,ij->i', v0, v0)
    dot01 = np.einsum('ij,ij->i', v0, v1)
    dot02 = np.einsum('ij,ij->i', v0, v2)
    dot11 = np.einsum('ij,ij->i', v1, v1)
    dot12 = np.einsum('ij,ij->i', v1, v2)
    invDenom = 1. / (dot00 * dot11 - dot01 * dot01)
    u = np.float16((dot11 * dot02 - dot01 * dot12) * invDenom)
    v = np.float16((dot00 * dot12 - dot01 * dot02) * invDenom)
    return (u >= 0) & (v >= 0) & (u + v <= 1)
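A hypothetical usage example: with a single unit right triangle, the point (0.25, 0.25) is inside and (2, 2) is not.

import numpy as np

tri = np.array([[[0., 0.], [0., 1.], [1., 0.]]])      # shape (1, 3, 2): one triangle
print(inside_triangle(np.array([0.25, 0.25]), tri))   # -> [ True]
print(inside_triangle(np.array([2.0, 2.0]), tri))     # -> [False]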
def omgp_model_bound(omgp):
''' Calculate the part of the omgp bound which does not depend
on the response variable.
'''
GP_bound = 0.0
LBs = []
# Precalculate the bound minus data fit,
# and LB matrices used for data fit term.
for i, kern in enumerate(omgp.kern):
K = kern.K(omgp.X)
B_inv = np.diag(1. / ((omgp.phi[:, i] + 1e-6) / omgp.variance))
Bi, LB, LBi, Blogdet = pdinv(K + B_inv)
LBs.append(LB)
# Penalty
GP_bound -= 0.5 * Blogdet
# Constant
GP_bound -= 0.5 * omgp.D * np.einsum('j,j->', omgp.phi[:, i], np.log(2 * np.pi * omgp.variance))
model_bound = GP_bound + omgp.mixing_prop_bound() + omgp.H
return model_bound, LBs
def _predict_output_derivatives(self, x):
n = x.shape[0]
nt = self.nt
ny = self.training_points[None][0][1].shape[1]
num = self.num
dy_dstates = np.empty(n * num['dof'])
self.rbfc.compute_jac(n, x.flatten(), dy_dstates)
dy_dstates = dy_dstates.reshape((n, num['dof']))
dstates_dytl = np.linalg.inv(self.mtx)
ones = np.ones(self.nt)
arange = np.arange(self.nt)
dytl_dyt = csc_matrix((ones, (arange, arange)), shape=(num['dof'], self.nt))
dy_dyt = (dytl_dyt.T.dot(dstates_dytl.T).dot(dy_dstates.T)).T
dy_dyt = np.einsum('ij,k->ijk', dy_dyt, np.ones(ny))
return {None: dy_dyt}
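The 'ij,k->ijk' step only replicates the (n, nt) derivative along a new trailing axis of length ny. A tiny check with made-up sizes:

import numpy as np

dy_dyt = np.random.randn(4, 6)          # hypothetical (n, nt) Jacobian
ny = 3
tiled = np.einsum('ij,k->ijk', dy_dyt, np.ones(ny))
assert np.allclose(tiled, np.repeat(dy_dyt[:, :, None], ny, axis=2))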
def get_power_spectral_density_matrix(observation, mask=None):
"""
Calculates the weighted power spectral density matrix.
This does not yet work with more than one target mask.
:param observation: Complex observations with shape (bins, sensors, frames)
:param mask: Masks with shape (bins, frames) or (bins, 1, frames)
:return: PSD matrix with shape (bins, sensors, sensors)
"""
bins, sensors, frames = observation.shape
if mask is None:
mask = np.ones((bins, frames))
if mask.ndim == 2:
mask = mask[:, np.newaxis, :]
normalization = np.maximum(np.sum(mask, axis=-1, keepdims=True), 1e-6)
psd = np.einsum('...dt,...et->...de', mask * observation,
observation.conj())
psd /= normalization
return psd
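A made-up usage example: random complex observations with 257 bins, 4 sensors and 100 frames give one 4x4 PSD matrix per frequency bin.

import numpy as np

obs = np.random.randn(257, 4, 100) + 1j * np.random.randn(257, 4, 100)
psd = get_power_spectral_density_matrix(obs)
print(psd.shape)   # (257, 4, 4)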
def get_mvdr_vector(atf_vector, noise_psd_matrix):
"""
Returns the MVDR beamforming vector.
:param atf_vector: Acoustic transfer function vector
with shape (..., bins, sensors)
:param noise_psd_matrix: Noise PSD matrix
with shape (bins, sensors, sensors)
:return: Set of beamforming vectors with shape (..., bins, sensors)
"""
while atf_vector.ndim > noise_psd_matrix.ndim - 1:
noise_psd_matrix = np.expand_dims(noise_psd_matrix, axis=0)
# Make sure matrix is hermitian
noise_psd_matrix = 0.5 * (
noise_psd_matrix + np.conj(noise_psd_matrix.swapaxes(-1, -2)))
numerator = solve(noise_psd_matrix, atf_vector)
denominator = np.einsum('...d,...d->...', atf_vector.conj(), numerator)
beamforming_vector = numerator / np.expand_dims(denominator, axis=-1)
return beamforming_vector
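The denominator signature '...d,...d->...' is a per-bin inner product over the sensor axis; note that einsum does not conjugate anything by itself, which is why the conjugation is written out explicitly. A tiny check with random data:

import numpy as np

atf = np.random.randn(5, 3) + 1j * np.random.randn(5, 3)   # hypothetical (bins, sensors)
num = np.random.randn(5, 3) + 1j * np.random.randn(5, 3)
den = np.einsum('...d,...d->...', atf.conj(), num)
assert np.allclose(den, np.sum(atf.conj() * num, axis=-1))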
def apply_sdw_mwf(mix, target_psd_matrix, noise_psd_matrix, mu=1, corr=None):
    """
    Apply the speech distortion weighted MWF: h = Tpsd * e1 / (Tpsd + mu*Npsd).
    :param mix: the complex signal STFT with shape (bins, sensors, frames)
    :param target_psd_matrix: target PSD matrix with shape (bins, sensors, sensors)
    :param noise_psd_matrix: noise PSD matrix with shape (bins, sensors, sensors)
    :param mu: the Lagrange factor trading off speech distortion against noise reduction
    :param corr: optional per-channel correlation scores used to pick the reference channel
    :return: filtered output with shape (bins, frames)
    """
    bins, sensors, frames = mix.shape
    ref_vector = np.zeros((sensors, 1), dtype=float)
    if corr is None:
        ref_ch = 0
    else:  # choose the channel with the highest correlation with the others
        corr = corr.tolist()
        while len(corr) > sensors:
            corr.remove(np.min(corr))
        ref_ch = np.argmax(corr)
    ref_vector[ref_ch, 0] = 1
    mwf_vector = solve(target_psd_matrix + mu * noise_psd_matrix, target_psd_matrix[:, :, ref_ch])
    return np.einsum('...a,...at->...t', mwf_vector.conj(), mix)
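The final '...a,...at->...t' step applies the conjugated filter to every frame, i.e. a weighted sum over sensors per bin. A standalone check with random data:

import numpy as np

w = np.random.randn(5, 3) + 1j * np.random.randn(5, 3)            # hypothetical (bins, sensors)
mix = np.random.randn(5, 3, 7) + 1j * np.random.randn(5, 3, 7)    # hypothetical (bins, sensors, frames)
out = np.einsum('...a,...at->...t', w.conj(), mix)
assert np.allclose(out, np.sum(w.conj()[:, :, None] * mix, axis=1))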
def test_against_numpy_einsum(self):
""" Test against numpy.einsum """
a = np.arange(60.).reshape(3,4,5)
b = np.arange(24.).reshape(4,3,2)
stream = [a, b]
from_numpy = np.einsum('ijk,jil->kl', a, b)
from_stream = last(ieinsum(stream, 'ijk,jil->kl'))
self.assertSequenceEqual(from_numpy.shape, from_stream.shape)
self.assertTrue(np.allclose(from_numpy, from_stream))