def test_byteorder_check():
    # Byte order check should pass for native order
    if sys.byteorder == 'little':
        native = '<'
    else:
        native = '>'

    for dtt in (np.float32, np.float64):
        arr = np.eye(4, dtype=dtt)
        n_arr = arr.newbyteorder(native)
        sw_arr = arr.newbyteorder('S').byteswap()
        assert_equal(arr.dtype.byteorder, '=')
        for routine in (linalg.inv, linalg.det, linalg.pinv):
            # Normal call
            res = routine(arr)
            # Native but not '='
            assert_array_equal(res, routine(n_arr))
            # Swapped
            assert_array_equal(res, routine(sw_arr))
def V(self):
    n1, n2, n3, n4 = self.getNodes()
    V = np.array([[1, n1.x, n1.y, n1.z],
                  [1, n2.x, n2.y, n2.z],
                  [1, n3.x, n3.y, n3.z],
                  [1, n4.x, n4.y, n4.z]])
    # The signed volume of the tetrahedron is one sixth of this determinant.
    return la.det(V) / 6
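As a quick sanity check of the determinant formula, the unit tetrahedron should come out to 1/6. A minimal standalone sketch (plain coordinate tuples stand in for the node objects above, which are not shown in this snippet):

import numpy as np
from numpy import linalg as la

def tet_volume(p1, p2, p3, p4):
    # Same construction as V() above, on plain (x, y, z) tuples.
    M = np.array([[1, *p1], [1, *p2], [1, *p3], [1, *p4]])
    return la.det(M) / 6

print(tet_volume((0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)))  # ~0.1667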
def get_gauss_pdf_value(x, mu, cov):
    p = len(mu)
    xs = x - mu
    covi = inv(cov)
    arg = -0.5 * (xs.T).dot(covi.dot(xs))
    # Normalization constant
    C = (((2.0 * np.pi) ** p) * det(cov)) ** (-0.5)
    prob = C * np.exp(arg)
    return prob
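The value should agree with scipy.stats.multivariate_normal.pdf; a quick check (scipy is not imported by the snippet above, so its availability is an assumption):

import numpy as np
from scipy.stats import multivariate_normal

mu = np.array([0.0, 1.0])
cov = np.array([[2.0, 0.3], [0.3, 1.0]])
x = np.array([0.5, 0.5])

# get_gauss_pdf_value(x, mu, cov) should match this reference value:
print(multivariate_normal(mean=mu, cov=cov).pdf(x))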
def calc_log_Z(a, b, V_inv):
    # Equation 19.
    return gammaln(a) + log(sqrt(1. / det(V_inv))) - a * np.log(b)
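Since log(sqrt(1/det(V_inv))) is just -0.5 * log(det(V_inv)), np.linalg.slogdet computes the same quantity without the underflow or overflow that det() can hit in high dimensions. A hedged drop-in alternative (same math, assuming V_inv is positive definite so the sign returned by slogdet is +1):

import numpy as np
from scipy.special import gammaln

def calc_log_Z_stable(a, b, V_inv):
    # slogdet returns (sign, log|det|); sign is +1 for positive-definite V_inv.
    sign, logdet = np.linalg.slogdet(V_inv)
    return gammaln(a) - 0.5 * logdet - a * np.log(b)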
def test_zero(self):
    assert_equal(linalg.det([[0.0]]), 0.0)
    assert_equal(type(linalg.det([[0.0]])), double)
    assert_equal(linalg.det([[0.0j]]), 0.0)
    assert_equal(type(linalg.det([[0.0j]])), cdouble)

    assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
    assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
    assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
    assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
    assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
    assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
def test_types(self):
    def check(dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        assert_equal(np.linalg.det(x).dtype, dtype)
        ph, s = np.linalg.slogdet(x)
        assert_equal(s.dtype, get_real_dtype(dtype))
        assert_equal(ph.dtype, dtype)

    for dtype in [single, double, csingle, cdouble]:
        yield check, dtype
def dispersion_relation_analytical(omega, beta, tau, Tpar_Tperp, kperp, kpar, gam, eta, nb, theta, k):
    k2 = kperp**2 + kpar**2
    b = 0.5 * kperp**2 / Tpar_Tperp
    inv_kpar = 1. / kpar
    inv_kperp = 1. / kperp
    inv_b = 1. / b
    summand = get_sums_analytical(kperp, kpar, omega, Tpar_Tperp, nb)
    # Entries of the 3x3 dispersion matrix.
    M = 1j*eta*k2*inv_kpar + omega - 0.5*inv_kpar*(1j*eta*kperp**2*inv_kpar+omega)*(summand[6]-summand[5]) + 0.5*(Tpar_Tperp-1.)/Tpar_Tperp*inv_kpar*(summand[8]-summand[7]) + 1j*eta*inv_kpar*(summand[2]-summand[3])
    N = 1j/beta*k2*inv_kperp + 1j*omega*inv_kperp*(summand[9]+inv_b*summand[10]-0.5*(summand[6]+3*summand[5])) - 1j*inv_kperp*(Tpar_Tperp-1.)/Tpar_Tperp*(summand[11]+inv_b*summand[12]-0.5*(summand[8]+3*summand[7]))
    O = 0.5j*gam/tau*(-inv_kperp*(summand[2]-summand[3]) + 0.5*kperp*inv_kpar*(summand[6]-summand[5]))
    P = 1j*kpar/beta - 1j*inv_kpar*(1j*eta*kperp**2*inv_kpar+omega)*(summand[9]+inv_b*summand[10]+0.5*(summand[6]-summand[5])) + 1j*inv_kpar*(Tpar_Tperp-1.)/Tpar_Tperp*(summand[11]+inv_b*summand[12]+0.5*(summand[8]-summand[7])) + eta*inv_kpar*(summand[2]+summand[3])
    Q = -(1j*eta*k2+omega*kpar)*inv_kperp + 0.5*omega*inv_kperp*(summand[6]-summand[5]) - 0.5*(Tpar_Tperp-1.)/Tpar_Tperp*inv_kperp*(summand[8]-summand[7])
    R = -0.5*gam/tau*(inv_kperp*(summand[2]+summand[3]) + kperp*inv_kpar*(summand[9] + inv_b*summand[10] + 0.5*(summand[6]-summand[5])))
    S = -(1j*eta*kperp**2*inv_kpar+omega)*1j*inv_kperp*inv_kpar*Tpar_Tperp*(summand[0]+summand[1]) + 1j*inv_kpar*inv_kperp*(Tpar_Tperp-1)*(summand[2]+summand[3]) + 2*eta*kperp*inv_kpar*summand[4]
    T = -Tpar_Tperp*omega*inv_kperp**2*(summand[0]-summand[1]) + inv_kperp**2*(Tpar_Tperp-1)*(summand[2]-summand[3])
    U = -1 - 0.5*gam/tau*(2*summand[4]+Tpar_Tperp*inv_kpar*(summand[0]+summand[1]))
    global mat
    mat = [[M, N, O], [P, Q, R], [S, T, U]]
    # det(mat) is complex here, so compare its magnitude; a bare complex
    # comparison would raise a TypeError under Python 3.
    if abs(det(mat)) > 1:
        print(summand[6], -summand[5], file=outfile)
    return det(mat)
# Wrapper for the analytical or numerical dispersion relation (see the sketch below).
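The wrapper that this comment announces was not captured by the snippet. A hypothetical sketch of what such a dispatcher might look like (the function name, keyword convention, and the dispersion_relation_numerical counterpart are all assumptions):

def dispersion_relation(omega, use_analytical=True, **params):
    # Hypothetical dispatcher: only the analytical branch is shown in the
    # captured source; dispersion_relation_numerical is assumed to exist.
    if use_analytical:
        return dispersion_relation_analytical(omega, **params)
    return dispersion_relation_numerical(omega, **params)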
def update(self):
    pass

# def execute(self, refholder):
#     input_matrix = refholder.matrices[self.inputs[0].links[0].from_socket.matrix_ref]
#     answer_matrix = np.zeros(16)
#     if la.det(input_matrix) == 0:
#         print("Matrix has no inverse")
#     else:
#         answer_matrix = la.inv(input_matrix)
#     self.outputs[0].matrix_ref = refholder.getRefForMatrix(answer_matrix)
def __init__(self, ctr, am):
    self.n = len(ctr)         # dimension
    self.ctr = np.array(ctr)  # center coordinates
    self.am = np.array(am)    # precision matrix (inverse of covariance)

    # Volume of ellipsoid is the volume of an n-sphere divided
    # by the (determinant of the) Jacobian associated with the
    # transformation, which by definition is the precision matrix.
    self.vol = vol_prefactor(self.n) / np.sqrt(linalg.det(self.am))

    # The eigenvalues (l) of `am` are (a^-2, b^-2, ...) where
    # (a, b, ...) are the lengths of the principal axes.
    # The eigenvectors (v) are the normalized principal axes.
    l, v = linalg.eigh(self.am)
    if np.all((l > 0.) & np.isfinite(l)):
        self.axlens = 1. / np.sqrt(l)
    else:
        raise ValueError("The input precision matrix defining the "
                         "ellipsoid {0} is apparently singular with "
                         "l={1} and v={2}.".format(self.am, l, v))

    # Scaled eigenvectors are the axes, where `axes[:, i]` is the
    # i-th axis. Multiplying this matrix by a vector will transform a
    # point in the unit n-sphere to a point in the ellipsoid.
    self.axes = np.dot(v, np.diag(self.axlens))

    # Amount by which volume was increased after initialization (i.e.
    # cumulative factor from `scale_to_vol`).
    self.expand = 1.
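vol_prefactor is not defined in this snippet; from the comment it is evidently the volume of the unit n-sphere, which has the closed form pi^(n/2) / Gamma(n/2 + 1). A sketch under that assumption, written through gammaln for numerical safety:

import numpy as np
from scipy.special import gammaln

def vol_prefactor(n):
    # Volume of the unit n-ball: pi^(n/2) / Gamma(n/2 + 1).
    return np.exp(0.5 * n * np.log(np.pi) - gammaln(0.5 * n + 1.0))

print(vol_prefactor(2), vol_prefactor(3))  # ~3.1416 (pi), ~4.1888 (4*pi/3)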
def correct(self, r):
    """Perform correction (measurement) update."""
    zhat, H = r.mfn(self.x)
    dz = r.z - zhat
    S = H @ self.P @ H.T + r.R
    SI = inv(S)
    K = self.P @ H.T @ SI
    self.x += K @ dz
    self.P -= K @ H @ self.P
    score = dz.T @ SI @ dz / 2.0 + ln(2 * pi * sqrt(det(S)))
    self._calc_bbox()
    return float(score)
def nll(self, r):
    """Get the nll score of assigning a measurement to the filter."""
    zhat, H = r.mfn(self.x)
    dz = r.z - zhat
    S = H @ self.P @ H.T + r.R
    score = dz.T @ inv(S) @ dz / 2.0 + ln(2 * pi * sqrt(det(S)))
    return float(score)
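Both methods score the innovation with its Gaussian negative log-likelihood. In general that is dz.T @ inv(S) @ dz / 2 + log((2*pi)**k * det(S)) / 2 for a k-dimensional measurement; the ln(2 * pi * sqrt(det(S))) form above equals this exactly when k = 2, so the filter appears to assume 2-D measurements. A quick check against scipy under that assumption:

import numpy as np
from numpy import log as ln, pi, sqrt
from numpy.linalg import inv, det
from scipy.stats import multivariate_normal

dz = np.array([0.3, -0.1])
S = np.array([[1.0, 0.2], [0.2, 0.5]])

score = dz.T @ inv(S) @ dz / 2.0 + ln(2 * pi * sqrt(det(S)))
# Should match the exact Gaussian NLL for a 2-D innovation:
print(score, -multivariate_normal(mean=np.zeros(2), cov=S).logpdf(dz))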
def _cov_and_inv(self, n, indices):
    """
    Calculate covariance around local support vector
    and also the inverse
    """
    cov = self._cov(indices, n)
    det = la.det(cov)
    while det <= 0:
        cov += sp.identity(cov.shape[0]) * self.EPS
        det = la.det(cov)
    inv_cov = la.inv(cov)
    return cov, inv_cov, det
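The while-loop is plain jitter regularization: adding EPS * I raises every eigenvalue by EPS until the determinant turns positive and the inverse exists. A standalone illustration of the idea (the EPS value is an assumption):

import numpy as np
from numpy import linalg as la

EPS = 1e-8
cov = np.array([[1.0, 1.0], [1.0, 1.0]])  # rank-deficient: det == 0

while la.det(cov) <= 0:
    cov += np.identity(cov.shape[0]) * EPS  # lift all eigenvalues by EPS

print(la.det(cov))  # small but positive, so la.inv(cov) is now usable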
def weights_rbf(self, unit_sp, hypers):
    # BQ weights for RBF kernel with given hypers; computations adopted from the GP-ADF code [Deisenroth]
    # under the following assumptions:
    #   (A1) the uncertain input is zero-mean with unit covariance
    #   (A2) one set of hyper-parameters is used for all output dimensions (one GP models all outputs)
    d, n = unit_sp.shape
    # GP kernel hyper-parameters
    alpha, el, jitter = hypers['sig_var'], hypers['lengthscale'], hypers['noise_var']
    assert len(el) == d
    # pre-allocation for convenience
    eye_d, eye_n = np.eye(d), np.eye(n)
    iLam1 = np.atleast_2d(np.diag(el ** -1))  # sqrt(Lambda^-1)
    iLam2 = np.atleast_2d(np.diag(el ** -2))

    inp = unit_sp.T.dot(iLam1)  # sigmas / el[:, na]; (x - m)^T * sqrt(Lambda^-1), shape (numSP, xdim)
    K = np.exp(2 * np.log(alpha) - 0.5 * maha(inp, inp))
    iK = cho_solve(cho_factor(K + jitter * eye_n), eye_n)
    B = iLam2 + eye_d  # (D, D)
    c = alpha ** 2 / np.sqrt(det(B))
    t = inp.dot(inv(B))  # inn * (P + Lambda)^-1
    l = np.exp(-0.5 * np.sum(inp * t, 1))  # (N, 1)
    zet = 2 * np.log(alpha) - 0.5 * np.sum(inp * inp, 1)
    inp = inp.dot(iLam1)
    R = 2 * iLam2 + eye_d
    t = 1 / np.sqrt(det(R))
    L = np.exp((zet[:, na] + zet[:, na].T) + maha(inp, -inp, V=0.5 * inv(R)))
    q = c * l  # evaluations of the kernel mean map (from the viewpoint of RKHS methods)
    # mean weights
    wm_f = q.dot(iK)
    iKQ = iK.dot(t * L)
    # covariance weights
    wc_f = iKQ.dot(iK)
    # cross-covariance "weights"
    wc_fx = np.diag(q).dot(iK)
    # used for self.D.dot(x - mean).dot(wc_fx).dot(fx)
    self.D = inv(eye_d + np.diag(el ** 2))  # S(S+Lam)^-1; for S=I, (I+Lam)^-1
    # model variance, to be added to the covariance;
    # this diagonal form assumes independent GP outputs (cov(f^a, f^b) = 0 for all a, b: a != b)
    self.model_var = np.diag((alpha ** 2 - np.trace(iKQ)) * np.ones((d, 1)))
    return wm_f, wc_f, wc_fx
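The kernel-mean values q = c * l have a closed form because an RBF kernel integrated against a Gaussian is again Gaussian. A minimal 1-D sketch of that quantity under assumption (A1) above (alpha here is the kernel output scale, ell the lengthscale; both values are made up for illustration):

import numpy as np

alpha, ell = 1.0, 0.8           # kernel output scale and lengthscale (1-D)
x = np.array([-1.0, 0.0, 1.0])  # sigma points

# q_i = E[k(X, x_i)] for X ~ N(0, 1), k(x, x') = alpha^2 * exp(-(x - x')^2 / (2 ell^2)):
q = alpha**2 * ell / np.sqrt(ell**2 + 1) * np.exp(-x**2 / (2 * (ell**2 + 1)))

# Monte Carlo check of the closed form:
X = np.random.default_rng(0).standard_normal(200_000)
q_mc = np.array([np.mean(alpha**2 * np.exp(-(X - xi)**2 / (2 * ell**2))) for xi in x])
print(np.max(np.abs(q - q_mc)))  # should be on the order of 1e-3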
def weights_rbf(self, unit_sp, hypers):
    # BQ weights for RBF kernel with given hypers; computations adopted from the GP-ADF code [Deisenroth]
    # under the following assumptions:
    #   (A1) the uncertain input is zero-mean with unit covariance
    #   (A2) one set of hyper-parameters is used for all output dimensions (one GP models all outputs)
    d, n = unit_sp.shape
    # GP kernel hyper-parameters
    alpha, el, jitter = hypers['sig_var'], hypers['lengthscale'], hypers['noise_var']
    assert len(el) == d
    # pre-allocation for convenience
    eye_d, eye_n = np.eye(d), np.eye(n)
    iLam1 = np.atleast_2d(np.diag(el ** -1))  # sqrt(Lambda^-1)
    iLam2 = np.atleast_2d(np.diag(el ** -2))

    inp = unit_sp.T.dot(iLam1)  # sigmas / el[:, na]; (x - m)^T * sqrt(Lambda^-1), shape (numSP, xdim)
    K = np.exp(2 * np.log(alpha) - 0.5 * maha(inp, inp))
    iK = cho_solve(cho_factor(K + jitter * eye_n), eye_n)
    B = iLam2 + eye_d  # (D, D)
    c = alpha ** 2 / np.sqrt(det(B))
    t = inp.dot(inv(B))  # inn * (P + Lambda)^-1
    l = np.exp(-0.5 * np.sum(inp * t, 1))  # (N, 1)
    zet = 2 * np.log(alpha) - 0.5 * np.sum(inp * inp, 1)
    inp = inp.dot(iLam1)
    R = 2 * iLam2 + eye_d
    t = 1 / np.sqrt(det(R))
    L = np.exp((zet[:, na] + zet[:, na].T) + maha(inp, -inp, V=0.5 * inv(R)))
    q = c * l  # evaluations of the kernel mean map (from the viewpoint of RKHS methods)
    # mean weights
    wm_f = q.dot(iK)
    iKQ = iK.dot(t * L)
    # covariance weights
    wc_f = iKQ.dot(iK)
    # cross-covariance "weights"
    wc_fx = np.diag(q).dot(iK)
    self.iK = iK
    # used for self.D.dot(x - mean).dot(wc_fx).dot(fx)
    self.D = inv(eye_d + np.diag(el ** 2))  # S(S+Lam)^-1; for S=I, (I+Lam)^-1
    # model variance, to be added to the covariance;
    # this diagonal form assumes independent GP outputs (cov(f^a, f^b) = 0 for all a, b: a != b)
    self.model_var = np.diag((alpha ** 2 - np.trace(iKQ)) * np.ones((d, 1)))
    return wm_f, wc_f, wc_fx