import numpy as np
import numpy.linalg as ln
from scipy.linalg import toeplitz

def aryule(c, k):
"""Solve Yule-Walker equation.
Args:
c (numpy array): Coefficients (i.e. autocorrelation)
k (int): Assuming the AR(k) model
Returns:
numpy array: k model parameters
Some formulations solve: C a = -c,
but we actually solve C a = c.
"""
a = np.zeros(k)
    # fall back to the zero vector if C is singular or ill-conditioned
C = toeplitz(c[:k])
if not np.all(C == 0.0) and np.isfinite(ln.cond(C)):
a = np.dot(ln.inv(C), c[1:])
return a
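A quick usage sketch (illustrative, not from the original source, and assuming the imports above): estimate the coefficients of a simulated AR(2) process from its sample autocorrelation.

rng = np.random.default_rng(0)
x = np.zeros(10000)
for t in range(2, x.size):
    x[t] = 0.6 * x[t - 1] - 0.2 * x[t - 2] + rng.standard_normal()
k = 2
# sample autocorrelations at lags 0..k
c = np.array([np.mean(x[:x.size - lag] * x[lag:]) for lag in range(k + 1)])
print(aryule(c, k))  # roughly [0.6, -0.2]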
import numpy as np
import numpy.linalg as lg

def estimate_X_TLS(y, H):
    """ Estimator of x for the linear model using Total Least Squares (TLS).
    Compared to the classical least-squares estimator, the TLS estimator is
    better suited when H is not exactly known or is contaminated by noise [GOL80]_.
.. [GOL80] Golub, Gene H., and Charles F. Van Loan. "An analysis of the total least squares problem." SIAM Journal on Numerical Analysis 17.6 (1980): 883-893.
"""
    N, L = H.shape
    H = np.matrix(H)
    Z = np.hstack([H, y])
    U, S, V = lg.svd(Z)
    V = np.matrix(V)
    V = V.H
    VHY = V[:L, L:]
    VYY = V[L:, L:]
    x = -VHY * lg.inv(VYY)
return x
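A hedged usage sketch with synthetic data, assuming the imports above; note that `y` must be a column vector (N x 1) so that `np.hstack([H, y])` is well formed.

rng = np.random.default_rng(1)
N, L = 200, 3
x_true = np.array([[1.0], [-2.0], [0.5]])
H_clean = rng.standard_normal((N, L))
y = H_clean @ x_true + 0.05 * rng.standard_normal((N, 1))
H_noisy = H_clean + 0.05 * rng.standard_normal((N, L))  # errors in H as well
x_tls = estimate_X_TLS(y, H_noisy)
print(np.asarray(x_tls).ravel())  # approximately [1.0, -2.0, 0.5]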
import sys
import numpy as np
from numpy import linalg
from numpy.testing import assert_equal, assert_array_equal

def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
native = '<'
else:
native = '>'
for dtt in (np.float32, np.float64):
arr = np.eye(4, dtype=dtt)
n_arr = arr.newbyteorder(native)
sw_arr = arr.newbyteorder('S').byteswap()
assert_equal(arr.dtype.byteorder, '=')
for routine in (linalg.inv, linalg.det, linalg.pinv):
# Normal call
res = routine(arr)
# Native but not '='
assert_array_equal(res, routine(n_arr))
# Swapped
assert_array_equal(res, routine(sw_arr))
def test_basic(self):
import numpy.linalg as linalg
A = np.array([[1., 2.],
[3., 4.]])
mA = matrix(A)
assert_(np.allclose(linalg.inv(A), mA.I))
assert_(np.all(np.array(np.transpose(A) == mA.T)))
assert_(np.all(np.array(np.transpose(A) == mA.H)))
assert_(np.all(A == mA.A))
B = A + 2j*A
mB = matrix(B)
assert_(np.allclose(linalg.inv(B), mB.I))
assert_(np.all(np.array(np.transpose(B) == mB.T)))
assert_(np.all(np.array(np.transpose(B).conj() == mB.H)))
def test_basic(self):
import numpy.linalg as linalg
A = np.array([[1., 2.], [3., 4.]])
mA = matrix(A)
B = np.identity(2)
for i in range(6):
assert_(np.allclose((mA ** i).A, B))
B = np.dot(B, A)
Ainv = linalg.inv(A)
B = np.identity(2)
for i in range(6):
assert_(np.allclose((mA ** -i).A, B))
B = np.dot(B, Ainv)
assert_(np.allclose((mA * mA).A, np.dot(A, A)))
assert_(np.allclose((mA + mA).A, (A + A)))
assert_(np.allclose((3*mA).A, (3*A)))
mA2 = matrix(A)
mA2 *= 3
assert_(np.allclose(mA2.A, 3*A))
def make_eigvals_positive(am, targetprod):
"""For the symmetric square matrix `am`, increase any zero eigenvalues
such that the total product of eigenvalues is greater or equal to
`targetprod`. Returns a (possibly) new, non-singular matrix."""
    w, v = linalg.eigh(am)  # use eigh since am is symmetric
mask = w < 1.e-10
if np.any(mask):
        nzprod = np.prod(w[~mask])  # product of nonzero eigenvalues
nzeros = mask.sum() # number of zero eigenvalues
new_val = max(1.e-10, (targetprod / nzprod) ** (1. / nzeros))
w[mask] = new_val # adjust zero eigvals
am_new = np.dot(np.dot(v, np.diag(w)), linalg.inv(v)) # re-form cov
else:
am_new = am
return am_new
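A hedged example, assuming `numpy.linalg` is imported as `linalg`: a rank-deficient 2 x 2 matrix is repaired so that its eigenvalue product (determinant) reaches the target.

am = np.array([[1.0, 1.0],
               [1.0, 1.0]])  # rank 1, determinant 0
fixed = make_eigvals_positive(am, targetprod=0.5)
print(linalg.det(fixed))  # 0.5 up to round-off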
def compSpecSample(self, angle):
"""
    Compute the spatial spectrum at the given angle with the Capon beamformer.
MUST compute the covariance matrix (call `compCov()`) before calling
this function.
Parameters
----------
angle : float
Direction-of-arrival (DOA) angle in range [0, pi).
Returns
-------
p : float
The response of Capon beamformer.
"""
a = self.sarr.steer(angle)
p = 1. / (a.T.conj().dot(inv(self.r).dot(a)))
# Discard imaginary part
return p.real
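Because the method leans on its host class (`self.sarr.steer` for the steering vector, `self.r` for the covariance from `compCov()`), here is a self-contained sketch of the same Capon computation; the `steer` helper, array size, and noise level are hypothetical stand-ins, not part of the original class.

import numpy as np
from numpy.linalg import inv

def steer(angle, m=8):
    # hypothetical uniform linear array, half-wavelength element spacing
    return np.exp(1j * np.pi * np.arange(m) * np.cos(angle))

rng = np.random.default_rng(2)
doa, snapshots = np.pi / 3, 200
s = rng.standard_normal(snapshots) + 1j * rng.standard_normal(snapshots)
noise = 0.1 * (rng.standard_normal((8, snapshots))
               + 1j * rng.standard_normal((8, snapshots)))
X = np.outer(steer(doa), s) + noise
r = X @ X.conj().T / snapshots  # sample covariance, playing the role of self.r
angles = np.linspace(0.0, np.pi, 181, endpoint=False)
p = [1.0 / (steer(a).conj() @ inv(r) @ steer(a)).real for a in angles]
# np.argmax(p) lands near doa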
def compSpecSample(self, angle):
"""
    Compute the spatial spectrum response at a specified angle with the
    FLOM (fractional lower-order moment) matrix.
MUST compute the FLOM matrix (call `compFLOM()`) before calling this
function.
Parameters
----------
angle : float
Direction-of-arrival (DOA) angle in range [0, pi).
Returns
-------
p : float
The response of spatial spectrum.
"""
a = self.sarr.steer(angle)
p = 1. / (a.T.conj().dot(inv(self.gamma).dot(a)))
# Discard imaginary part
return p.real
def _mvee(self, points, tolerance):
    # Minimum-volume enclosing ellipsoid (Khachiyan's algorithm): returns
    # (a, c) such that (x - c)^T a (x - c) <= 1 for all input points.
    # Taken from: http://stackoverflow.com/questions/14016898/port-matlab-bounding-ellipsoid-code-to-python
points = np.asmatrix(points)
n, d = points.shape
q = np.column_stack((points, np.ones(n))).T
err = tolerance + 1.0
u = np.ones(n) / n
while err > tolerance:
# assert u.sum() == 1 # invariant
x = q * np.diag(u) * q.T
m = np.diag(q.T * la.inv(x) * q)
jdx = np.argmax(m)
step_size = (m[jdx] - d - 1.0) / ((d + 1) * (m[jdx] - 1.0))
new_u = (1 - step_size) * u
new_u[jdx] += step_size
err = la.norm(new_u - u)
u = new_u
c = u * points
a = la.inv(points.T * np.diag(u) * points - c.T * c) / d
return np.asarray(a), np.squeeze(np.asarray(c))
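An illustrative call, assuming `numpy` as `np`. Since `self` is unused in the body, the method can be exercised as a plain function for a quick check; every input point should satisfy (x - c)^T a (x - c) <= 1 up to the tolerance.

rng = np.random.default_rng(3)
pts = rng.standard_normal((50, 2))
a, c = _mvee(None, pts, tolerance=1e-3)  # None stands in for self
print(max((p - c) @ a @ (p - c) for p in pts))  # close to 1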
def _int_var_rbf_hyp(self, hyp, X, jitter=1e-8):
"""
Posterior integral variance as a function of hyper-parameters
    :param hyp: RBF kernel lengthscale hyper-parameters [el_1, ..., el_d]; the signal variance is fixed at 1
:param X: sigma-points
:param jitter: numerical jitter (for stabilizing computations)
:return: posterior integral variance
"""
# reshape X to SP matrix
X = np.reshape(X, (self.n, self.d))
# set kernel hyper-parameters
s2, el = 1, hyp # sig_var hyper always set to 1
self.kern.param_array[0] = s2 # variance
self.kern.param_array[1:] = el # lengthscale
K = self.kern.K(X)
L = np.diag(el ** 2)
# posterior variance of the integral
ks = s2 * np.sqrt(det(L + np.eye(self.d))) * multivariate_normal(mean=np.zeros(self.d), cov=L).pdf(X)
postvar = s2 * np.sqrt(det(2 * inv(L) + np.eye(self.d))) ** -1 - ks.dot(
solve(K + jitter * np.eye(self.n), ks.T))
return postvar
def cov(self):
"""
Compute parameter covariance
Returns
-------
c : ndarray
Parameter covariance
"""
s = self.s
nobs = self._xe.shape[0]
scale = 1 / (nobs - int(self._debiased) * self._df)
if self.square:
ji = self.inv_jacobian
out = ji @ s @ ji.T
else:
j = self.jacobian
out = inv(j.T @ inv(s) @ j)
out = (scale / 2) * (out + out.T)
return out
def w(self, moments):
"""
Score/moment condition weighting matrix
Parameters
----------
moments : ndarray
Moment conditions (nobs by nmoments)
Returns
-------
w : ndarray
Weighting matrix computed from moment conditions
"""
if self._center:
moments = moments - moments.mean(0)[None, :]
nobs = moments.shape[0]
out = moments.T @ moments / nobs
return inv((out + out.T) / 2.0)
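Outside the class, the weighting-matrix computation reduces to a few lines; a minimal sketch with synthetic moment conditions (all names below are illustrative):

import numpy as np
from numpy.linalg import inv

rng = np.random.default_rng(4)
moments = rng.standard_normal((500, 3))        # nobs x nmoments stand-in
moments = moments - moments.mean(0)[None, :]   # centering, as when _center is True
s = moments.T @ moments / moments.shape[0]
w = inv((s + s.T) / 2.0)                       # symmetrized before inverting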
def w(self, moments):
"""
Score/moment condition weighting matrix
Parameters
----------
moments : ndarray
Moment conditions (nobs by nmoments)
Returns
-------
w : ndarray
Weighting matrix computed from moment conditions
"""
if self._center:
moments = moments - moments.mean(0)[None, :]
out = self._kernel_cov(moments)
return inv(out)
def _f_statistic(self, params, cov, debiased):
non_const = ~(self._x.ptp(0) == 0)
test_params = params[non_const]
test_cov = cov[non_const][:, non_const]
test_stat = test_params.T @ inv(test_cov) @ test_params
test_stat = float(test_stat)
nobs, nvar = self._x.shape
null = 'All parameters ex. constant are zero'
name = 'Model F-statistic'
df = test_params.shape[0]
if debiased:
wald = WaldTestStatistic(test_stat / df, null, df, nobs - nvar,
name=name)
else:
wald = WaldTestStatistic(test_stat, null, df, name=name)
return wald
def cov(self):
"""Covariance of estimated parameters"""
x, z = self.x, self.z
nobs, nvar = x.shape
pinvz = self._pinvz
v = (x.T @ z) @ (pinvz @ x) / nobs
if self._kappa != 1:
kappa = self._kappa
xpx = x.T @ x / nobs
v = (1 - kappa) * xpx + kappa * v
vinv = inv(v)
c = vinv @ self.s @ vinv / nobs
return (c + c.T) / 2
def _gls_cov(self):
x = self._x
sigma = self._sigma
sigma_inv = inv(sigma)
xpx = blocked_inner_prod(x, sigma_inv)
# Handles case where sigma_inv is not inverse of full_sigma
xeex = blocked_inner_prod(x, sigma_inv @ self._full_sigma @ sigma_inv)
if self._constraints is None:
xpxi = inv(xpx)
cov = xpxi @ xeex @ xpxi
else:
cons = self._constraints
xpx = cons.t.T @ xpx @ cons.t
xpxi = inv(xpx)
xeex = cons.t.T @ xeex @ cons.t
cov = cons.t @ (xpxi @ xeex @ xpxi) @ cons.t.T
cov = (cov + cov.T) / 2
return cov
def __init__(self, x, eps, sigma, full_sigma, gls=False, debiased=False, constraints=None):
super(HeteroskedasticCovariance, self).__init__(x, eps, sigma, full_sigma,
gls=gls,
debiased=debiased,
constraints=constraints)
self._name = 'Heteroskedastic (Robust) Covariance'
k = len(x)
weights = inv(sigma) if gls else eye(k)
bigx = blocked_diag_product(x, weights)
nobs = eps.shape[0]
e = eps.T.ravel()[:, None]
bigxe = bigx * e
m = bigx.shape[1]
xe = zeros((nobs, m))
for i in range(nobs):
xe[i, :] = bigxe[i::nobs].sum(0)[None, :]
self._moments = xe
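The loop above collects, for each observation, its row from every equation block of `bigxe`. If memory allows, a reshape-and-sum gives the same result without the Python loop; a sketch, assuming `bigxe` stacks the k equation blocks vertically as constructed here:

# equivalent to xe[i, :] = bigxe[i::nobs].sum(0) for every i
xe = bigxe.reshape(k, nobs, m).sum(axis=0)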