def spatFT_LSF(data, position_grid, order_max, spherical_harmonic_bases=None):
    '''Returns spherical harmonic coefficients least-squares fitted to the provided data

    Parameters
    ----------
    data : array_like, complex
        Data to be fitted to
    position_grid : array_like, or io.SphericalGrid
        Azimuth / colatitude data locations
    order_max : int
        Maximum order N of fit
    spherical_harmonic_bases : array_like, optional
        Precomputed spherical harmonic basis matrix; generated from the grid
        and order_max if not provided

    Returns
    -------
    coefficients : array_like, float
        Fitted spherical harmonic coefficients (indexing: n**2 + n + m + 1)
    '''
position_grid = SphericalGrid(*position_grid)
if spherical_harmonic_bases is None:
spherical_harmonic_bases = sph_harm_all(order_max, position_grid.azimuth, position_grid.colatitude)
return lstsq(spherical_harmonic_bases, data)[0]
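# A minimal, self-contained sketch of the same least-squares fit, built with
# scipy.special directly on synthetic data (SphericalGrid / sph_harm_all above
# come from the surrounding library and are not redefined here).
import numpy as np
from scipy.special import sph_harm
from scipy.linalg import lstsq

order_max = 2
azimuth = np.random.uniform(0, 2 * np.pi, 64)
colatitude = np.random.uniform(0, np.pi, 64)

# One column per (n, m); column index n**2 + n + m matches the ordering above.
bases = np.column_stack([sph_harm(m, n, azimuth, colatitude)
                         for n in range(order_max + 1)
                         for m in range(-n, n + 1)])
data = np.random.randn(64) + 1j * np.random.randn(64)
coefficients = lstsq(bases, data)[0]   # (order_max + 1)**2 coefficients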
def mtx_updated_G(phi_recon, M, mtx_amp2visi_ri, mtx_fri2visi_ri):
"""
Update the linear transformation matrix that links the FRI sequence to the
visibilities by using the reconstructed Dirac locations.
:param phi_recon: the reconstructed Dirac locations (azimuths)
:param M: the Fourier series expansion is between -M to M
:param p_mic_x: a vector that contains microphones' x-coordinates
:param p_mic_y: a vector that contains microphones' y-coordinates
:param mtx_freq2visi: the linear mapping from Fourier series to visibilities
:return:
"""
L = 2 * M + 1
ms_half = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
phi_recon = np.reshape(phi_recon, (1, -1), order='F')
mtx_amp2freq = np.exp(-1j * ms_half * phi_recon) # size: (M + 1) x K
mtx_amp2freq_ri = np.vstack((mtx_amp2freq.real, mtx_amp2freq.imag[:-1, :])) # size: (2M + 1) x K
mtx_fri2amp_ri = linalg.lstsq(mtx_amp2freq_ri, np.eye(L))[0]
    # project mtx_fri2visi_ri onto the null space of mtx_fri2amp_ri
mtx_null_proj = np.eye(L) - np.dot(mtx_fri2amp_ri.T,
linalg.lstsq(mtx_fri2amp_ri.T, np.eye(L))[0])
G_updated = np.dot(mtx_amp2visi_ri, mtx_fri2amp_ri) + \
np.dot(mtx_fri2visi_ri, mtx_null_proj)
return G_updated
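# Sketch of the two lstsq idioms used above: solving against the identity gives
# a least-squares pseudo-inverse, and I - B @ pinv(B) built from it acts as the
# "null-space projection". Names here are illustrative only.
import numpy as np
from scipy import linalg

A = np.random.randn(7, 3)                        # tall, full column rank
A_pinv = linalg.lstsq(A, np.eye(7))[0]           # (3, 7), same as np.linalg.pinv(A)
null_proj = np.eye(7) - np.dot(A_pinv.T, linalg.lstsq(A_pinv.T, np.eye(7))[0])
# null_proj annihilates the columns of A_pinv.T:
# np.allclose(np.dot(null_proj, A_pinv.T), 0) -> True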
def mldivide(a, b):
    """MATLAB-style backslash: exact solve for square a, least squares otherwise."""
    dimensions = a.shape
    if dimensions[0] == dimensions[1]:
        return la.solve(a, b)
else:
return la.lstsq(a, b)[0]
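# Hedged usage sketch for mldivide (assumes numpy and scipy.linalg as la,
# matching the imports the snippet above relies on):
import numpy as np
import scipy.linalg as la

A_sq = np.array([[3., 1.], [1., 2.]])
b_sq = np.array([9., 8.])
x_exact = mldivide(A_sq, b_sq)                   # square system -> la.solve

A_tall = np.random.randn(10, 2)
b_tall = np.dot(A_tall, np.array([1.5, -0.5])) + 0.01 * np.random.randn(10)
x_ls = mldivide(A_tall, b_tall)                  # overdetermined -> la.lstsq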
def new_fast_norm(image, Fibers, cols=None, mask=None):
    """Fit per-fiber amplitudes to each (unmasked) image column by least squares."""
    if mask is None:
        mask = np.zeros(image.shape)
a,b = image.shape
if cols is None:
cols = np.arange(b)
init_model = np.zeros((a,len(Fibers)))
norm = np.zeros((len(Fibers),b))
for col in cols:
for i,fiber in enumerate(Fibers):
xsel = np.where(fiber.xind==col)[0]
init_model[fiber.yind[xsel],i] = fiber.core[xsel]
if (mask[:,col]==0).sum()>(a*1./2.):
norm[:,col] = lstsq(init_model[mask[:,col]==0,:],
image[mask[:,col]==0,col])[0]
for i,fiber in enumerate(Fibers):
fiber.spectrum = norm[i,:]
def measure_background(image, Fibers, width=30, niter=3, order=3):
    """Fit a 2-D polynomial background to non-fiber pixels with iterative outlier rejection."""
t = []
a,b = image.shape
ygrid,xgrid = np.indices(image.shape)
ygrid = 1. * ygrid.ravel() / a
xgrid = 1. * xgrid.ravel() / b
image = image.ravel()
s = np.arange(a*b)
for fiber in Fibers:
t.append(fiber.D*fiber.yind + fiber.xind)
t = np.hstack(t)
t = np.array(t, dtype=int)
ind = np.setdiff1d(s,t)
    mask = np.zeros((a * b,))
    mask[ind] = 1. - is_outlier(image[ind])
sel = np.where(mask==1.)[0]
    for i in range(niter):
V = polyvander2d(xgrid[sel],ygrid[sel],[order,order])
sol = np.linalg.lstsq(V, image[sel])[0]
vals = np.dot(V,sol) - image[sel]
sel = sel[~is_outlier(vals)]
V = polyvander2d(xgrid,ygrid,[order,order])
back = np.dot(V, sol).reshape(a,b)
return back
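# Standalone sketch of the background model used above: a 2-D polynomial
# (Vandermonde) surface fitted by least squares on normalized coordinates.
import numpy as np
from numpy.polynomial.polynomial import polyvander2d

a, b, order = 50, 60, 3
ygrid, xgrid = np.indices((a, b))
x = xgrid.ravel() / float(b)
y = ygrid.ravel() / float(a)
surface = 2.0 + 0.5 * x - 1.5 * y + 0.8 * x * y    # fake smooth background

V = polyvander2d(x, y, [order, order])
sol = np.linalg.lstsq(V, surface, rcond=None)[0]
back = np.dot(V, sol).reshape(a, b)                # reconstructed background image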
def TaylorFit(block):
    '''
    Least-squares fit of the quadratic model
    a*s*s + b*x*x + c*y*y + d*x*y + e*s + f*x + g*y + h
    to a 3x3x3 block; returns the coefficient vector [a, b, c, d, e, f, g, h].
    '''
A = []
b = []
for s in range(3):
for x in range(3):
for y in range(3):
z = block[s,x,y]
row = [s*s,x*x,y*y,x*y,s,x,y,1]
A.append(row)
b.append([z])
params,resids,rank,s = lstsq(A,b)
return params.flatten()
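# Quick sanity-check sketch for TaylorFit: build a 3x3x3 block from known
# quadratic coefficients and confirm they are recovered (assumes lstsq is
# imported at module level, as TaylorFit above expects).
import numpy as np

s, x, y = np.mgrid[0:3, 0:3, 0:3]
true = np.array([0.5, -1.0, 2.0, 0.3, 1.0, 0.0, -2.0, 4.0])   # a..h
block = (true[0] * s * s + true[1] * x * x + true[2] * y * y +
         true[3] * x * y + true[4] * s + true[5] * x + true[6] * y + true[7])
params = TaylorFit(block)            # np.allclose(params, true) -> True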
def solve(self, Y):
X, _residues, _rank, _sv = la.lstsq(self._Sigma, Y)
return X
def mtx_updated_G_multiband(phi_recon, M, mtx_amp2visi_ri,
mtx_fri2visi_ri, num_bands):
"""
Update the linear transformation matrix that links the FRI sequence to the
visibilities by using the reconstructed Dirac locations.
:param phi_recon: the reconstructed Dirac locations (azimuths)
:param M: the Fourier series expansion is between -M to M
:param p_mic_x: a vector that contains microphones' x-coordinates
:param p_mic_y: a vector that contains microphones' y-coordinates
:param mtx_freq2visi: the linear mapping from Fourier series to visibilities
:return:
"""
L = 2 * M + 1
ms_half = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
phi_recon = np.reshape(phi_recon, (1, -1), order='F')
mtx_amp2freq = np.exp(-1j * ms_half * phi_recon) # size: (M + 1) x K
mtx_amp2freq_ri = np.vstack((mtx_amp2freq.real, mtx_amp2freq.imag[:-1, :])) # size: (2M + 1) x K
mtx_fri2amp_ri = linalg.lstsq(mtx_amp2freq_ri, np.eye(L))[0]
    # project mtx_fri2visi_ri onto the null space of mtx_fri2amp_ri
mtx_null_proj = np.eye(L) - np.dot(mtx_fri2amp_ri.T,
linalg.lstsq(mtx_fri2amp_ri.T, np.eye(L))[0])
G_updated = np.dot(mtx_amp2visi_ri,
linalg.block_diag(*([mtx_fri2amp_ri] * num_bands))
) + \
np.dot(mtx_fri2visi_ri,
linalg.block_diag(*([mtx_null_proj] * num_bands))
)
return G_updated
def comp_mapping(X, Y):
from GPy.core.parameterization.variational import VariationalPosterior
X = X.mean.values if isinstance(X, VariationalPosterior) else X
Y = Y.mean.values if isinstance(Y, VariationalPosterior) else Y
from scipy.linalg import lstsq
W = lstsq(X,Y)[0]
return W
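# comp_mapping reduces to a single least-squares solve: W = lstsq(X, Y)[0]
# such that X @ W ~= Y. A plain-array sketch of that core (the
# VariationalPosterior branch only matters when GPy posterior objects are
# passed in):
import numpy as np
from scipy.linalg import lstsq

X_demo = np.random.randn(100, 3)
W_true = np.random.randn(3, 2)
Y_demo = np.dot(X_demo, W_true) + 0.01 * np.random.randn(100, 2)
W_fit = lstsq(X_demo, Y_demo)[0]     # close to W_true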
def get_norm_nonparametric_fast(image, Fibers, cols=None, mask=None,
plaw_coeff1=0.0004):
plaw_coeff = np.array([plaw_coeff1, 0.5, 0.15, 1.0])
bins=len(Fibers[0].binx)
a,b = image.shape
if mask is None:
mask = np.zeros(image.shape)
ygrid,xgrid = np.indices(image.shape)
fun = np.zeros((bins,))
y=ygrid[:,0]
Fl = np.zeros((len(y), bins))
P = build_powerlaw(ygrid, Fibers, plaw_coeff, cols)
init_model = np.zeros((len(y),len(Fibers)))
if cols is None:
cols = np.arange(b)
norm = np.zeros((len(Fibers),b))
for col in cols:
for i,fiber in enumerate(Fibers):
ix = y-fiber.trace[col]
            for j in range(bins):
fun[j] = 1.0
Fl[:,j] = np.interp(ix,fiber.binx,fun,left=0.0,right=0.0)
fun[j] = 0.0
init_model[:,i] = np.dot(Fl, fiber.fibmodel[col,:])+P[:,col,i]
norm[:,col] = lstsq(init_model[mask[:,col]==0,:],
image[mask[:,col]==0,col])[0]
return norm
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
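# Standalone sketch of the finite-difference trick above (also used in
# test_logistic_grad_hess below): a Hessian column H @ v is the slope of the
# gradient along direction v, estimated by least-squares regression on the
# step sizes. Toy quadratic objective, so the Hessian is known exactly.
import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
A = rng.randn(4, 4)
A = np.dot(A, A.T)                     # Hessian of f(w) = 0.5 * w.T @ A @ w

def toy_grad(w):
    return np.dot(A, w)

w0 = rng.randn(4)
vec = np.zeros(4)
vec[0] = 1.0                           # probe the first Hessian column
d_x = np.linspace(-1e-3, 1e-3, 30)
d_grad = np.array([toy_grad(w0 + t * vec) for t in d_x])
d_grad -= d_grad.mean(axis=0)
approx_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
# approx_col is (numerically) A[:, 0]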
def bundletoa(*args):
debug = True
    # Unpack the inputs once so xt / yt keep the refinement from previous
    # iterations instead of being reset from args on every pass.
    D, i, J, xt, yt = args[0], args[1], args[2], args[3], args[4]
    res = None
    jac = None
    for kkk in range(0, 30):
        res, jac = calcresandjac(D, i, J, xt, yt)
        dz_a = -(np.dot(jac.conj().T, jac) + np.eye(jac.shape[1]))
        dz_b = np.dot(jac.conj().T, res)
        dz = linalg.lstsq(dz_a, dz_b)[0]
xtn, ytn = updatexy(xt, yt, dz)
res2, jac2 = calcresandjac(D, i, J, xtn, ytn)
        cc = np.linalg.norm(np.dot(jac, dz)) / np.linalg.norm(res)
if np.linalg.norm(res) < np.linalg.norm(res2):
if cc > 1e-4:
kkkk = 1
while (kkkk < 50) and (
np.linalg.norm(res) < np.linalg.norm(res2)):
dz = dz / 2
xtn, ytn = updatexy(xt, yt, dz)
res2, jac2 = calcresandjac(D, i, J, xtn, ytn)
kkkk = kkkk + 1
        if debug:
            # residual norms: current, predicted after the step, achieved after the step
            aa = np.array([np.linalg.norm(res),
                           np.linalg.norm(res + np.dot(jac, dz)),
                           np.linalg.norm(res2)])
            bb = aa - aa[1]
            bb = bb / bb[0]
            cc = np.linalg.norm(np.dot(jac, dz)) / np.linalg.norm(res)
            print(aa, bb, cc)
if np.linalg.norm(res2) < np.linalg.norm(res):
xt = xtn
yt = ytn
else:
print()
xopt = xt
yopt = yt
return xopt, yopt, res, jac
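# The inner update in bundletoa is a damped Gauss-Newton step solved with
# lstsq; a standalone sketch of just that step on a random Jacobian/residual
# (same sign convention as above, where the minus sign is folded into dz_a):
import numpy as np
from scipy import linalg

jac = np.random.randn(20, 6)          # residual Jacobian
res = np.random.randn(20, 1)          # residual vector
dz_a = -(np.dot(jac.conj().T, jac) + np.eye(jac.shape[1]))
dz_b = np.dot(jac.conj().T, res)
dz = linalg.lstsq(dz_a, dz_b)[0]      # parameter update, shape (6, 1)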
def _fit_lm(data, design_matrix, names):
"""Aux function"""
from scipy import stats
n_samples = len(data)
    n_features = np.prod(data.shape[1:])
if design_matrix.ndim != 2:
raise ValueError('Design matrix must be a 2d array')
n_rows, n_predictors = design_matrix.shape
if n_samples != n_rows:
raise ValueError('Number of rows in design matrix must be equal '
'to number of observations')
if n_predictors != len(names):
        raise ValueError('Number of regressor names must be equal to '
                         'the number of columns in the design matrix')
y = np.reshape(data, (n_samples, n_features))
betas, resid_sum_squares, _, _ = linalg.lstsq(a=design_matrix, b=y)
df = n_rows - n_predictors
sqrt_noise_var = np.sqrt(resid_sum_squares / df).reshape(data.shape[1:])
design_invcov = linalg.inv(np.dot(design_matrix.T, design_matrix))
unscaled_stderrs = np.sqrt(np.diag(design_invcov))
tiny = np.finfo(np.float64).tiny
beta, stderr, t_val, p_val, mlog10_p_val = (dict() for _ in range(5))
for x, unscaled_stderr, predictor in zip(betas, unscaled_stderrs, names):
beta[predictor] = x.reshape(data.shape[1:])
stderr[predictor] = sqrt_noise_var * unscaled_stderr
p_val[predictor] = np.empty_like(stderr[predictor])
t_val[predictor] = np.empty_like(stderr[predictor])
stderr_pos = (stderr[predictor] > 0)
beta_pos = (beta[predictor] > 0)
t_val[predictor][stderr_pos] = (beta[predictor][stderr_pos] /
stderr[predictor][stderr_pos])
cdf = stats.t.cdf(np.abs(t_val[predictor][stderr_pos]), df)
p_val[predictor][stderr_pos] = np.clip((1. - cdf) * 2., tiny, 1.)
# degenerate cases
mask = (~stderr_pos & beta_pos)
t_val[predictor][mask] = np.inf * np.sign(beta[predictor][mask])
p_val[predictor][mask] = tiny
# could do NaN here, but hopefully this is safe enough
mask = (~stderr_pos & ~beta_pos)
t_val[predictor][mask] = 0
p_val[predictor][mask] = 1.
mlog10_p_val[predictor] = -np.log10(p_val[predictor])
return beta, stderr, t_val, p_val, mlog10_p_val
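# Hedged usage sketch for _fit_lm on fake data: a design matrix with one
# intercept column and one regressor, and data of shape
# (n_samples, n_channels, n_times), e.g. epochs x sensors x time points.
import numpy as np

n_samples, n_channels, n_times = 40, 4, 10
fake_data = np.random.randn(n_samples, n_channels, n_times)
design = np.column_stack([np.ones(n_samples), np.random.randn(n_samples)])
names = ['intercept', 'predictor']

beta, stderr, t_val, p_val, mlog10_p_val = _fit_lm(fake_data, design, names)
# beta['predictor'].shape == (n_channels, n_times)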
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
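# Minimal sketch of the dual problem handled above (kernel ridge with a single
# alpha): dual_coef = (K + alpha * I)^{-1} y, with lstsq as the fallback when
# the regularized kernel matrix is singular. Names are illustrative only.
import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X_demo = rng.randn(30, 3)
K_demo = np.dot(X_demo, X_demo.T)     # linear kernel, (n_samples, n_samples)
y_demo = rng.randn(30, 2)             # two targets
alpha_demo = 1.0

K_reg = K_demo + alpha_demo * np.eye(30)
try:
    dual_coef = linalg.solve(K_reg, y_demo, assume_a='pos')
except np.linalg.LinAlgError:
    dual_coef = linalg.lstsq(K_reg, y_demo)[0]
# training predictions: np.dot(K_demo, dual_coef)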
def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample
.. versionadded:: 0.17
parameter *sample_weight* support to LinearRegression.
Returns
-------
self : returns an instance of self.
"""
n_jobs_ = self.n_jobs
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
y_numeric=True, multi_output=True)
if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
copy=self.copy_X, sample_weight=sample_weight)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
if sp.issparse(X):
if y.ndim < 2:
out = sparse_lsqr(X, y)
self.coef_ = out[0]
self._residues = out[3]
else:
            # sparse_lsqr cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(sparse_lsqr)(X, y[:, j].ravel())
for j in range(y.shape[1]))
            self.coef_ = np.vstack([out[0] for out in outs])
            self._residues = np.vstack([out[3] for out in outs])
else:
self.coef_, self._residues, self.rank_, self.singular_ = \
linalg.lstsq(X, y)
self.coef_ = self.coef_.T
if y.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self._set_intercept(X_offset, y_offset, X_scale)
return self
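# Usage sketch: the dense branch of fit() above is a single scipy lstsq call.
# Reproducing just that core on fake data (no intercept handling here):
import numpy as np
from scipy import linalg

X_demo = np.random.randn(50, 3)
coef_true = np.array([2.0, -1.0, 0.5])
y_demo = np.dot(X_demo, coef_true) + 0.1 * np.random.randn(50)

coef, residues, rank, singular = linalg.lstsq(X_demo, y_demo)
# coef is close to coef_true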