import numpy as np
from numpy import matrix
from numpy.linalg import lstsq


def get_device_location(gpsPoints):
    """
    params: takes an array of gps positions and signal strengths of the form
        [{"x": 1, "y": 4, "z": 1.6, "signal": 1.3}]
    returns: position and output power of the emitter of the form
        {"x": 1, "y": 4, "z": 1.6, "sigPower": 1.3}
    """
    aRows = []
    bRows = []
    last = gpsPoints.pop()  # use the last point as the linearization reference
    for gpsPoint in gpsPoints:
        aRow = [2.0 * (last["x"] - gpsPoint["x"]),
                2.0 * (last["y"] - gpsPoint["y"]),
                2.0 * (last["z"] - gpsPoint["z"]),
                (-1 * gpsPoint["signal"]**2 + last["signal"]**2)]
        bRow = [-(gpsPoint["x"]**2 - last["x"]**2)
                - (gpsPoint["y"]**2 - last["y"]**2)
                - (gpsPoint["z"]**2 - last["z"]**2)]
        aRows.append(aRow)
        bRows.append(bRow)
    aMatrix = matrix(aRows)
    bMatrix = matrix(bRows)
    solution = lstsq(aMatrix, bMatrix)[0]
    return {"x": solution.item(0), "y": solution.item(1),
            "z": solution.item(2), "sigPower": solution.item(3)**0.5}
Python lstsq() usage examples
def do(self, a, b):
    arr = np.asarray(a)
    m, n = arr.shape
    u, s, vt = linalg.svd(a, 0)
    x, residuals, rank, sv = linalg.lstsq(a, b)
    if m <= n:
        assert_almost_equal(b, dot(a, x))
        assert_equal(rank, m)
    else:
        assert_equal(rank, n)
    assert_almost_equal(sv, sv.__array_wrap__(s))
    if rank == n and m > n:
        expect_resids = (
            np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
        expect_resids = np.asarray(expect_resids)
        if len(np.asarray(b).shape) == 1:
            expect_resids.shape = (1,)
        assert_equal(residuals.shape, expect_resids.shape)
    else:
        expect_resids = np.array([]).view(type(x))
    assert_almost_equal(residuals, expect_resids)
    assert_(np.issubdtype(residuals.dtype, np.floating))
    assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
    assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
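
This `do` helper appears to come from NumPy's own linalg test suite and runs inside a test-case class, so it is not standalone. A self-contained sketch of the property it checks, namely the four return values of lstsq on an overdetermined, full-column-rank system:

import numpy as np

A = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])  # overdetermined: m > n
b = np.array([1.0, 2.0, 2.0])
x, residuals, rank, sv = np.linalg.lstsq(A, b, rcond=None)
assert rank == 2                        # full column rank
expected = ((A @ x - b) ** 2).sum()     # residuals = sum of squared errors
assert np.allclose(residuals, expected)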
def test_ridge():
    """Test Ridge regression for different values of mu."""
    # A simple sum function (with intercept)
    X = [[1, 2], [3, 4], [5, 6]]
    y = [sum(x) + 1 for x in X]
    T = [[7, 8], [9, 10], [2, 1]]
    model = RidgeRegression(mu=0.0).fit(X, y)  # OLS
    assert_array_almost_equal([1, 1], model.coef_)
    assert_array_almost_equal([16, 20, 4], model.predict(T))
    assert_almost_equal(1.0, model.intercept_)
    # Equivalence with standard numpy least squares
    Xc = X - np.mean(X, axis=0)
    assert_almost_equal(la.lstsq(Xc, y)[0], model.coef_)
    model = RidgeRegression(mu=0.5).fit(X, y)
    assert_array_almost_equal([0.91428571, 0.91428571], model.coef_)
    assert_array_almost_equal([15.31428571, 18.97142857, 4.34285714],
                              model.predict(T))
    model = RidgeRegression(mu=1.0).fit(X, y)
    assert_array_almost_equal([0.84210526, 0.84210526], model.coef_)
    assert_array_almost_equal([14.73684211, 18.10526316, 4.63157895],
                              model.predict(T))
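
RidgeRegression and the assert_* helpers come from the project under test, so this test is not standalone. The expected coefficients imply that the penalty scales with the number of samples, i.e. the estimator solves (Xc'Xc + mu*n*I) w = Xc'yc on centered data. A minimal stand-in sketch with that behavior (class name hypothetical):

import numpy as np


class RidgeSketch:
    """Hypothetical stand-in: ridge on centered data, penalty scaled by n."""

    def __init__(self, mu=0.0):
        self.mu = mu

    def fit(self, X, y):
        X, y = np.asarray(X, float), np.asarray(y, float)
        x_mean, y_mean = X.mean(axis=0), y.mean()
        Xc, yc = X - x_mean, y - y_mean
        n, p = Xc.shape
        # (Xc'Xc + mu*n*I) w = Xc'yc, solved with lstsq for stability
        self.coef_ = np.linalg.lstsq(
            Xc.T @ Xc + self.mu * n * np.eye(p), Xc.T @ yc, rcond=None)[0]
        self.intercept_ = y_mean - x_mean @ self.coef_
        return self

    def predict(self, T):
        return np.asarray(T, float) @ self.coef_ + self.intercept_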
def vector_to_star_basis(self, standard_vec):
    '''
    Convert a vector in the standard basis to a point in the star's basis.
    This solves basis_matrix.T * rv = standard_vec, which is essentially
    computing the inverse of basis_matrix, and can become ill-conditioned.
    '''
    Timers.tic("vector_to_star_basis()")
    rv = np.linalg.solve(self.basis_matrix.T, standard_vec)
    # rv = lstsq(self.basis_matrix.T, np.array(standard_vec, dtype=float))[0]
    # double-check that we've found the solution within some tolerance
    if not np.allclose(np.dot(self.basis_matrix.T, rv), standard_vec):
        raise RuntimeError("basis matrix was ill-conditioned, vector_to_star_basis() failed")
    Timers.toc("vector_to_star_basis()")
    assert isinstance(rv, np.ndarray)
    return rv
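
A standalone sketch of the same exact-solve-then-verify pattern, with a least-squares fallback (function name hypothetical):

import numpy as np

def solve_or_lstsq(M, v, tol=1e-9):
    """Solve M x = v exactly; fall back to least squares if M is singular."""
    try:
        x = np.linalg.solve(M, v)
    except np.linalg.LinAlgError:
        x = np.linalg.lstsq(M, v, rcond=None)[0]
    if not np.allclose(M @ x, v, atol=tol):
        raise RuntimeError("system is ill-conditioned")
    return x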
def left_least_squares(x, y, rcond=-1, fast=False):
    'find the A that minimizes ||y - A*x||'
    if fast:
        # normal equations: faster, but less numerically stable
        return la.lstsq(np.matmul(x, x.T), np.matmul(x, y.T), rcond=rcond)[0].T
    else:
        return la.lstsq(x.T, y.T, rcond=rcond)[0].T
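
A quick shape check, assuming `la` is `numpy.linalg`: with `x` of shape (k, m) and `y` of shape (d, m), the returned `A` is (d, k) and `A @ x` approximates `y`:

import numpy as np
import numpy.linalg as la

rng = np.random.default_rng(0)
A_true = rng.normal(size=(3, 5))
x = rng.normal(size=(5, 100))
y = A_true @ x + 0.01 * rng.normal(size=(3, 100))
A_est = left_least_squares(x, y, rcond=None)  # slow, stable path
assert np.allclose(A_est, A_true, atol=0.1)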
def solve(A, y, delta, method):
    if method == 'ridge_reg_chol':
        # R is an upper-triangular Cholesky factor (scipy convention):
        # R.T @ R = A.T A + delta*I, so solve R.T z = A'y, then R x = z
        R = cholesky(dot(A.T, A) + delta * np.identity(A.shape[1]))
        z = lstsq(R.T, dot(A.T, y))[0]
        x = lstsq(R, z)[0]
    elif method == 'ridge_reg_inv':
        x = dot(dot(inv(dot(A.T, A) + delta * np.identity(A.shape[1])), A.T), y)
    elif method == 'ls_mldivide':
        if delta > 0:
            print('ignoring lambda; no regularization used')
        x = lstsq(A, y)[0]
    loss = 0.5 * (dot(A, x) - y) ** 2
    return x.reshape(-1, 1)
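
A usage sketch under the assumption that `cholesky` here is `scipy.linalg.cholesky` (which returns an upper-triangular factor by default, matching the R.T-then-R solve order above) while `lstsq`, `inv`, and `dot` come from NumPy; both ridge paths should then agree:

import numpy as np
from numpy import dot
from numpy.linalg import inv, lstsq
from scipy.linalg import cholesky  # upper-triangular factor by default

A = np.random.default_rng(1).normal(size=(20, 3))
y = A @ np.array([1.0, -2.0, 0.5])
x_chol = solve(A, y, 0.1, 'ridge_reg_chol')
x_inv = solve(A, y, 0.1, 'ridge_reg_inv')
assert np.allclose(x_chol, x_inv)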
def trainClassifer(self, labels, vectors, ilog=None):
    '''
    Train the polynomial. Do not call this function
    manually; instead call the train function on the super
    class.
    '''
    # build the design matrix
    matrix = []
    for each in vectors:
        if len(each) != 2:
            raise ValueError("ERROR: Vector length=%d. Polynomial2D only predicts for vectors of length 2." % len(each))
        x, y = each
        matrix.append(self.buildRow(x, y))
    matrix = array(matrix)
    labels = array(labels)
    x, resids, rank, s = lstsq(matrix, labels)
    self.x = x
    self.resids = resids
    self.rank = rank
    self.s = s
    if rank != matrix.shape[1]:
        print("WARNING: Polynomial is not fully constrained.")
def _fit(self, X, y):
    self.coef_ = la.lstsq(X, y)[0]
def solve(self):
    self._check_nodes()
    # Solve LS
    self.VU = [node[key] for node in self.U.values() for key in ("ux", "uy")]
    self.VF = [node[key] for node in self.F.values() for key in ("fx", "fy")]
    knw = [pos for pos, value in enumerate(self.VU) if value is not np.nan]
    unknw = [pos for pos, value in enumerate(self.VU) if value is np.nan]
    self.K2S = np.delete(np.delete(self.KG, knw, 0), knw, 1)
    self.F2S = np.delete(self.VF, knw, 0)
    # For displacements
    try:
        self.solved_u = la.solve(self.K2S, self.F2S)
    except la.LinAlgError:  # singular system; fall back to least squares
        print("Solved using LSTSQ")
        self.solved_u = la.lstsq(self.K2S, self.F2S)[0]
    for k, ic in enumerate(unknw):
        nd, var = self.index2key(ic)
        self.U[nd][var] = self.solved_u[k]
    # Updating nodes displacements
    for nd in self.nodes.values():
        if np.isnan(nd.ux):
            nd.ux = self.U[nd.label]["ux"]
        if np.isnan(nd.uy):
            nd.uy = self.U[nd.label]["uy"]
    # For nodal forces/reactions
    self.NF = self.F.copy()
    self.VU = [node[key] for node in self.U.values() for key in ("ux", "uy")]
    nf_calc = np.dot(self.KG, self.VU)
    for k in range(2 * self.get_number_of_nodes()):
        nd, var = self.index2key(k, ("fx", "fy"))
        self.NF[nd][var] = nf_calc[k]
        cnlab = np.floor(k / float(self.dof))
        if var == "fx":
            self.nodes[cnlab].fx = nf_calc[k]
        elif var == "fy":
            self.nodes[cnlab].fy = nf_calc[k]
def _fit(self):
    """Compute a least squares fit."""
    A = self._plegendre(self._normalize(self.get_x()))
    self.beta = la.lstsq(A, self.get_fx())[0]
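
`_plegendre`, `_normalize`, and the accessors belong to the surrounding class; the underlying pattern is an ordinary design-matrix fit. A standalone sketch using NumPy's Legendre pseudo-Vandermonde matrix:

import numpy as np
from numpy.polynomial.legendre import legvander

x = np.linspace(-1, 1, 50)
fx = 0.5 - x + 2 * x**2
A = legvander(x, deg=2)                       # columns P0(x), P1(x), P2(x)
beta = np.linalg.lstsq(A, fx, rcond=None)[0]  # Legendre coefficients
assert np.allclose(A @ beta, fx)              # exact fit: fx is in the span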
def mils(A, B, y, p=1):
    # x_hat,z_hat = mils(A,B,y,p) produces p pairs of optimal solutions to
    # the mixed integer least squares problem min_{x,z}||y-Ax-Bz||,
    # where x and z are real and integer vectors, respectively.
    #
    # Input arguments:
    #    A - m by k real matrix
    #    B - m by n real matrix
    #        [A,B] has full column rank
    #    y - m by 1 real column vector
    #    p - the number of optimal solutions
    #
    # Output arguments:
    #    x_hat - k by p real matrix
    #    z_hat - n by p integer matrix (in double precision).
    #            The pair {x_hat(:,j),z_hat(:,j)} is the j-th optimal solution,
    #            i.e., its residual is the j-th smallest, so
    #            ||y-A*x_hat(:,1)-B*z_hat(:,1)||<=...<=||y-A*x_hat(:,p)-B*z_hat(:,p)||
    m, k = A.shape
    m2, n = B.shape
    if m != m2 or y.shape != (m, 1):  # y must be an m-by-1 column vector
        raise ValueError("Input arguments have a matrix dimension error!")
    if rank(A) + rank(B) < k + n:
        raise ValueError("[A,B] does not have full column rank!")
    Q, R = qr(A, mode='complete')
    Q_A = Q[:, :k]
    Q_Abar = Q[:, k:]
    R_A = R[:k, :]
    # Compute the p optimal integer least squares solutions
    z_hat = ils(dot(Q_Abar.T, B), dot(Q_Abar.T, y), p)
    # Compute the corresponding real least squares solutions
    x_hat = lstsq(R_A, dot(Q_A.T, (dot(y, ones((1, p))) - dot(B, z_hat))))[0]
    return x_hat, z_hat
Source: toa_3D_bundle_with_smoother.py, project: lps-anchor-pos-estimator, author: bitcraze
def bundletoa(D, I, J, xt, yt, debug=1, opts=[]):
    res = None
    jac = None
    for kkk in range(0, 10):
        res, jac = calcresandjac(D, I, J, xt, yt, opts)
        # damped Gauss-Newton step: solve (J'J + 0.1*I) dz = -J'res
        # note: '*' here assumes matrix-like operands (matmul semantics)
        dz = linalg.lstsq(-((jac.conj().T) * jac + 0.1 *
                            np.eye(jac.shape[1])), (jac.conj().T) * res)[0]
        xtn, ytn = updatexy(xt, yt, dz)
        res2, jac2 = calcresandjac(D, I, J, xtn, ytn, opts)  # evaluate at the update
        cc = np.linalg.norm(jac * dz) / np.linalg.norm(res)
        if np.linalg.norm(res) < np.linalg.norm(res2):
            if cc > 1e-4:
                # halve the step until the residual improves (or give up)
                kkkk = 1
                while (kkkk < 50) and (
                        np.linalg.norm(res) < np.linalg.norm(res2)):
                    dz = dz / 2
                    xtn, ytn = updatexy(xt, yt, dz)
                    res2, jac2 = calcresandjac(D, I, J, xtn, ytn, opts)
                    kkkk = kkkk + 1
        if debug:
            aa_1 = np.linalg.norm(res)
            aa_2 = np.linalg.norm(res + jac * dz)
            aa_3 = np.linalg.norm(res2)
            aa = np.array([aa_1, aa_2, aa_3])
            bb = aa
            bb = bb - bb[1]
            bb = bb / bb[0]
            cc = np.linalg.norm(jac * dz) / np.linalg.norm(res)
            print(aa, bb, cc)
        if np.linalg.norm(res2) < np.linalg.norm(res):
            xt = xtn
            yt = ytn
        else:
            if debug:
                print(kkk, ' stalled')
    xopt = xt
    yopt = yt
    return xopt, yopt, res, jac
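
The core update above is a damped Gauss-Newton (Levenberg-Marquardt style) iteration: solve (J'J + 0.1*I) dz = -J'r, then halve the step while the residual norm fails to improve. A minimal sketch of the step itself for real-valued Jacobians (function name hypothetical):

import numpy as np

def lm_step(res, jac, damping=0.1):
    """One damped Gauss-Newton step: solve (J'J + damping*I) dz = -J'res."""
    JtJ = jac.T @ jac + damping * np.eye(jac.shape[1])
    return np.linalg.lstsq(JtJ, -jac.T @ res, rcond=None)[0]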
def AffineFromPointsLS(src, dst, new_size, filter=BILINEAR, normalize=True):
    '''
    An affine transform that will rotate, translate, and scale to map one
    set of points to the other. For example, to align eye coordinates in face images.
    Find a transform (a, b, tx, ty) such that it maps the source points to the
    destination points::
        a*x1-b*y1+tx = x2
        b*x1+a*y1+ty = y2
    This method minimizes the squared error to find an optimal fit between the
    points.
    @param src: a list of link.Points in the source image.
    @param dst: a list of link.Points in the destination image.
    @param new_size: new size for the image.
    @param filter: PIL filter to use.
    '''
    if normalize:
        # Normalize Points
        src_norm = AffineNormalizePoints(src)
        src = src_norm.transformPoints(src)
        dst_norm = AffineNormalizePoints(dst)
        dst = dst_norm.transformPoints(dst)
    # Compute the transformation parameters
    A = []
    b = []
    for i in range(len(src)):
        A.append([src[i].X(), -src[i].Y(), 1, 0])
        A.append([src[i].Y(),  src[i].X(), 0, 1])
        b.append(dst[i].X())
        b.append(dst[i].Y())
    A = array(A)
    b = array(b)
    result, resids, rank, s = lstsq(A, b)
    a, b, tx, ty = result
    # Create the transform matrix
    matrix = array([[a, -b, tx], [b, a, ty], [0, 0, 1]], 'd')
    if normalize:
        matrix = dot(dst_norm.inverse, dot(matrix, src_norm.matrix))
    return AffineTransform(matrix, new_size, filter)
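
A self-contained sketch of the same least-squares setup with plain coordinate tuples instead of the library's point objects (function name hypothetical):

import numpy as np

def similarity_from_points(src, dst):
    """Fit (a, b, tx, ty) for x2 = a*x1 - b*y1 + tx, y2 = b*x1 + a*y1 + ty;
    src/dst are lists of (x, y) tuples."""
    A, rhs = [], []
    for (x1, y1), (x2, y2) in zip(src, dst):
        A.append([x1, -y1, 1, 0]); rhs.append(x2)
        A.append([y1,  x1, 0, 1]); rhs.append(y2)
    a, b, tx, ty = np.linalg.lstsq(np.array(A), np.array(rhs), rcond=None)[0]
    return np.array([[a, -b, tx], [b, a, ty], [0, 0, 1]])

# A pure 90-degree rotation should be recovered exactly
src = [(1, 0), (0, 1), (-1, 0), (0, -1)]
dst = [(0, 1), (-1, 0), (0, -1), (1, 0)]
print(similarity_from_points(src, dst).round(6))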
def _split_state(a_old, o_old, state, q0l, q1l):
    # use least-squares to find new parameters
    # add new state to end
    ns = a_old.shape[0]
    no = o_old.shape[1]
    A = numpy.zeros((ns + 1, ns + 1))
    O = numpy.zeros((ns + 1, no))
    A[0:ns, 0:ns] = a_old
    O[0:ns, :] = o_old
    # fill in p ( * | snew2)
    A[ns, :] = A[state, :]
    # fill in p (q | snew2)
    O[ns, :] = O[state, :]  # order of these three lines matters
    O[ns, q0l] = 0
    w2 = numpy.sum(O[ns, :])
    O[ns, :] = O[ns, :] / w2
    # fill in p (q | snew1)
    O[state, q1l] = 0
    w1 = numpy.sum(O[state, :])
    O[state, :] = O[state, :] / w1
    # finally, fill in p( snew1 | *) p ( snew2 | *)
    A[:, ns] = A[:, state]
    A[:, state] = w1 / (w1 + w2) * A[:, state]
    A[:, ns] = w2 / (w1 + w2) * A[:, ns]
    # done
    return A, O, ns
    # tricky part is the incoming state: reallocate by solving a linear system
    # (we'll call the coefficients M)
    # M = numpy.zeros(( ns + no*ns, 2*(ns+1) ))
    # b = numpy.zeros( M.shape[0] )
    # ri = 0
    # def idx (s0, s1): return s0 if s1 == state else ns + s0
    # for si in range(ns):
    #     M[ri,idx(si,state)] = M[ri,idx(si,ns)] = 1
    #     b[ri] = a_old[si,state]
    #     ri += 1
    # # q1 obs constraint
    # print M.shape
    # for si in range(ns):
    #     for qi in range(no):
    #         print "O[state,qi]", O[state,qi]
    #         M[ri,idx(si,state)] = O[state,qi]
    #         M[ri,idx(si,ns)] = O[si,qi]
    #         print ri, "o_old", state, qi, o_old[state,qi]
    #         print ri, b[ri]
    #         b[ri] = o_old[state,qi]
    #         ri += 1
    # # solve
    # x,resids,rank,s = linalg.lstsq(M,b)
    # print "X", x
    # print "RESIDS", resids
    # A[:,state] = x[0:ns+1]
    # A[:,state] = A[:,state] / numpy.sum(A[:,state])
    # A[:,ns] = x[ns+1:]
    # A[:,ns] = A[:,ns] / numpy.sum(A[:,ns])
    # # finally
    # return A,O,ns
def optimize_model_coefficients(self, inliers, model_coefficients):
    '''
    Recompute the model coefficients using the given inlier set and return them to the user.
    These are the coefficients of the model after refinement
    (e.g., after SVD)

    # Parameters
    inliers : list of int
        The data inliers supporting the model
    model_coefficients : coefficients array
        The initial guess for the model coefficients

    # Returns
    optimized_coefficients : coefficients array
        The resultant recomputed coefficients after non-linear optimization
    '''
    logger = logging.getLogger('pcl.sac.SampleConsensusModel.optimize_model_coefficients')
    if len(model_coefficients) != self.model_size:
        logger.error('Invalid number of model coefficients given (%lu).',
                     len(model_coefficients))
        return model_coefficients
    if len(inliers) <= self.sample_size:
        logger.error('Not enough inliers found to optimize model coefficients (%lu)!',
                     len(inliers))
        return model_coefficients

    ###### The following is the original PCL implementation using PCA ######
    # covariance_matrix, xyz_centroid = compute_mean_and_covariance_matrix(self._input, inliers)
    # eigen_value, eigen_vector = np.linalg.eigh(covariance_matrix)
    # smallest = np.argmin(eigen_value)
    #
    # optimized_coefficients = eigen_vector[:, smallest].tolist()
    # optimized_coefficients.append(-np.dot(optimized_coefficients + [0], xyz_centroid))

    cloud = self._input.xyz
    if inliers is not None:
        cloud = cloud[inliers]
    # Use least squares to fit the plane through all the given sample points
    # and find its coefficients
    constant = -np.ones(len(cloud))
    optimized_coefficients, *_ = lstsq(cloud, constant)
    optimized_coefficients = optimized_coefficients.tolist()
    optimized_coefficients.append(1)
    if not self._is_model_valid(optimized_coefficients):
        logger.warning('Optimized coefficients invalid, returning original one')
        return model_coefficients
    return optimized_coefficients
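
The fit above solves cloud @ [a, b, c] = -1 and appends the constant, i.e. it parameterizes the plane as a*x + b*y + c*z + 1 = 0 (a form that cannot represent planes through the origin). A standalone sketch:

import numpy as np

rng = np.random.default_rng(2)
# Hypothetical noisy samples from the plane x + y + z - 1 = 0
xy = rng.uniform(-1, 1, size=(200, 2))
z = 1 - xy[:, 0] - xy[:, 1] + 0.01 * rng.normal(size=200)
cloud = np.column_stack([xy, z])
coeffs, *_ = np.linalg.lstsq(cloud, -np.ones(len(cloud)), rcond=None)
print(np.append(coeffs, 1))  # approx [-1, -1, -1, 1], the same plane scaled by -1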
def _slow_path(self):
    """Frisch-Waugh-Lovell implementation, works for all scenarios"""
    has_effect = self.entity_effects or self.time_effects or self.other_effects
    w = self.weights.values2d
    root_w = np.sqrt(w)
    y = root_w * self.dependent.values2d
    x = root_w * self.exog.values2d
    if not has_effect:
        ybar = root_w @ lstsq(root_w, y)[0]
        return y, x, ybar, 0, 0

    drop_first = self._constant
    d = []
    if self.entity_effects:
        d.append(self.dependent.dummies('entity', drop_first=drop_first).values)
        drop_first = True
    if self.time_effects:
        d.append(self.dependent.dummies('time', drop_first=drop_first).values)
        drop_first = True
    if self.other_effects:
        oe = self._other_effect_cats.dataframe
        for c in oe:
            dummies = pd.get_dummies(oe[c], drop_first=drop_first).astype(np.float64)
            d.append(dummies.values)
            drop_first = True

    d = np.column_stack(d)
    wd = root_w * d
    if self.has_constant:
        wd -= root_w * (w.T @ d / w.sum())
        z = np.ones_like(root_w)
        d -= z * (z.T @ d / z.sum())

    x_mean = np.linalg.lstsq(wd, x)[0]
    y_mean = np.linalg.lstsq(wd, y)[0]

    # Save fitted unweighted effects to use in eps calculation
    x_effects = d @ x_mean
    y_effects = d @ y_mean

    # Purge fitted, weighted values
    x = x - wd @ x_mean
    y = y - wd @ y_mean

    ybar = root_w @ lstsq(root_w, y)[0]
    return y, x, ybar, y_effects, x_effects
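
The partialling-out step relies on the Frisch-Waugh-Lovell theorem: regress y and x on the effect dummies, subtract the fitted values, then regress the residuals on each other. A toy sketch of the unweighted case with two entities:

import numpy as np

rng = np.random.default_rng(0)
entity = np.repeat([0, 1], 50)  # two entities, 50 observations each
d = np.column_stack([(entity == i).astype(float) for i in (0, 1)])
x = rng.normal(size=(100, 1))
y = 2.0 * x[:, 0] + 3.0 * entity + rng.normal(scale=0.1, size=100)
# purge entity means from y and x, then regress the residuals
y_d = y - d @ np.linalg.lstsq(d, y, rcond=None)[0]
x_d = x - d @ np.linalg.lstsq(d, x, rcond=None)[0]
beta = np.linalg.lstsq(x_d, y_d, rcond=None)[0]
assert abs(beta[0] - 2.0) < 0.1  # fixed effect removed, slope recovered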