def PCA_values(data, centered=True):
    # assumes: from numpy import mean, linalg; svd_flip (e.g. from
    # sklearn.utils.extmath) and varianceExplained are project helpers
    n_samples, n_features = data.shape
    # by default, the data are centered first
    if centered:
        data_centered = data - mean(data, axis=0)
    else:
        data_centered = data
    # apply the Singular Value Decomposition
    U, S, V = linalg.svd(data_centered, full_matrices=False)
    # flip eigenvectors' sign to enforce deterministic output
    U, V = svd_flip(U, V)
    # components
    components_ = V
    # variance explained by the PCs
    explained_variance_ratio_ = varianceExplained(S, n_samples)
    return components_, explained_variance_ratio_
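For reference, a minimal, self-contained sketch of the same idea in plain NumPy; the helper varianceExplained above is assumed to normalise the squared singular values, and this sketch simply inlines that step:

import numpy as np

def pca_via_svd(X):
    # Center the data and take the SVD: rows of Vt are the principal axes,
    # and the squared singular values (scaled by n-1) are the explained variances.
    Xc = X - X.mean(axis=0)
    U, S, Vt = np.linalg.svd(Xc, full_matrices=False)
    explained_variance = S ** 2 / (X.shape[0] - 1)
    return Vt, explained_variance / explained_variance.sum()

X = np.random.default_rng(0).normal(size=(100, 5))
components, ratios = pca_via_svd(X)
print(components.shape, ratios.sum())  # (5, 5), ratios sum to 1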
Python svd() usage examples (source code)
def do(self, a, b):
    arr = np.asarray(a)
    m, n = arr.shape
    u, s, vt = linalg.svd(a, 0)
    x, residuals, rank, sv = linalg.lstsq(a, b)
    if m <= n:
        assert_almost_equal(b, dot(a, x))
        assert_equal(rank, m)
    else:
        assert_equal(rank, n)
    assert_almost_equal(sv, sv.__array_wrap__(s))
    if rank == n and m > n:
        expect_resids = (
            np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
        expect_resids = np.asarray(expect_resids)
        if len(np.asarray(b).shape) == 1:
            expect_resids.shape = (1,)
        assert_equal(residuals.shape, expect_resids.shape)
    else:
        expect_resids = np.array([]).view(type(x))
    assert_almost_equal(residuals, expect_resids)
    assert_(np.issubdtype(residuals.dtype, np.floating))
    assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
    assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
Source: test_linalg.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
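The do() test above checks numpy.linalg.lstsq against an SVD of the same matrix. A minimal, self-contained sketch of that relationship, with illustrative values that are not part of the test suite:

import numpy as np

a = np.array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
b = np.array([-1., 0.2, 0.9, 2.1])

x_lstsq, residuals, rank, sv = np.linalg.lstsq(a, b, rcond=None)

# solve the same least-squares problem through the SVD pseudoinverse
u, s, vt = np.linalg.svd(a, full_matrices=False)
x_svd = vt.T @ ((u.T @ b) / s)

assert np.allclose(x_lstsq, x_svd)
assert np.allclose(sv, s)  # lstsq also returns the singular values of a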
def update(self, Y):
    """Alg. 2: Global update of the singular vectors at time t using exact SVD.

    Args:
        Y (numpy array): m-by-n_t matrix which has n_t "normal" unit vectors.

    """
    if not hasattr(self, 's'):
        # initial update
        self.U, self.s, V = ln.svd(Y, full_matrices=False)
    else:
        F = np.concatenate((np.diag(self.s), np.dot(self.U.T, Y)), axis=1)
        U, self.s, V = ln.svd(F, full_matrices=False)
        self.U = np.dot(self.U, U)
    self.U_k = self.U[:, :self.k]
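A standalone sketch of the update step above, assuming U and s hold the current SVD factors and Y carries the new columns (the names are illustrative, not part of the original class):

import numpy as np

def incremental_svd_update(U, s, Y):
    # Fold the new columns into the small matrix F, re-diagonalise it,
    # then rotate the old left singular vectors accordingly.
    F = np.concatenate((np.diag(s), U.T @ Y), axis=1)
    U_f, s_new, _ = np.linalg.svd(F, full_matrices=False)
    return U @ U_f, s_new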
def svd_score(data_mat, user, item, sim_meas):
    n = shape(data_mat)[1]
    sim_total = 0.0
    rat_total = 0.0
    u, sigma, vt = la.svd(data_mat)
    sig_mat = mat(eye(4) * sigma[:4])
    # map the items into the reduced 4-dimensional space
    data_trans = data_mat.T * u[:, :4] * sig_mat.I
    for j in range(n):
        user_rate = data_mat[user, j]
        if user_rate == 0 or j == item:
            continue
        sim = sim_meas(data_trans[item, :].T, data_trans[j, :].T)
        print('the %d and %d similarity is: %f' % (item, j, sim))
        sim_total += sim
        rat_total += sim * user_rate
    if sim_total == 0:
        return 0
    else:
        return rat_total / sim_total
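A hypothetical call, assuming the ratings live in a small user-by-item matrix and sim_meas is any similarity measure on column vectors; the data and the cos_sim helper below are illustrative only:

from numpy import mat, linalg as la

def cos_sim(a, b):
    # cosine similarity rescaled to [0, 1]
    return 0.5 + 0.5 * float(a.T * b) / (la.norm(a) * la.norm(b))

ratings = mat([[4, 4, 0, 2, 2],
               [4, 0, 0, 3, 3],
               [4, 0, 0, 1, 1],
               [1, 1, 1, 2, 0],
               [2, 2, 2, 0, 0],
               [1, 1, 1, 0, 0],
               [5, 5, 5, 0, 0]])

print(svd_score(ratings, user=0, item=2, sim_meas=cos_sim))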
def img_comp(numsv=3, thresh=0.8):
    text = []
    with open('0_5.txt') as fr:
        for line in fr.readlines():
            row = []
            for i in range(32):
                row.append(int(line[i]))
            text.append(row)
    print('*****origin matrix*****')
    text_mat = mat(text)
    print_mat(text_mat, thresh)
    u, sigma, vt = la.svd(text_mat)
    sig = mat(eye(numsv) * sigma[:numsv])
    text_trans = u[:, :numsv] * sig * vt[:numsv, :]
    print('*****reconstructed matrix using %d singular values*****' % numsv)
    print_mat(text_trans, thresh)
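img_comp hard-codes numsv; a small sketch of how the rank could instead be picked from the singular-value energy, a common heuristic that is not part of the original snippet:

import numpy as np

def rank_for_energy(sigma, keep=0.9):
    # smallest number of singular values whose squared, cumulative sum
    # retains at least `keep` of the total energy
    energy = np.cumsum(sigma ** 2) / np.sum(sigma ** 2)
    return int(np.searchsorted(energy, keep) + 1)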
def estimate_X_TLS(y, H):
    """Estimator of x for the linear model using Total Least Squares (TLS).

    Compared to the classical Least Squares estimator, the TLS estimator is
    better suited when H is not exactly known or is contaminated with noise [GOL80]_.

    .. [GOL80] Golub, Gene H., and Charles F. Van Loan. "An analysis of the
       total least squares problem." SIAM Journal on Numerical Analysis 17.6
       (1980): 883-893.
    """
    N, L = H.shape
    H = np.matrix(H)
    Z = np.hstack([H, y])
    U, S, V = lg.svd(Z)
    V = np.matrix(V)
    V = V.H
    VHY = V[:L, L:]
    VYY = V[L:, L:]
    x = -VHY * lg.inv(VYY)
    return x
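A hypothetical usage sketch on synthetic data, assuming the module-level imports the snippet relies on (numpy as np, numpy.linalg as lg) are in place:

import numpy as np

rng = np.random.default_rng(0)
H_true = rng.normal(size=(50, 3))
x_true = np.array([[1.0], [-2.0], [0.5]])
y = H_true @ x_true + 0.01 * rng.normal(size=(50, 1))
H_obs = H_true + 0.01 * rng.normal(size=(50, 3))  # H observed with noise

x_tls = estimate_X_TLS(y, H_obs)
print(np.asarray(x_tls).ravel())  # close to [1.0, -2.0, 0.5]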
def zca(v):
    """Zero Component Analysis https://github.com/mwv/zca/blob/master/zca/zca.py"""
    # assumes the module-level aliases used below, presumably:
    #   import numpy as N; from numpy import linalg as LA
    v = v.reshape((v.shape[0], -1))
    m = v.mean(0)
    vm = N.ascontiguousarray((v - m).T)
    # print(vm.shape)
    # cov = N.zeros((vm.shape[0], vm.shape[0]), 'f')
    # for x in range(vm.shape[0]):
    #     for y in range(x, vm.shape[0]):
    #         cov[x, y] = cov[y, x] = vm[x].dot(vm[y])
    # cov /= v.shape[0] - 1
    cov = vm.dot(vm.T) / (v.shape[0] - 1)
    u, s = LA.svd(cov, full_matrices=0)[:2]
    w = (u * (1 / N.sqrt(s.clip(1e-6)))).dot(u.T)
    # dw = (u * (1 / N.sqrt(s))).dot(u.T)  # Dewhiten
    return m, w
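A hypothetical usage sketch, assuming the snippet's own numpy aliases are in scope; the whitening transform returned above is then applied as (x - m) dot w:

import numpy as np

X = np.random.default_rng(0).normal(size=(256, 8))
m, w = zca(X)
X_white = (X.reshape((X.shape[0], -1)) - m).dot(w)
print(np.allclose(np.cov(X_white, rowvar=False), np.eye(8), atol=1e-2))  # True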
def cluster(self, vectors, assign_clusters=False, trace=False):
    assert len(vectors) > 0

    # normalise the vectors
    if self._should_normalise:
        vectors = list(map(self._normalise, vectors))

    # use SVD to reduce the dimensionality
    if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
        [u, d, vt] = linalg.svd(numpy.transpose(array(vectors)))
        S = d[:self._svd_dimensions] * \
            numpy.identity(self._svd_dimensions, numpy.float64)
        T = u[:, :self._svd_dimensions]
        Dt = vt[:self._svd_dimensions, :]
        vectors = numpy.transpose(numpy.dot(S, Dt))
        self._Tt = numpy.transpose(T)

    # call abstract method to cluster the vectors
    self.cluster_vectorspace(vectors, trace)

    # assign the vectors to clusters
    if assign_clusters:
        print(self._Tt, vectors)
        return [self.classify(vector) for vector in vectors]
def test_B():
    print('test_B')
    random.seed()
    a = B(*svd(random.random((3, 3))))
    print('a:\n%s' % a.M)
    b = random.random((3, 3))
    print('b:\n%s' % b)
    c1, c2 = b.dot(a.M), a.ldot(b)
    print('dot(b,a):\n%s' % c1)
    print('a.ldot(b):\n%s' % c2.M)
    print('the difference:\n%s' % (c1 - c2.M))
    d1, d2 = a.M.dot(b), a.rdot(b)
    print('a.dot(b):\n%s' % d1)
    print('a.rdot(b):\n%s' % d2.M)
    print('the difference:\n%s' % (d1 - d2.M))
    print()
def __init__(self, T, Vs, pos=0):
    if pos > len(Vs) or pos < 0:
        raise ValueError('Bs __init__ error: pos(%s) should be in range(0,%s).' % (pos, len(Vs)))
    self.pos = pos
    self.extend([None] * len(Vs))
    Vs = np.array(Vs)
    for i, V in enumerate(Vs[0:pos]):
        if i == 0:
            self[i] = B(*svd(V.dot(T)))
        else:
            self[i] = self[i - 1].ldot(V.dot(T))
    for i, V in enumerate(Vs[pos:][::-1]):
        if i == 0:
            self[len(Vs) - 1 - i] = B(*svd(V.dot(T)))
        else:
            self[i] = self[len(Vs) - 1 - i] = self[len(Vs) - i].rdot(V.dot(T))
def flucstruc(input_data, min_dphase=-pi, group=fs_group_geometric, method='rms', separate=True, label=None):
    from pyfusion.data.base import DataSet
    from pyfusion.data.timeseries import FlucStruc

    if label:
        fs_dataset = DataSet(label)
    else:
        fs_dataset = DataSet('flucstrucs_%s' % datetime.now())

    svd_data = input_data.subtract_mean().normalise(method, separate).svd()
    for fs_gr in group(svd_data):
        tmp = FlucStruc(svd_data, fs_gr, input_data.timebase, min_dphase=min_dphase)
        tmp.meta = input_data.meta
        fs_dataset.add(tmp)
    return fs_dataset
def __call__(self, documentSet, words_limit, method="mmr", metric="tf", summary_order="origin"):
    dictionary = self._create_dictionary(documentSet)
    self.summary_order = summary_order
    # empty document
    if not dictionary:
        return ()

    if metric.lower() == "tf":
        matrix = self._create_matrix(documentSet, dictionary)
        matrix = self._compute_term_frequency(matrix)
    elif metric.lower() == "tfidf":
        matrix = self._create_tfidf_matrix(documentSet, dictionary)
    else:
        raise ValueError("Don't support your metric now.")

    u, sigma, v = svd(matrix, full_matrices=False)
    ranks = iter(self._compute_ranks(sigma, v))

    if method.lower() == "default":
        return self._get_best_sentences(documentSet.sentences, words_limit,
                                        lambda sent: next(ranks))
    if method.lower() == "mmr":
        return self._get_best_sentences_by_MMR(documentSet.sentences, words_limit,
                                               matrix, lambda sent: next(ranks))
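The ranks above are computed from sigma and v. A hedged sketch of one common LSA scoring rule (the project's _compute_ranks may use a different scheme): score each sentence, i.e. each column of v, by the sigma-weighted length of its topic vector.

import numpy as np

def lsa_sentence_ranks(sigma, v, topics=3):
    # weight each retained topic row of v by the squared singular value
    sigma_sq = sigma[:topics] ** 2
    return np.sqrt(np.dot(sigma_sq, v[:topics, :] ** 2))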
def _initR(X, A, lmbdaR):
    _log.debug('Initializing R (SVD) lambda R: %s' % str(lmbdaR))
    rank = A.shape[1]
    U, S, Vt = svd(A, full_matrices=False)
    Shat = kron(S, S)
    Shat = (Shat / (Shat ** 2 + lmbdaR)).reshape(rank, rank)
    R = []
    ep = 1e-9
    for i in range(len(X)):  # parallelize
        Rn = Shat * dot(U.T, X[i].dot(U))
        Rn = dot(Vt.T, dot(Rn, Vt))
        negativeVal = Rn.min()
        Rn.__iadd__(-negativeVal + ep)
        # if Rn.min() < 0:
        #     print("Negative Rn!")
        #     raw_input("Press Enter: ")
        #     Rn = np.eye(rank)
        #     Rn = dot(A.T, A)
        R.append(Rn)
    print('Initialized R')
    return R


# ------------------ Update V ------------------------------------------------
def update_model(self, y):
    y = self.proj.reduce(np.array([y]).T)
    y = preprocessing.normalize(y, norm='l2', axis=0)  # (k, 1)

    if not hasattr(self, 'B'):
        self.p_failure = 0.1
        self.B = np.zeros((self.k, self.ell))
        self.A = np.array([])

    U, s, V = ln.svd(self.B, full_matrices=False)

    # update the tracked orthonormal bases
    self.U_r = U[:, :self.r]

    if self.A.size == 0:
        self.A = np.empty_like(y)
        self.A[:] = y[:]
    else:
        self.A = np.concatenate((self.A, y), axis=1)

    if np.count_nonzero(self.A) >= (self.ell * self.k) or self.A.shape[1] == self.k:
        B = self.__boosted_sparse_shrink(self.A, self.ell, self.p_failure)
        self.B = self.__dense_shrink(np.concatenate((self.B, B), axis=1), self.ell)
        self.A = np.array([])
def projection_matrix(point_set, subspace_rank=2):
    """
    Compute the projection matrix of a set of points for a given subspace rank.

    Args:
    - point_set (np.array): list of coordinates of shape (n_point, init_dim).
    - subspace_rank (int): rank of the subspace to project onto.
    """
    point_set = np.array(point_set)
    nb_coord = point_set.shape[0]
    init_dim = point_set.shape[1]
    assert init_dim > subspace_rank
    assert subspace_rank > 0

    centroid = point_set.mean(axis=0)
    if sum(centroid) != 0:
        # - Compute the centered matrix:
        centered_point_set = point_set - centroid
    else:
        centered_point_set = point_set

    # -- Compute the Singular Value Decomposition (SVD) of centered coordinates:
    U, D, V = svd(centered_point_set, full_matrices=False)
    V = V.T
    # -- Compute the projection matrix:
    H = np.dot(V[:, 0:subspace_rank], V[:, 0:subspace_rank].T)
    return H
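A hypothetical usage, assuming numpy is imported as np and svd refers to numpy.linalg.svd as the snippet suggests: points that already lie in a 2-D plane project onto themselves.

import numpy as np

rng = np.random.default_rng(0)
coeffs = rng.normal(size=(50, 2))
points = coeffs @ np.array([[1., 0., 0.],
                            [0., 1., 1.]])      # 3-D points inside a plane
H = projection_matrix(points, subspace_rank=2)
centroid = points.mean(axis=0)
projected = (points - centroid) @ H + centroid
print(np.allclose(projected, points))  # True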
def compression(self, method='svd', **kwargs):
    """Return a compression of ``self``. Does not modify ``self``.

    Parameters: See :func:`~compress()`.

    :returns: ``(compressed_mpa, overlap)`` where ``overlap`` is the inner
        product returned by :func:`~compress()`.

    """
    if method == 'svd':
        target = self.copy()
        overlap = target._compress_svd(**kwargs)
        return target, overlap
    elif method == 'var':
        return self._compression_var(**kwargs)
    else:
        raise ValueError('{!r} is not a valid method'.format(method))
def _compress_svd_l(self, rank, relerr, svdfunc):
    """Compresses the MPA in place from right to left using SVD;
    yields a right-canonical state

    See :func:`~compress` for more details and arguments.

    """
    assert rank > 0, "Cannot compress to rank={}".format(rank)
    assert (relerr is None) or ((0. <= relerr) and (relerr <= 1.)), \
        "relerr={} not allowed".format(relerr)

    for site in range(len(self) - 1, 0, -1):
        ltens = self._lt[site]
        matshape = (ltens.shape[0], -1)
        if relerr is None:
            u, sv, v = svdfunc(ltens.reshape(matshape), rank)
            rank_t = len(sv)
        else:
            u, sv, v = svd(ltens.reshape(matshape))
            svsum = np.cumsum(sv) / np.sum(sv)
            rank_relerr = np.searchsorted(svsum, 1 - relerr) + 1
            rank_t = min(ltens.shape[0], v.shape[0], rank, rank_relerr)

        yield sv, rank_t

        newtens = (matdot(self._lt[site - 1], u[:, :rank_t] * sv[None, :rank_t]),
                   v[:rank_t, :].reshape((rank_t, ) + ltens.shape[1:]))
        self._lt.update(slice(site - 1, site + 1), newtens,
                        canonicalization=(None, 'right'))

    yield np.sum(np.abs(self._lt[0])**2)
def _compress_svd_r(self, rank, relerr, svdfunc):
    """Compresses the MPA in place from left to right using SVD;
    yields a left-canonical state

    See :func:`~compress` for parameters

    """
    assert rank > 0, "Cannot compress to rank={}".format(rank)
    assert (relerr is None) or ((0. <= relerr) and (relerr <= 1.)), \
        "Relerr={} not allowed".format(relerr)

    for site in range(len(self) - 1):
        ltens = self._lt[site]
        matshape = (-1, ltens.shape[-1])
        if relerr is None:
            u, sv, v = svdfunc(ltens.reshape(matshape), rank)
            rank_t = len(sv)
        else:
            u, sv, v = svd(ltens.reshape(matshape))
            svsum = np.cumsum(sv) / np.sum(sv)
            rank_relerr = np.searchsorted(svsum, 1 - relerr) + 1
            rank_t = min(ltens.shape[-1], u.shape[1], rank, rank_relerr)

        yield sv, rank_t

        newtens = (u[:, :rank_t].reshape(ltens.shape[:-1] + (rank_t, )),
                   matdot(sv[:rank_t, None] * v[:rank_t, :], self._lt[site + 1]))
        self._lt.update(slice(site, site + 2), newtens,
                        canonicalization=('left', None))

    yield np.sum(np.abs(self._lt[-1])**2)
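Both compression routines truncate either by a fixed rank or by a relative error. A minimal sketch of the relerr rule in isolation, with illustrative singular values:

import numpy as np

def truncation_rank(sv, relerr):
    # smallest rank whose normalised cumulative singular-value sum reaches 1 - relerr
    svsum = np.cumsum(sv) / np.sum(sv)
    return int(np.searchsorted(svsum, 1 - relerr) + 1)

print(truncation_rank(np.array([4.0, 2.0, 1.0, 0.5, 0.25]), relerr=0.1))  # 3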
def reduce_matrix(A, eps=None):
    if np.size(A) == 0:
        return A, 0, 0
    if np.size(A) == 1:
        return A, 1, []
    m, n = A.shape
    if m != n:
        M = np.zeros(2 * (max(A.shape), ))
        M[:m, :n] = A
    else:
        M = A
    u, s, v = svd(M)
    if eps is None:
        eps = s.max() * max(M.shape) * np.finfo(s.dtype).eps
    null_mask = (s <= eps)
    rank = sum(~null_mask)
    null_space = v[null_mask]
    u = u[~null_mask][:, ~null_mask]
    s = np.diag(s[~null_mask])
    v = v[~null_mask]
    reduced = u.dot(s.dot(v))
    return reduced, rank, null_space
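A hypothetical call on a rank-deficient matrix, assuming svd here refers to numpy.linalg.svd as the np usage in the function suggests:

import numpy as np

A = np.array([[1., 2., 3.],
              [2., 4., 6.],
              [1., 0., 1.]])   # second row is twice the first, so rank 2
reduced, rank, null_space = reduce_matrix(A)
print(rank)              # 2
print(null_space.shape)  # (1, 3): one basis vector for the null space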
def bernoulli_gaussian_trial(M=250, N=500, L=1000, pnz=.1, kappa=None, SNR=40):
    A = np.random.normal(size=(M, N), scale=1.0 / math.sqrt(M)).astype(np.float32)
    if kappa is not None and kappa >= 1:
        # create a random operator with a specific condition number
        U, _, V = la.svd(A, full_matrices=False)
        s = np.logspace(0, np.log10(1 / kappa), M)
        A = np.dot(U * (s * np.sqrt(N) / la.norm(s)), V).astype(np.float32)
    A_ = tf.constant(A, name='A')
    prob = TFGenerator(A=A, A_=A_, pnz=pnz, kappa=kappa, SNR=SNR)
    prob.name = 'Bernoulli-Gaussian, random A'

    bernoulli_ = tf.to_float(tf.random_uniform((N, L)) < pnz)
    xgen_ = bernoulli_ * tf.random_normal((N, L))
    noise_var = pnz * N / M * math.pow(10., -SNR / 10.)
    ygen_ = tf.matmul(A_, xgen_) + tf.random_normal((M, L), stddev=math.sqrt(noise_var))

    prob.xval = ((np.random.uniform(0, 1, (N, L)) < pnz) * np.random.normal(0, 1, (N, L))).astype(np.float32)
    prob.yval = np.matmul(A, prob.xval) + np.random.normal(0, math.sqrt(noise_var), (M, L))
    prob.xinit = ((np.random.uniform(0, 1, (N, L)) < pnz) * np.random.normal(0, 1, (N, L))).astype(np.float32)
    prob.yinit = np.matmul(A, prob.xinit) + np.random.normal(0, math.sqrt(noise_var), (M, L))
    prob.xgen_ = xgen_
    prob.ygen_ = ygen_
    prob.noise_var = noise_var

    return prob
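A hedged sketch of the same conditioning trick in plain NumPy, with illustrative sizes: rescale the singular values logarithmically so the rebuilt matrix has condition number kappa.

import numpy as np

M, N, kappa = 250, 500, 15.0
A = np.random.default_rng(0).normal(size=(M, N)) / np.sqrt(M)
U, _, V = np.linalg.svd(A, full_matrices=False)
s = np.logspace(0, np.log10(1 / kappa), M)          # from 1 down to 1/kappa
A_cond = (U * (s * np.sqrt(N) / np.linalg.norm(s))) @ V
print(round(np.linalg.cond(A_cond), 2))              # approximately 15.0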
def test_svd_build(self, level=rlevel):
    # Ticket 627.
    a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
    m, n = a.shape
    u, s, vh = linalg.svd(a)

    b = dot(transpose(u[:, n:]), a)

    assert_array_almost_equal(b, np.zeros((2, 2)))
def test_large_svd_32bit(self):
    # See gh-4442, 64bit would require very large/slow matrices.
    x = np.eye(1000, 66)
    np.linalg.svd(x)