import numpy as np

def transformation_from_points(points1, points2):
    """Return the 3x3 affine matrix [s*R | t; 0 0 1] that maps the landmark
    set points1 onto points2 in the least-squares sense (an orthogonal
    Procrustes / similarity alignment).  Both inputs are expected to be
    np.matrix objects of shape (N, 2), so '*' below is matrix multiplication."""
    points1 = points1.astype(np.float64)
    points2 = points2.astype(np.float64)

    # Remove the centroids and normalise the scale of each point set.
    c1 = np.mean(points1, axis=0)
    c2 = np.mean(points2, axis=0)
    points1 -= c1
    points2 -= c2
    s1 = np.std(points1)
    s2 = np.std(points2)
    points1 /= s1
    points2 /= s2

    # The rotation comes from the SVD of the cross-correlation matrix; the
    # transpose is needed because the points are stored as row vectors.
    U, S, Vt = np.linalg.svd(points1.T * points2)
    R = (U * Vt).T

    # Assemble the full similarity transform in homogeneous coordinates.
    return np.vstack([np.hstack(((s2 / s1) * R,
                                 c2.T - (s2 / s1) * R * c1.T)),
                      np.matrix([0., 0., 1.])])
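
# A minimal usage sketch (not from the original source): align one hypothetical
# 2-D landmark set onto another and check that the returned 3x3 matrix maps
# src onto dst.  Inputs are assumed to be np.matrix objects of shape (N, 2).
def _demo_transformation_from_points():
    src = np.matrix([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
    dst = 2.0 * src + np.matrix([1., 1.])                   # src scaled by 2, shifted by (1, 1)
    M = transformation_from_points(src, dst)                # 3x3 matrix [s*R | t; 0 0 1]
    homog = np.matrix(np.hstack([np.asarray(src), np.ones((src.shape[0], 1))]))
    mapped = (M * homog.T).T[:, :2]                         # apply the map to src
    return np.allclose(mapped, dst)                         # expected: True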
def KeyGen(n, m, k, d, q):
    '''
    input:
        q : polynomial-size prime number
        n, m, k : dimension specifiers
        d : SIS parameter; the hardest instances are where d ~ q^(n/m)
    output:
        Signing key S : m x k matrix with coefficients in [-d, d]
        Verification key A : n x m matrix with coefficients in [-(q-1)/2, (q-1)/2]
        T : the matrix A*S, used when verifying a signature
    '''
    S = crypt_secure_matrix(d, m, k)
    A = crypt_secure_matrix((q - 1) // 2, n, m)  # integer division keeps the bound an integer
    T = np.matmul(A, S)
    return S, A, T
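
# `crypt_secure_matrix` is called above but not shown on this page.  A minimal
# sketch under the assumption that it draws integers uniformly from
# [-bound, bound] with a cryptographically secure source (the use of the
# `secrets` module is an assumption, not the original implementation):
import secrets

def crypt_secure_matrix(bound, rows, cols):
    bound = int(bound)
    # secrets.randbelow(2*bound + 1) is uniform on [0, 2*bound]; shift to [-bound, bound]
    return np.array([[secrets.randbelow(2 * bound + 1) - bound
                      for _ in range(cols)] for _ in range(rows)])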
def test_calculate_slopes_no_mask():
    mat = np.matrix([[1, 5, 7, 0, 12, 14],
                     [12, 5, 7, 18, 0, 0],
                     [6, 2, 9, 17, 0, 5]])
    tp = np.array([0, 5, 7, 11, 15, 19])
    mask = np.array(['1', '1', '1', '1', '1', '1'])
    res = calculate_slopes(mat, tp, mask)
    true_res = np.matrix([[0.8, 1, -1.75, 3, 0.5],
                          [-1.4, 1, 2.75, -4.5, 0],
                          [-0.8, 3.5, 2, -4.25, 1.25]])
    # Compare element-wise magnitudes so positive and negative errors cannot cancel.
    assert np.abs(res - true_res).sum() <= 10e-10
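
# `calculate_slopes` is exercised above but not defined on this page.  From the
# expected values, it divides the first differences between adjacent columns by
# the corresponding time deltas; a minimal sketch under that assumption (the
# mask argument is accepted but, as in this no-mask test, not applied):
def calculate_slopes(data, timepoints, mask):
    data = np.asarray(data, dtype=np.float64)
    dt = np.diff(np.asarray(timepoints, dtype=np.float64))  # gaps between time points
    return np.matrix(np.diff(data, axis=1) / dt)            # rise over run, column-wise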
#def generate_STS_distance_matrix(slope_matrix, nthreads)
#def test_generate_STS_distance_matrix():
# pass
#def sts_matrix_generator(ind, slope_matrix)
#def test_sts_matrix_generator():
# pass
#def cluster_dbscan(dist_matrix, eps=1, distance_measure="sts")
#def test_cluster_dbscan():
# pass
#def zscore(x)
def timeseriesdata_constructor_existing_file():
"""Tests the TimeSeriesData class constructor when the file exists. Tests
that the data are as expected, and that the filled data flag is set."""
tsd = TimeSeriesData("./tests/data/small.h5")
assert tsd.filled_data
#Validate that the time-series matrix data are correct
dat = np.matrix([[0, 2, 0, 0],
[2, 1, 3, 4],
[1, 1, 1, 1],
[0, 0, 1, 0]])
assert (dat == tsd.get_sparse_matrix().todense()).all()
assert (tsd.get_time_points() == [0, 4, 10, 12]).all()
sequence_ids = np.array([b'9247ec5fd33e99387d41e8fc0d7ee278',
b'53f74905f7826fee79dd09b0c12caddf',
b'8829fefe91ead511a30df118060b1030',
b'7f97b4991342bf629966aeac0708c94f'])
assert (sequence_ids == tsd.get_array_by_chunks("genes/sequenceids")).all()
sample_names = np.array([b'72c', b'29', b'qpa', b'15'])
assert (sample_names == tsd.get_array_by_chunks("samples/names")).all()
# __del__(self)
#Test if HDF5 file closed
def gradientDescent(X, y, theta, alpha, iters):
    """Batch gradient descent for linear regression with np.matrix inputs:
    X is (m, n) with a bias column, y is (m, 1), theta is a (1, n) row."""
    temp = np.matrix(np.zeros(theta.shape))
    params = int(theta.ravel().shape[1])  # number of parameters in the flattened theta
    cost = np.zeros(iters)
    for i in range(iters):
        err = (X * theta.T) - y
        for j in range(params):
            term = np.multiply(err, X[:, j])
            temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))
        theta = temp
        cost[i] = computeCost(X, y, theta)
    return theta, cost

def computeCost(X, y, theta):
    """Mean squared error cost with the conventional 1/2 factor."""
    inner = np.power(((X * theta.T) - y), 2)
    return np.sum(inner) / (2 * len(X))
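
# A small hedged usage sketch (not from the original source): fit y = 1 + 2*x on
# synthetic data.  X carries a bias column, and X, y, theta are np.matrix so
# that the '*' products above are matrix multiplications.
def _demo_gradient_descent():
    xs = np.linspace(0.0, 1.0, 50)
    X = np.matrix(np.column_stack([np.ones_like(xs), xs]))  # (50, 2) design matrix with bias column
    y = np.matrix(1.0 + 2.0 * xs).T                         # (50, 1) targets
    theta = np.matrix(np.zeros((1, 2)))                     # (1, 2) row of parameters
    theta, cost = gradientDescent(X, y, theta, alpha=0.5, iters=2000)
    return theta, cost[-1]                                  # theta should approach [1.0, 2.0]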
def adapt_z_state(self,main_rdd, cinfo,beta):
ainv = cinfo
def Updatez(tpl):
tt=[]
for ((tx,lam,state,z),index) in tpl:
random.setstate(state)
p = random.random()
state = random.getstate()
if p<self.ptr:
znew=float(-np.matrix(tx)*ainv*np.matrix(tx).T)
else:
znew=0.0
z=(1-beta)*z+beta*znew
tt.append(((tx,lam,state,z),index))
return tt
main_rdd = main_rdd.mapValues(Updatez).cache()
return main_rdd
def update_lam_z(self, main_rdd, cinfo,iStar,Gamma):
ainv = cinfo
def update(t, ainv):
p=[]
[tpl, gen] = t
for ((tx,lam,z),index) in tpl:
znew=0.0
if gen.random()<self.ptr:
znew=-(np.matrix(tx)*ainv*np.matrix(tx).T)[0,0]
else:
znew= 0.0
zupdtd = (1.0-self.beta)*z + self.beta*znew
if index != iStar:
lamupdt = (1-Gamma)*lam
else:
lamupdt = (1-Gamma)*lam +lam
p.append(((tx,lamupdt,zupdtd),index))
out = [p,gen]
return out
main_rdd = main_rdd.mapValues(lambda t:update(t,ainv)).persist()
return main_rdd
def update_lam_z(self, main_rdd, cinfo,iStar,Gamma):
ainv,ainv2 = cinfo
def update(t, ainv2):
p=[]
[tpl, gen] = t
for ((tx,lam,z),index) in tpl:
znew=0.0
if gen.random()<self.ptr:
znew=-(np.matrix(tx)*ainv2*np.matrix(tx).T)[0,0]
else:
znew= 0.0
zupdtd = (1.0-self.beta)*z + self.beta*znew
if index != iStar:
lamupdt = (1-Gamma)*lam
else:
lamupdt = (1-Gamma)*lam +lam
p.append(((tx,lamupdt,zupdtd),index))
out = [p,gen]
return out
main_rdd = main_rdd.mapValues(lambda t:update(t,ainv2)).persist()
return main_rdd
def update_comm_info(self,cinfo,iStar,mingrad,txmin,Gamma):
def UpdateAinv2(binv2, u, v, UVT, Denom, alpha, Xi):
    return (binv2 - alpha * UVT / Denom - alpha * UVT.T / Denom
            + alpha**2 * UVT * Xi * u.T / Denom**2)
def UpdateAinv3(binv3, U1, U2, U3, D, Xi, Gamma):
    return (binv3 - Gamma * U2 * U2.T / D - Gamma * U1 * U3.T / D - Gamma * U3 * U1.T / D
            + Gamma**2 * U1 * U2.T * Xi * U2.T / D**2 + Gamma**2 * U2 * U2.T * Xi * U1.T / D**2
            + Gamma**2 * U1 * U3.T * Xi * U1.T / D**2
            - Gamma**3 * U1 * U2.T * Xi * U2.T * Xi * U1.T / D**3)
ainv,ainv2,ainv3=cinfo
binv=1.0/(1.0-Gamma)*ainv
binv2=1.0/(1.0-Gamma)**2*ainv2
binv3=1.0/(1.0-Gamma)**3*ainv3
u1=binv*np.matrix(txmin).T
u2=binv2*np.matrix(txmin).T
u3=binv3*np.matrix(txmin).T
UVT=u1*u2.T
Denom=1+Gamma*u1.T*np.matrix(txmin).T
ainv=rankOneInvUpdate(binv,Gamma*np.matrix(txmin).T,np.matrix(txmin).T)
ainv2=UpdateAinv2(binv2,u1,u2,UVT,Denom,Gamma,np.matrix(txmin).T)
ainv3=UpdateAinv3(binv3,u1,u2,u3,Denom,np.matrix(txmin).T,Gamma)
return ainv,ainv2,ainv3
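
# `rankOneInvUpdate` is called above but not shown on this page.  A minimal
# sketch assuming it is the standard Sherman-Morrison update of a cached
# inverse, (A + u*v^T)^-1 = Ainv - (Ainv*u*v^T*Ainv) / (1 + v^T*Ainv*u),
# with np.matrix column vectors u and v:
def rankOneInvUpdate(Ainv, u, v):
    Ainv_u = Ainv * u                        # column vector Ainv*u
    denom = 1.0 + (v.T * Ainv_u)[0, 0]       # scalar 1 + v^T*Ainv*u
    return Ainv - Ainv_u * (v.T * Ainv) / denom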
def compute_mingrad_l1(self,main_rdd,cinfo,K):
R = cinfo
def maxmin_l1(tpl1,tpl2):
(z1,x1,lam1,i1)=tpl1
(z2,x2,lam2,i2)=tpl2
zt = max(abs(z1),abs(z2))
if zt>abs(z2):
out = (z1,x1,lam1,i1)
else:
out = (z2,x2,lam2,i2)
return out
def CompMingrad(tpl):
p=[]
for ((tx,lam),index) in tpl:
p.append(((np.matrix(tx)*R)[0,0],tx,lam,index))
return p
(mingrad,xmin,lambdaMin,iStar)=main_rdd.flatMapValues(CompMingrad).map(lambda (key, value):value).reduce(maxmin_l1)
s_star = -np.sign(mingrad)
return (mingrad,xmin,lambdaMin,iStar,s_star)
def update_lam_z(self, main_rdd, cinfo,iStar,Gamma):
R = cinfo
def update(t, R):
p=[]
[tpl, gen] = t
for ((tx,lam,z),index) in tpl:
znew=0.0
if gen.random()<self.ptr:
znew=(2*np.matrix(tx)*R)[0,0]
else:
znew= 0.0
zupdtd = (1.0-self.beta)*z + self.beta*znew
if index != iStar:
lamupdt = (1-Gamma)*lam
else:
lamupdt = (1-Gamma)*lam +lam
p.append(((tx,lamupdt,zupdtd),index))
out = [p,gen]
return out
main_rdd = main_rdd.mapValues(lambda t:update(t,R)).persist()
return main_rdd
def gen_comm_info(self,main_rdd):
def cominfo(tpl):
p=[]
for ((tx,lam),index) in tpl:
p.append(np.matrix(tx).T*lam)
return p
def findDim(tpl):
for ((tx,lam),index) in tpl:
d = len(tx)
return d
d = main_rdd.mapValues(findDim).values().reduce(lambda x,y:x)
c=main_rdd.flatMapValues(cominfo).map(lambda (key,value):value).reduce(lambda x,y:x+y)
V=matrix(0.0,(d,1))
for j in range(d):
V[j]=math.exp(-self.C*self.r[j]*c[j,0])
return d,V
def computegap(self,cinfo,main_rdd,iStar,mingrad,lambdaMin):
d,V = cinfo
z=matrix(0.0,(d,1))
vSum=float(np.sum(V))
for j in range(d):
z[j]=-V[j]*self.C*self.r[j]/vSum
def CompGap(tpl,lambdaMin,mingrad,iStar):
p=[]
for ((tx,lam),index) in tpl:
if index!=iStar:
p.append((np.matrix(tx)*np.matrix(z))[0,0]*lam)
else:
p.append((lambdaMin-1)*mingrad)
return p
gap=main_rdd.flatMapValues(lambda tpl:CompGap(tpl,lambdaMin,mingrad,iStar)).map(lambda (key, value):value).reduce(lambda x,y:x+y)
return gap
def get_landmarks(self, im, fname, n=2):
    '''
    Detect faces in the image and return their facial landmark coordinates.
    im:
        the image as a numpy array
    fname:
        the file name, used in error messages
    returns:
        a list with one (x, y) landmark coordinate matrix per detected face
    '''
    rects = self.detector(im, 1)
    if len(rects) >= 5:
        raise TooManyFaces('Too many faces in ' + fname)
    if len(rects) < 2:
        raise NoFace('Not enough faces in ' + fname)
    return [np.matrix([[p.x, p.y] for p in self.predictor(im, rect).parts()]) for rect in rects]
def get_landmarks(self, im, fname, n=1):
    '''
    Detect faces in the image and return facial landmark coordinates.
    im:
        the image as a numpy array
    fname:
        the file name, used in error messages
    returns:
        an (x, y) landmark coordinate matrix for the first detected face
    '''
    rects = self.detector(im, 1)
    if len(rects) > n:
        raise TooManyFaces('Too many faces in ' + fname)
    if len(rects) < 1:
        raise NoFace('No face in ' + fname)
    return np.matrix([[p.x, p.y] for p in self.predictor(im, rects[0]).parts()])
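
# A hedged setup sketch: the two get_landmarks variants above assume the
# surrounding class holds a dlib face detector and shape predictor.  The class
# name and model path below are illustrative, not from the original source:
import dlib

class LandmarkExtractor(object):
    def __init__(self, predictor_path='shape_predictor_68_face_landmarks.dat'):
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(predictor_path)

    def first_face_landmarks(self, im):
        rects = self.detector(im, 1)
        if not rects:
            return None
        # 68 (x, y) landmark coordinates as an np.matrix, as in the snippets above
        return np.matrix([[p.x, p.y] for p in self.predictor(im, rects[0]).parts()])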
def __init__(self, originFilename):
self._originFilename = originFilename
if originFilename is None:
self._name = 'None'
else:
self._name = os.path.basename(originFilename)
if '.' in self._name:
self._name = os.path.splitext(self._name)[0]
self._meshList = []
self._position = numpy.array([0.0, 0.0])
self._matrix = numpy.matrix([[1,0,0],[0,1,0],[0,0,1]], numpy.float64)
self._transformedMin = None
self._transformedMax = None
self._transformedSize = None
self._boundaryCircleSize = None
self._drawOffset = None
self._boundaryHull = None
self._printAreaExtend = numpy.array([[-1,-1],[ 1,-1],[ 1, 1],[-1, 1]], numpy.float32)
self._headAreaExtend = numpy.array([[-1,-1],[ 1,-1],[ 1, 1],[-1, 1]], numpy.float32)
self._headMinSize = numpy.array([1, 1], numpy.float32)
self._printAreaHull = None
self._headAreaHull = None
self._headAreaMinHull = None
self._loadAnim = None
def test_iter_allocate_output_subtype():
# Make sure that the subtype with priority wins
# matrix vs ndarray
a = np.matrix([[1, 2], [3, 4]])
b = np.arange(4).reshape(2, 2).T
i = nditer([a, b, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_equal(type(a), type(i.operands[2]))
assert_(type(b) != type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
# matrix always wants things to be 2D
b = np.arange(4).reshape(1, 2, 2)
assert_raises(RuntimeError, nditer, [a, b, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate']])
# but if subtypes are disabled, the result can still work
i = nditer([a, b, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_subtype']])
assert_equal(type(b), type(i.operands[2]))
assert_(type(a) != type(i.operands[2]))
assert_equal(i.operands[2].shape, (1, 2, 2))
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_basic(self):
y1 = np.array([1, 2, 3])
assert_(average(y1, axis=0) == 2.)
y2 = np.array([1., 2., 3.])
assert_(average(y2, axis=0) == 2.)
y3 = [0., 0., 0.]
assert_(average(y3, axis=0) == 0.)
y4 = np.ones((4, 4))
y4[0, 1] = 0
y4[1, 0] = 2
assert_almost_equal(y4.mean(0), average(y4, 0))
assert_almost_equal(y4.mean(1), average(y4, 1))
y5 = rand(5, 5)
assert_almost_equal(y5.mean(0), average(y5, 0))
assert_almost_equal(y5.mean(1), average(y5, 1))
y6 = np.matrix(rand(5, 5))
assert_array_equal(y6.mean(0), average(y6, 0))
def test_return_type(self):
a = np.ones([2, 2])
m = np.asmatrix(a)
assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(m, m)), np.matrix)
assert_equal(type(kron(a, m)), np.matrix)
assert_equal(type(kron(m, a)), np.matrix)
class myarray(np.ndarray):
__array_priority__ = 0.0
ma = myarray(a.shape, a.dtype, a.data)
assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(ma, ma)), myarray)
assert_equal(type(kron(a, ma)), np.ndarray)
assert_equal(type(kron(ma, a)), myarray)
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
self.assertFalse(mXbig.all())
self.assertTrue(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
self.assertFalse(mXsmall.all())
self.assertTrue(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
self.assertTrue(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel.shape, (1, 5))
assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
        # Transform globally to the paired dtype (float, 2)
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
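
# Hedged usage sketch: for stacked inputs, dot_generalized should agree with
# np.matmul (this assumes `dot`, `asarray`, and `itertools` are in scope, as
# they are in the original test module this helper comes from):
def _demo_dot_generalized():
    a = np.random.rand(3, 2, 4)
    b = np.random.rand(3, 4, 5)
    return np.allclose(dot_generalized(a, b), np.matmul(a, b))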
def do(self, a, b):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, 0)
x, residuals, rank, sv = linalg.lstsq(a, b)
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
expect_resids = (
np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if len(np.asarray(b).shape) == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'`
# or `ord='nuc'` raises a ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, A, 'nuc', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
assert_raises(ValueError, norm, [3, 4], 'nuc', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
for order in [0, 3]:
assert_raises(ValueError, norm, A, order, None)
assert_raises(ValueError, norm, A, order, (0, 1))
assert_raises(ValueError, norm, B, order, (1, 2))
# Invalid axis
assert_raises(ValueError, norm, B, None, 3)
assert_raises(ValueError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
def test_matrix_rank(self):
# Full rank matrix
yield assert_equal, 4, matrix_rank(np.eye(4))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.
yield assert_equal, matrix_rank(I), 3
# All zeros - zero rank
yield assert_equal, matrix_rank(np.zeros((4, 4))), 0
# 1 dimension - rank 1 unless all 0
yield assert_equal, matrix_rank([1, 0, 0, 0]), 1
yield assert_equal, matrix_rank(np.zeros((4,))), 0
# accepts array-like
yield assert_equal, matrix_rank([1]), 1
# greater than 2 dimensions raises error
yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2))
# works on scalar
yield assert_equal, matrix_rank(1), 1