def norm_of_columns(A, p=2):
    """Vector p-norm of each column of a matrix.

    Parameters
    ----------
    A : array_like
        Input matrix.
    p : int, optional
        p-th norm.

    Returns
    -------
    array_like
        p-norm of each column of A.
    """
    _, N = A.shape
    return np.asarray([linalg.norm(A[:, j], ord=p) for j in range(N)])
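A quick usage sketch, assuming the imports the snippet relies on (numpy as np, scipy.linalg as linalg):

import numpy as np
from scipy import linalg

A = np.array([[3.0, 0.0],
              [4.0, 1.0]])
print(norm_of_columns(A))       # Euclidean norms of the columns: [5. 1.]
print(norm_of_columns(A, p=1))  # 1-norms: [7. 1.]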
def coherence_of_columns(A):
    """Mutual coherence of columns of A.

    Parameters
    ----------
    A : array_like
        Input matrix.

    Returns
    -------
    float
        Mutual coherence of the columns of A.
    """
    A = np.asmatrix(A)
    _, N = A.shape
    A = A * np.asmatrix(np.diag(1 / norm_of_columns(A)))
    Gram_A = A.H * A
    for j in range(N):
        Gram_A[j, j] = 0
    return np.max(np.abs(Gram_A))
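For a small dictionary the mutual coherence is easy to check by hand; a minimal sketch (orthogonal columns alone would give 0, the mixed third column gives 1/sqrt(2)):

import numpy as np

D = np.array([[1.0, 0.0, 1.0],
              [0.0, 1.0, 1.0]])
print(coherence_of_columns(D))  # ~0.7071, i.e. 1/sqrt(2)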
def test_fed():
    print('test_fed')
    t1, U, m = 1.0, 4.0, 4
    lattice = Square('S1')('%sP-1O' % m)
    config = IDFConfig(priority=DEFAULT_FERMIONIC_PRIORITY, pids=lattice.pids,
                       map=lambda pid: Fermi(norbital=1, nspin=2, nnambu=1))
    basis = FBasis(m*2, m, 0.0)
    ed = FED(name='OneD_%s_%s' % (lattice.name, basis.rep), sectors=[basis], lattice=lattice,
             config=config, terms=[Hopping('t1', t1), Hubbard('U', U)], dtype=np.complex128)
    eigvals0 = eigh(ed.matrix(basis.rep).todense(), eigvals_only=True)
    basis = TRBasis(FBasis(m*2, m, 0.0), dk=2, nk=m)
    ed = TrFED(name='OneD_%s_%s' % (lattice.name, basis.rep), basis=basis, lattice=lattice,
               config=config, terms=[Hopping('t1', t1), Hubbard('U', U)], dtype=np.complex128)
    eigvals1 = []
    for k in range(m):
        eigvals1.append(eigh(ed.matrix(k=k).todense(), eigvals_only=True))
    eigvals1 = sorted(np.concatenate(eigvals1))
    print('diff: %s' % norm(eigvals0 - eigvals1))
    print()
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when early stopping is used.
    # (test : before, in the middle, and in the last part of the path)
    alphas_min = [10, 0.9, 1e-4]
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)

    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
                                      tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)
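A self-contained sketch of the same LARS-vs-coordinate-descent comparison on synthetic data (hypothetical X and y; normalize=True is omitted because it has been removed from recent scikit-learn releases):

import numpy as np
from numpy import linalg
from sklearn import linear_model

rng = np.random.RandomState(0)
X = rng.randn(50, 10)
y = X @ rng.randn(10) + 0.01 * rng.randn(50)

# LARS path stopped at a moderate regularization level
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                               alpha_min=0.1)
# coordinate-descent Lasso at the last alpha of that path
lasso_cd = linear_model.Lasso(alpha=alphas[-1], fit_intercept=False, tol=1e-8)
lasso_cd.fit(X, y)
print(linalg.norm(lasso_path[:, -1] - lasso_cd.coef_))  # expected to be small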
def test_barycenter_kneighbors_graph():
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])
    A = barycenter_kneighbors_graph(X, 1)
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1., 0.],
         [1., 0., 0.],
         [0., 1., 0.]])
    A = barycenter_kneighbors_graph(X, 2)
    # check that each row of reconstruction weights sums to one
    assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
    pred = np.dot(A.toarray(), X)
    assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def switchcomp2(self, prev_para, prev_para2, curr_para):
    bet = 0
    ent = 6
    if prev_para.size == 12:
        distrec = np.finfo(np.float64).max
        indrec = [-1, -1]
        for i in range(2):
            for j in range(2):
                if i == j:
                    continue
                tempdist = (np.linalg.norm(prev_para[bet:ent] - curr_para[6 * i + bet:6 * i + ent])
                            + np.linalg.norm(prev_para[6 + bet:6 + ent] - curr_para[6 * j + bet:6 * j + ent])
                            + np.linalg.norm(prev_para2[bet:ent] - curr_para[6 * i + bet:6 * i + ent])
                            + np.linalg.norm(prev_para2[6 + bet:6 + ent] - curr_para[6 * j + bet:6 * j + ent]))
                if tempdist < distrec:
                    distrec = tempdist
                    indrec = [i, j]
        fin_para = np.concatenate(
            (curr_para[6 * indrec[0]:6 * indrec[0] + 6], curr_para[6 * indrec[1]:6 * indrec[1] + 6]))
        return fin_para
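The core of switchcomp2 is assigning the two 6-parameter blocks of curr_para to the blocks of the previous frame by minimal Euclidean distance; a standalone sketch of that assignment step with made-up numbers:

import numpy as np

prev = np.array([0.0] * 6 + [1.0] * 6)         # blocks 0 and 1 of the previous frame
curr = np.array([1.1, 1, 1, 1, 1, 1,           # current blocks arrive swapped
                 0.1, 0, 0, 0, 0, 0], dtype=float)

best = min(((i, j) for i in range(2) for j in range(2) if i != j),
           key=lambda ij: np.linalg.norm(prev[:6] - curr[6*ij[0]:6*ij[0]+6]) +
                          np.linalg.norm(prev[6:] - curr[6*ij[1]:6*ij[1]+6]))
print(best)  # (1, 0): current block 1 matches previous block 0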
def word_features(table):
    """
    Extract word features into a normalized matrix
    """
    features = numpy.zeros((len(table), 620), dtype='float32')
    keys = list(table.keys())
    for i in range(len(table)):
        f = table[keys[i]]
        features[i] = f / norm(f)
    return features
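A minimal usage sketch with a small made-up table of 620-dimensional vectors (the dimension the snippet hard-codes); every row of the result has unit Euclidean length:

import numpy
from numpy.linalg import norm

table = {'cat': numpy.random.randn(620).astype('float32'),
         'dog': numpy.random.randn(620).astype('float32')}
features = word_features(table)
print(features.shape)          # (2, 620)
print(norm(features, axis=1))  # ~[1. 1.]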
def computeMatrixConvergence(prev, new):
    return la.norm(new - prev)

def computeListOfListsConvergence(prev, new):
    assert len(prev) == len(new)
    max_diff = 0
    for i in range(len(prev)):
        diff = la.norm(np.array(new[i]) - np.array(prev[i]))
        if diff > max_diff:
            max_diff = diff
    return max_diff
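A quick sketch of both convergence helpers on toy data (assuming numpy as np and numpy.linalg as la, as the snippets imply):

import numpy as np
import numpy.linalg as la

prev_W = np.zeros((2, 2))
new_W = np.array([[0.1, 0.0], [0.0, 0.1]])
print(computeMatrixConvergence(prev_W, new_W))  # Frobenius norm of the update, ~0.141

prev_lists = [[0.0, 0.0], [1.0, 1.0]]
new_lists = [[0.0, 0.1], [1.0, 1.0]]
print(computeListOfListsConvergence(prev_lists, new_lists))  # 0.1, the largest per-row change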
def computeEtaDifference(self):
    max_diff = 0
    for t in range(self.n_tasks):
        last_eta_list = self.last_eta[t, :]
        eta_list = self.eta[t, :]
        norm = la.norm(last_eta_list - eta_list)
        if norm > max_diff:
            max_diff = norm
    return max_diff
def kl_preds_v2(model, sess, s_test, a_test, n_rep_per_item=200):
    ## Compare sample distribution to ground truth
    Env = grid_env(False)
    n_test_items, state_size = s_test.shape
    distances = np.empty([n_test_items, 3])
    for i in range(n_test_items):
        state = s_test[i, :].astype('int32')
        action = np.round(a_test[i, :]).astype('int32')
        # ground truth
        state_truth = np.empty([n_rep_per_item, s_test.shape[1]])
        for o in range(n_rep_per_item):
            Env.set_state(state.flatten())
            s1, r, dead = Env.step(action.flatten())
            state_truth[o, :] = s1
        truth_count, bins = np.histogramdd(state_truth, bins=[np.arange(8) - 0.5] * state_size)
        truth_prob = truth_count / n_rep_per_item
        # predictions of model
        y_sample = sess.run(model.y_sample,
                            {model.x: state[None, :].repeat(n_rep_per_item, axis=0),
                             model.y: np.zeros(np.shape(state[None, :])).repeat(n_rep_per_item, axis=0),
                             model.a: action[None, :].repeat(n_rep_per_item, axis=0),
                             model.Qtarget: np.zeros(np.shape(action[None, :])).repeat(n_rep_per_item, axis=0),
                             model.lr: 0,
                             model.lamb: 1,
                             model.temp: 0.00001,
                             model.is_training: False,
                             model.k: 1})
        sample_count, bins = np.histogramdd(y_sample, bins=[np.arange(8) - 0.5] * state_size)
        sample_prob = sample_count / n_rep_per_item
        distances[i, 0] = np.sum(truth_prob * (np.log(truth_prob + 1e-5) - np.log(sample_prob + 1e-5)))  # KL(p|p_tilde)
        distances[i, 1] = np.sum(sample_prob * (np.log(sample_prob + 1e-5) - np.log(truth_prob + 1e-5)))  # Inverse KL(p_tilde|p)
        distances[i, 2] = norm(np.sqrt(truth_prob) - np.sqrt(sample_prob)) / np.sqrt(2)  # Hellinger distance
    return np.mean(distances, axis=0)
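The three statistics above (forward KL, reverse KL, Hellinger distance) can be sanity-checked on two small empirical histograms; a standalone sketch with made-up samples:

import numpy as np
from numpy.linalg import norm

rng = np.random.RandomState(0)
truth = rng.choice(4, size=200, p=[0.7, 0.1, 0.1, 0.1])
sample = rng.choice(4, size=200, p=[0.6, 0.2, 0.1, 0.1])

p = np.bincount(truth, minlength=4) / 200.0
q = np.bincount(sample, minlength=4) / 200.0

kl_pq = np.sum(p * (np.log(p + 1e-5) - np.log(q + 1e-5)))  # KL(p || q)
kl_qp = np.sum(q * (np.log(q + 1e-5) - np.log(p + 1e-5)))  # KL(q || p)
hellinger = norm(np.sqrt(p) - np.sqrt(q)) / np.sqrt(2)     # bounded in [0, 1]
print(kl_pq, kl_qp, hellinger)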
def main():
    file1, file2 = sys.argv[1:1+2]
    # read images as 2D arrays (convert to grayscale for simplicity)
    img1 = to_grayscale(imread(file1).astype(float))
    img2 = to_grayscale(imread(file2).astype(float))
    # compare
    n_m, n_0 = compare_images(img1, img2)
    print("Manhattan norm:", n_m, "/ per pixel:", n_m / img1.size)
    print("Zero norm:", n_0, "/ per pixel:", n_0 * 1.0 / img1.size)

def compare_images(img1, img2):
    # normalize to compensate for exposure difference, this may be unnecessary
    # consider disabling it
    img1 = normalize(img1)
    img2 = normalize(img2)
    # calculate the difference and its norms
    diff = img1 - img2  # elementwise for scipy arrays
    m_norm = sum(abs(diff))  # Manhattan norm (sum of absolute differences)
    z_norm = norm(diff.ravel(), 0)  # Zero norm: number of differing pixels
    return (m_norm, z_norm)
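A standalone check of the two measures on tiny arrays (using numpy's sum/abs and numpy.linalg.norm in place of whatever the snippet imports at module level):

import numpy as np
from numpy.linalg import norm

img1 = np.array([[0.0, 1.0], [2.0, 3.0]])
img2 = np.array([[0.0, 1.5], [2.0, 3.0]])
diff = img1 - img2

m_norm = np.sum(np.abs(diff))   # Manhattan norm: 0.5
z_norm = norm(diff.ravel(), 0)  # zero "norm": 1 entry differs
print(m_norm, z_norm)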
def _sigma(matrix, mu):
    n, p = matrix.shape
    if p > n:
        tmp = np.dot(matrix, matrix.T)
    else:
        tmp = np.dot(matrix.T, matrix)
    return (la.norm(tmp, 2) / n) + mu
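Assuming la is numpy.linalg, this returns the largest eigenvalue of X^T X (or X X^T) divided by n, plus mu, which solvers of this kind typically use as a step-size bound; a quick numeric check:

import numpy as np
import numpy.linalg as la

X = np.array([[1.0, 0.0],
              [0.0, 2.0],
              [0.0, 0.0]])
# largest eigenvalue of X.T @ X is 4 and n = 3, so _sigma(X, 0.5) = 4/3 + 0.5
print(_sigma(X, 0.5))                                # ~1.8333
print(max(la.eigvalsh(X.T @ X)) / X.shape[0] + 0.5)  # same value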
##############################################################################
# Models
def ridge_regression(data, labels, mu=0.0):
    r"""Implementation of the Regularized Least Squares solver.

    It solves the ridge regression problem with parameter ``mu`` on the
    `l2-norm`.

    Parameters
    ----------
    data : (N, P) ndarray
        Data matrix.
    labels : (N,) or (N, 1) ndarray
        Labels vector.
    mu : float, optional (default is `0.0`)
        `l2-norm` penalty.

    Returns
    -------
    beta : (P, 1) ndarray
        Ridge regression solution.

    Examples
    --------
    >>> X = numpy.array([[0.1, 1.1, 0.3], [0.2, 1.2, 1.6], [0.3, 1.3, -0.6]])
    >>> beta = numpy.array([0.1, 0.1, 0.0])
    >>> Y = numpy.dot(X, beta)
    >>> beta = l1l2py.algorithms.ridge_regression(X, Y, 1e3).T
    >>> len(numpy.flatnonzero(beta))
    3
    """
    n, p = data.shape
    if n < p:
        tmp = np.dot(data, data.T)
        if mu:
            tmp += mu * n * np.eye(n)
        tmp = la.pinv(tmp)
        return np.dot(np.dot(data.T, tmp), labels.reshape(-1, 1))
    else:
        tmp = np.dot(data.T, data)
        if mu:
            tmp += mu * n * np.eye(p)
        tmp = la.pinv(tmp)
        return np.dot(tmp, np.dot(data.T, labels.reshape(-1, 1)))
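A small usage sketch on noiseless synthetic data: with mu=0 the solver reduces to ordinary least squares and recovers the true coefficients, while a large mu shrinks them toward zero (assumes the module's np/la imports are numpy and numpy.linalg):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
beta_true = np.array([1.0, -2.0, 0.5])
y = X @ beta_true

print(ridge_regression(X, y).ravel())         # ~[ 1.  -2.   0.5]
print(ridge_regression(X, y, mu=10).ravel())  # visibly shrunk coefficients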
def _fixed_step(self, t, fields, dt, pars,
                hook=null_hook):
    fields = fields.copy()
    fields, pars = hook(t, fields, pars)
    J = self._model.J(fields, pars)
    Id = self.__cache__(fields.uflat.size)
    self._A = A = Id - self._gamma[0, 0] * dt * J
    luf = sps.linalg.factorized(A)
    ks = []
    fields_i = fields.copy()
    for i in np.arange(self._s):
        fields_i.fill(fields.uflat +
                      sum([self._alpha[i, j] * ks[j]
                           for j in range(i)]))
        F = self._model.F(fields_i, pars)
        ks.append(luf(dt * F +
                      dt * (J @ sum([self._gamma[i, j] * ks[j]
                                     for j in range(i)])
                            if i > 0 else 0)))
    U = fields.uflat.copy()
    U = U + sum([bi * ki for bi, ki in zip(self._b, ks)])
    U_pred = (U + sum([bi * ki
                       for bi, ki
                       in zip(self._b_pred, ks)])
              if self._b_pred is not None else None)
    fields.fill(U)
    return t + dt, fields, (norm(U - U_pred, np.inf)
                            if U_pred is not None else None)
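Each stage above solves a linear system with the factorized matrix I - gamma*dt*J, i.e. a Rosenbrock(-Wanner) scheme. A minimal one-stage sketch of the same idea on a scalar stiff ODE (not the solver's API, just the numerical scheme):

import numpy as np

def rosenbrock_euler_step(f, jac, t, u, dt):
    """One linearly-implicit Euler step: solve (I - dt*J) k = dt*f(u), then u += k."""
    J = jac(u)
    A = np.eye(u.size) - dt * J
    k = np.linalg.solve(A, dt * f(u))
    return t + dt, u + k

# stiff test problem u' = -50 u
f = lambda u: -50.0 * u
jac = lambda u: np.array([[-50.0]])

t, u = 0.0, np.array([1.0])
for _ in range(10):
    t, u = rosenbrock_euler_step(f, jac, t, u, 0.1)
print(t, u)  # decays smoothly even though dt is far above the explicit-Euler stability limit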