# Imports implied by this snippet (get_row is a project-level helper):
import numpy as np
from numpy import linalg as LA

def _solve_topic(lo, hi, theta, alpha, gamma, XT, BTBpR, c0, c1, f, lam_d, lam_t):
    topic_batch = np.empty((hi - lo, f), dtype=theta.dtype)
    for ib, u in enumerate(range(lo, hi)):
        x_u, idx_u = get_row(XT, u)
        B_u = theta[idx_u]
        cpAT = gamma[u].dot(alpha.T)
        a = lam_d * x_u.dot(c1 * B_u) + lam_t * cpAT
        # Only the non-zero elements are handled in this loop: B_u contains
        # only the vectors corresponding to non-zero doc-word occurrences.
        B = BTBpR + B_u.T.dot((c1 - c0) * B_u)
        topic_batch[ib] = LA.solve(B, a)
    topic_batch = topic_batch.clip(0)
    return topic_batch
def _solve_gamma(lo, hi, alpha, beta, topic, M, B, c0, c1, f, lam_w, lam_t):
    gamma_batch = np.empty((hi - lo, f), dtype=alpha.dtype)
    for ib, i in enumerate(range(lo, hi)):
        t_i = topic[i, :]
        m_i, idx_m_i = get_row(M, i)
        B_i = beta[idx_m_i]
        # B_i is gathered inside the loop, rather than precomputing the full
        # Gram matrix beta.T.dot(beta), because the objective function only
        # considers the non-zero elements of the matrix W.
        a = lam_t * np.dot(t_i, alpha) + lam_w * np.dot(m_i, B_i)
        gamma_batch[ib] = LA.solve(B, a)
    return gamma_batch
def _solve_beta(lo, hi, gamma, MT, B, f):
    beta_batch = np.empty((hi - lo, f), dtype=gamma.dtype)
    for ib, j in enumerate(range(lo, hi)):
        m_j, idx_m_j = get_row(MT, j)
        C_j = gamma[idx_m_j]
        a = np.dot(m_j, C_j)
        beta_batch[ib] = LA.solve(B, a)
    return beta_batch
def _solve_alpha(lo, hi, gamma, topicT, B, f):
    alpha_batch = np.empty((hi - lo, f), dtype=gamma.dtype)
    for ib, j in enumerate(range(lo, hi)):
        t_j = topicT[j, :]
        a = np.dot(t_j, gamma)
        alpha_batch[ib] = LA.solve(B, a)
    return alpha_batch
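Each of the four batch helpers above fills one row at a time by solving a small f x f ridge-regularized normal-equations system with LA.solve. A minimal standalone sketch of that per-row update (all sizes and the regularizer lam are illustrative, not the project's values):

import numpy as np

rng = np.random.default_rng(0)
n_items, f = 50, 8                       # hypothetical sizes
B = rng.standard_normal((n_items, f))    # fixed factor matrix
x_u = rng.random(n_items)                # one row of the data matrix
lam = 1e-2                               # ridge regularizer

# Normal equations: (B^T B + lam * I) w = B^T x_u
A = B.T.dot(B) + lam * np.eye(f)
a = B.T.dot(x_u)
w = np.linalg.solve(A, a)                # one row of the updated factor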
def _estimate_gradient(self, policy):
    env = self.environment
    var = self.var
    # store current policy parameter
    parameter = policy.parameters
    par_dim = policy.parameter_space.dimension
    # using forward differences
    trace = env.rollout(policy)
    j_ref = sum([x[2] for x in trace]) / len(trace)
    dj = np.zeros((2 * par_dim))
    dv = np.append(np.eye(par_dim), -np.eye(par_dim), axis=0)
    dv *= var
    for n in range(2 * par_dim):
        variation = dv[n]
        policy.parameters = parameter + variation
        trace_n = env.rollout(policy)
        jn = sum([x[2] for x in trace_n]) / len(trace_n)
        dj[n] = j_ref - jn
    grad = solve(dv.T.dot(dv), dv.T.dot(dj))
    # reset current policy parameter
    policy.parameters = parameter
    return grad
def _estimate_gradient(self, policy):
    env = self.environment
    parameter = policy.parameters
    par_dim = policy.parameter_space.dimension
    dj = np.zeros((par_dim,))
    dv = np.eye(par_dim) * self.var / 2
    for n in range(par_dim):
        variation = dv[n]
        policy.parameters = parameter + variation
        trace_n = env.rollout(policy)
        policy.parameters = parameter - variation
        trace_n_ref = env.rollout(policy)
        jn = sum([x[2] for x in trace_n]) / len(trace_n)
        jn_ref = sum([x[2] for x in trace_n_ref]) / len(trace_n_ref)
        dj[n] = jn - jn_ref
    grad = solve(dv.T.dot(dv), dv.T.dot(dj))
    policy.parameters = parameter
    return grad
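Both estimators recover the gradient from the perturbations by solving the normal equations (dv^T dv) g = dv^T dj. A self-contained sketch of the same least-squares recovery on a toy quadratic objective (the function J and all numbers here are illustrative):

import numpy as np
from numpy.linalg import solve

def J(theta):                        # toy objective with known gradient 2 * theta
    return float(theta.dot(theta))

theta = np.array([1.0, -2.0, 0.5])
eps = 1e-4
dv = np.eye(3) * eps                 # one perturbation direction per row
dj = np.array([(J(theta + d) - J(theta - d)) / 2.0 for d in dv])

# Least-squares fit of dv.dot(grad) ~ dj, as in the estimators above
grad = solve(dv.T.dot(dv), dv.T.dot(dj))
# grad is close to the analytic gradient 2 * theta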
def _omp(x, D, Gram, alpha, n_nonzero_coefs=None, tol=None):
    _, n_atoms = D.shape
    # the dictionary indices of the atoms this datapoint uses
    Dx = np.array([]).astype(int)
    z = np.zeros(n_atoms)
    # the residual
    r = np.copy(x)
    i = 0
    if n_nonzero_coefs is not None:
        tol = 1e-10

        def cont_criterion():
            not_reached_sparsity = i < n_nonzero_coefs
            return not_reached_sparsity and norm(r) > tol
    else:
        cont_criterion = lambda: norm(r) >= tol
    while cont_criterion():
        # find the atom that correlates the
        # most with the residual
        k = np.argmax(np.abs(alpha))
        if k in Dx:
            break
        Dx = np.append(Dx, k)
        # solve the least-squares problem
        # to find the coefficients z
        DI = D[:, Dx]
        G = Gram[Dx, :][:, Dx]
        G = np.atleast_2d(G)
        try:
            G_inv = inv(G)
        except LinAlgError:
            print(gram_singular_msg)
            break
        z[Dx] = np.dot(G_inv, np.dot(D.T, x)[Dx])
        r = x - np.dot(DI, z[Dx])
        alpha = np.dot(D.T, r)
        i += 1
    return z
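A usage sketch for _omp under assumed imports (norm, inv, LinAlgError from numpy.linalg, as the snippet implies) and a random unit-norm dictionary; gram_singular_msg is a module-level string in the original project and is not needed on this path:

import numpy as np
from numpy.linalg import norm, inv, LinAlgError

rng = np.random.default_rng(0)
D = rng.standard_normal((64, 128))
D /= norm(D, axis=0)             # unit-norm atoms
x = rng.standard_normal(64)

Gram = D.T.dot(D)                # precomputed Gram matrix
alpha = D.T.dot(x)               # initial correlations
z = _omp(x, D, Gram, alpha, n_nonzero_coefs=5)  # sparse code with <= 5 atoms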
def llc(X, D, knn=5):
    # the sparse coder introduced in
    # "Locality-constrained Linear Coding for Image Classification"
    n_samples = X.shape[1]
    n_atoms = D.shape[1]
    # distance of each sample to each atom
    dist = np.zeros((n_samples, n_atoms))
    # calculate the distances
    for i in range(n_samples):
        for j in range(n_atoms):
            dist[i, j] = norm(X[:, i] - D[:, j])
    # indices of the atoms that are the nearest neighbours of each sample
    knn_idx = np.zeros((n_samples, knn)).astype(int)
    for i in range(n_samples):
        knn_idx[i, :] = np.argsort(dist[i, :])[:knn]
    # the sparse coding matrix
    Z = np.zeros((n_atoms, n_samples))
    II = np.eye(knn)
    beta = 1e-4
    b = np.ones(knn)
    for i in range(n_samples):
        idx = knn_idx[i, :]
        z = D.T[idx, :] - repmat(X.T[i, :], knn, 1)
        C = np.dot(z, z.T)
        C = C + II * beta * np.trace(C)
        # solve the linear system C*c=b
        c = solve(C, b)
        # enforce the constraint on the sparse codes
        # such that sum(c) = 1
        c = c / float(np.sum(c))
        Z[idx, i] = c
    return Z
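A usage sketch for llc, assuming norm/solve come from numpy.linalg and repmat from numpy.matlib (deprecated in newer NumPy; np.tile is the modern equivalent); each column of the returned code matrix sums to 1 over its knn support:

import numpy as np
from numpy.linalg import norm, solve
from numpy.matlib import repmat

rng = np.random.default_rng(0)
X = rng.standard_normal((32, 10))    # 10 samples
D = rng.standard_normal((32, 64))    # 64 atoms

Z = llc(X, D, knn=5)
print(Z.shape)                       # (64, 10)
print(Z.sum(axis=0))                 # each column sums to ~1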
def do(self, a, b):
    x = linalg.solve(a, b)
    assert_almost_equal(b, dot_generalized(a, x))
    assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
def test_types(self):
    def check(dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        assert_equal(linalg.solve(x, x).dtype, dtype)
    for dtype in [single, double, csingle, cdouble]:
        yield check, dtype
def test_0_size(self):
    class ArraySubclass(np.ndarray):
        pass
    # Test system of 0x0 matrices
    a = np.arange(8).reshape(2, 2, 2)
    b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
    expected = linalg.solve(a, b)[:, 0:0, :]
    result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
    assert_array_equal(result, expected)
    assert_(isinstance(result, ArraySubclass))
    # Test errors for non-square and only b's dimension being 0
    assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
    assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
    # Test broadcasting error
    b = np.arange(6).reshape(1, 3, 2)  # broadcasting error
    assert_raises(ValueError, linalg.solve, a, b)
    assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
    # Test zero "single equations" with 0x0 matrices.
    b = np.arange(2).reshape(1, 2).view(ArraySubclass)
    expected = linalg.solve(a, b)[:, 0:0]
    result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
    assert_array_equal(result, expected)
    assert_(isinstance(result, ArraySubclass))
    b = np.arange(3).reshape(1, 3)
    assert_raises(ValueError, linalg.solve, a, b)
    assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
    assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def _refit(self, X):
    n_samples, n_features = X.shape
    for i in range(n_samples):
        X_subset = X.data[X.indptr[i]:X.indptr[i + 1]]
        subset = X.indices[X.indptr[i]:X.indptr[i + 1]]
        len_subset = subset.shape[0]
        reduction = n_features / len_subset
        components_subset = self.components_[:, subset]
        Dx = components_subset.dot(X_subset)
        G = components_subset.dot(components_subset.T)
        G.flat[::self.n_components + 1] += self.alpha / reduction
        self.code_[i] = linalg.solve(G, Dx)
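The G.flat[::self.n_components + 1] line adds the regularizer to the diagonal of the Gram matrix in place. A standalone sketch of the same ridge-regularized refit for one dense sample (sizes and alpha are illustrative):

import numpy as np
from numpy import linalg

rng = np.random.default_rng(0)
n_components, n_features = 10, 40
components = rng.standard_normal((n_components, n_features))
x = rng.standard_normal(n_features)
alpha = 0.1

G = components.dot(components.T)           # Gram matrix of the dictionary
G.flat[::n_components + 1] += alpha        # add alpha to the diagonal in place
code = linalg.solve(G, components.dot(x))  # ridge-regularized code for x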
def fvar(self):
    """Estimate the variances of the reduced free energies."""
    # Shorthand indices - notation similar to Ref. 1
    n, m, mpk = self.total_samples, self.nstates_sampled, self.nstates
    k = mpk - m
    mask0, maskn0 = self.mask_zero, self.mask_nonzero
    _W_nj = self._W_nj
    O = _W_nj.T.dot(_W_nj) / n
    Os = O[:, :m]
    B1 = (hstack((Os.dot(self.PIs), zeros((mpk, k))))
          - identity(mpk))[1:, 1:]
    A1 = (O - Os.dot(self.PIs).dot(Os.T))[1:, 1:]
    V = solve(B1, A1).dot(inv(B1.T)) / n
    # if any(diagonal(V) < 0.):
    #     D = _W_nj.T.dot(self._R_nj) / n
    #     A1 = (O - D.dot(self.PIs).dot(D.T))[1:, 1:]
    #     V = solve(B1, A1).dot(inv(B1.T)) / n
    # Unshuffle the state indices. Note that the variance of state 0 is
    # zero by construction and thus is omitted from V - since the user
    # selected state 0 may be different from the lowest indexed sampled
    # state, re-adjust so that the actual state 0 has zero variance.
    V_full = zeros((mpk, mpk))
    V_full[1:, 1:] = V
    var = zeros(mpk)
    var[maskn0] += diagonal(V_full)[:m]
    var[mask0] += diagonal(V_full)[m:]
    var += var[0]
    var[0] = 0.0
    return var
def solve(self):
    # Solve LS
    self.VU = [node[key] for node in self.U.values() for key in ("ux", "uy")]
    self.VF = [node[key] for node in self.F.values() for key in ("fx", "fy")]
    knw = [pos for pos, value in enumerate(self.VU) if value is not np.nan]
    unknw = [pos for pos, value in enumerate(self.VU) if value is np.nan]
    self.K2S = np.delete(np.delete(self.KG, knw, 0), knw, 1)
    self.F2S = np.delete(self.VF, knw, 0)
    # For displacements
    self.solved_u = la.solve(self.K2S, self.F2S)
    for k, ic in enumerate(unknw):
        nd, var = self.index2key(ic)
        self.U[nd][var] = self.solved_u[k]
    # Updating nodes displacements
    for nd in self.nodes.values():
        if np.isnan(nd.ux):
            nd.ux = self.U[nd.label]["ux"]
        if np.isnan(nd.uy):
            nd.uy = self.U[nd.label]["uy"]
    # For nodal forces/reactions
    self.NF = self.F.copy()
    self.VU = [node[key] for node in self.U.values() for key in ("ux", "uy")]
    nf_calc = np.dot(self.KG, self.VU)
    for k in range(2 * self.get_number_of_nodes()):
        nd, var = self.index2key(k, ("fx", "fy"))
        self.NF[nd][var] = nf_calc[k]
        cnlab = int(np.floor(k / float(self.dof)))
        if var == "fx":
            self.nodes[cnlab].fx = nf_calc[k]
        elif var == "fy":
            self.nodes[cnlab].fy = nf_calc[k]
def solve(self):
    # known and unknown values
    self.VU = [node[key] for node in self.U.values() for key in ("ux",)]
    self.VF = [node[key] for node in self.F.values() for key in ("fx",)]
    knw = [pos for pos, value in enumerate(self.VU) if value is not np.nan]
    unknw = [pos for pos, value in enumerate(self.VU) if value is np.nan]
    if len(unknw) == 1:
        _k = unknw[0]
        _rowtmp = self.KG[_k, :]
        _ftmp = self.VF[_k]
        _fk = _ftmp - np.dot(np.delete(_rowtmp, _k), np.delete(self.VU, _k))
        _uk = _fk / self.KG[_k, _k]
        self.solved_u = np.array([_uk])
    else:  # "normal" case
        self.K2S = np.delete(np.delete(self.KG, knw, 0), knw, 1)
        self.F2S = np.delete(self.VF, knw, 0)
        self.solved_u = la.solve(self.K2S, self.F2S)
    # Updating U (displacements vector)
    for k, ic in enumerate(unknw):
        nd, var = self.index2key(ic)
        self.U[nd][var] = self.solved_u[k]
        self.nodes[ic].ux = self.solved_u[k]
    # For nodal forces/reactions
    self.NF = self.F.copy()
    self.VU = [node[key] for node in self.U.values() for key in ("ux",)]
    nf_calc = np.dot(self.KG, self.VU)
    for k, ic in enumerate(range(self.get_number_of_nodes())):
        nd, var = self.index2key(ic, ("fx",))
        self.NF[nd][var] = nf_calc[k]
        self.nodes[ic].fx = nf_calc[k]
def solve(self):
    # Solve LS
    self.VU = [node[key] for node in self.U.values() for key in ("uy", "ur")]
    self.VF = [node[key] for node in self.F.values() for key in ("fy", "m")]
    knw = [pos for pos, value in enumerate(self.VU) if value is not np.nan]
    unknw = [pos for pos, value in enumerate(self.VU) if value is np.nan]
    self.K2S = np.delete(np.delete(self.KG, knw, 0), knw, 1)
    self.F2S = np.delete(self.VF, knw, 0)
    # For displacements
    self.solved_u = la.solve(self.K2S, self.F2S)
    for k, ic in enumerate(unknw):
        nd, var = self.index2key(ic)
        self.U[nd][var] = self.solved_u[k]
    # Updating nodes displacements
    for nd in self.nodes.values():
        if np.isnan(nd.uy):
            nd.uy = self.U[nd.label]["uy"]
        if np.isnan(nd.ur):
            nd.ur = self.U[nd.label]["ur"]
    # For nodal forces/reactions
    self.NF = self.F.copy()
    self.VU = [node[key] for node in self.U.values() for key in ("uy", "ur")]
    nf_calc = np.dot(self.KG, self.VU)
    for k in range(2 * self.get_number_of_nodes()):
        nd, var = self.index2key(k, ("fy", "m"))
        self.NF[nd][var] = nf_calc[k]
        cnlab = int(np.floor(k / float(self.dof)))
        if var == "fy":
            self.nodes[cnlab].fy = nf_calc[k]
        elif var == "m":
            self.nodes[cnlab].m = nf_calc[k]
def solve(self):
    self._check_nodes()
    # Solve LS
    self.VU = [node[key] for node in self.U.values() for key in ("ux", "uy")]
    self.VF = [node[key] for node in self.F.values() for key in ("fx", "fy")]
    knw = [pos for pos, value in enumerate(self.VU) if value is not np.nan]
    unknw = [pos for pos, value in enumerate(self.VU) if value is np.nan]
    self.K2S = np.delete(np.delete(self.KG, knw, 0), knw, 1)
    self.F2S = np.delete(self.VF, knw, 0)
    # For displacements: fall back to least squares if K2S is singular
    try:
        self.solved_u = la.solve(self.K2S, self.F2S)
    except la.LinAlgError:
        print("Solved using LSTSQ")
        self.solved_u = la.lstsq(self.K2S, self.F2S)[0]
    for k, ic in enumerate(unknw):
        nd, var = self.index2key(ic)
        self.U[nd][var] = self.solved_u[k]
    # Updating nodes displacements
    for nd in self.nodes.values():
        if np.isnan(nd.ux):
            nd.ux = self.U[nd.label]["ux"]
        if np.isnan(nd.uy):
            nd.uy = self.U[nd.label]["uy"]
    # For nodal forces/reactions
    self.NF = self.F.copy()
    self.VU = [node[key] for node in self.U.values() for key in ("ux", "uy")]
    nf_calc = np.dot(self.KG, self.VU)
    for k in range(2 * self.get_number_of_nodes()):
        nd, var = self.index2key(k, ("fx", "fy"))
        self.NF[nd][var] = nf_calc[k]
        cnlab = int(np.floor(k / float(self.dof)))
        if var == "fx":
            self.nodes[cnlab].fx = nf_calc[k]
        elif var == "fy":
            self.nodes[cnlab].fy = nf_calc[k]
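All four solve methods share the same partitioning idea: delete the rows and columns of the prescribed DOFs from the global stiffness matrix KG and solve the reduced system for the free DOFs. A standalone sketch with two springs in series and the left end fixed (stiffnesses and load are made up):

import numpy as np
from numpy import linalg as la

k1, k2 = 1000.0, 2000.0             # spring stiffnesses
KG = np.array([[ k1,     -k1,     0.0],
               [-k1, k1 + k2,     -k2],
               [0.0,     -k2,      k2]])
VF = np.array([0.0, 0.0, 10.0])     # applied forces
knw = [0]                           # DOF 0 is fixed (u = 0)

K2S = np.delete(np.delete(KG, knw, 0), knw, 1)
F2S = np.delete(VF, knw, 0)
u = la.solve(K2S, F2S)              # free displacements: [0.01, 0.015]
reactions = KG.dot(np.insert(u, 0, 0.0))  # full force vector incl. reaction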
Source file: math.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def solve(a, b):
    """Returns the solution of A X = B."""
    try:
        return linalg.solve(a, b)
    except linalg.LinAlgError:
        return np.dot(linalg.pinv(a), b)
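A quick check of the fallback path: for a singular a, linalg.solve raises LinAlgError and the wrapper returns the pseudo-inverse (minimum-norm least-squares) solution instead:

import numpy as np
from numpy import linalg

a = np.array([[1.0, 2.0],
              [2.0, 4.0]])          # singular: second row is twice the first
b = np.array([1.0, 2.0])            # consistent right-hand side

x = solve(a, b)                      # falls back to pinv
print(np.allclose(a.dot(x), b))      # True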