Python tensordot() examples from open-source projects

def __init__(self,
             orderx, xmin, xmax,
             ordery, ymin, ymax):
    """Constructor. Needs order and domain in x and y."""
    self.orderx, self.ordery = orderx, ordery
    self.stencils = [PseudoSpectralDiscretization1D(orderx, xmin, xmax),
                     PseudoSpectralDiscretization1D(ordery, ymin, ymax)]
    self.stencil_x, self.stencil_y = self.stencils
    self.quads = [s.quads for s in self.stencils]
    self.colocs = [s.colocation_points for s in self.stencils]
    self.x, self.y = self.colocs
    self.colocs2d = np.meshgrid(*self.colocs, indexing='ij')
    self.X, self.Y = self.colocs2d
    self.weights = [s.weights for s in self.stencils]
    self.weights_x, self.weights_y = self.weights
    # 2D quadrature weights: outer product of the 1D weights (axes=0).
    self.weights2D = np.tensordot(*self.weights, axes=0)
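
# A quick sanity check of the axes=0 call above (the weight values here are
# illustrative, not taken from the class): with axes=0, tensordot contracts
# nothing, so the result is the outer product of the two 1-D weight vectors.
import numpy as np
wx = np.array([0.5, 1.0, 0.5])   # hypothetical x-direction quadrature weights
wy = np.array([0.25, 0.75])      # hypothetical y-direction quadrature weights
weights2D = np.tensordot(wx, wy, axes=0)
assert weights2D.shape == (3, 2)
assert np.allclose(weights2D, np.outer(wx, wy))   # weights2D[i, j] == wx[i] * wy[j]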
def itensordot(arrays, axes=2):
    """
    Yields the cumulative array inner product (dot product) of arrays.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    axes : int or (2,) array_like
        * integer_like: If an int N, sum over the last N axes of a
          and the first N axes of b in order. The sizes of the
          corresponding axes must match.
        * (2,) array_like: A list of axes to be summed over, first
          sequence applying to a, second to b. Both elements must be
          of the same length.

    Yields
    ------
    online_tensordot : ndarray

    See Also
    --------
    numpy.tensordot : Compute the tensordot on two tensors.
    """
    yield from _ireduce_linalg(arrays, np.tensordot, axes=axes)
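
# Sketch of the cumulative behaviour, assuming _ireduce_linalg (not shown
# here) reduces with the given function while yielding intermediate results;
# itertools.accumulate reproduces that contract. Arrays are illustrative.
import numpy as np
from functools import partial
from itertools import accumulate

def itensordot_sketch(arrays, axes=2):
    yield from accumulate(arrays, partial(np.tensordot, axes=axes))

mats = [np.eye(2), 2 * np.eye(2), 3 * np.eye(2)]
running = list(itensordot_sketch(mats, axes=1))   # axes=1 is a matrix product
assert np.allclose(running[-1], 6 * np.eye(2))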
def tensordot(self, other, axes):
    """Compute tensor dot product along named axes.

    An error will be raised if the remaining axes of self and
    other contain duplicate names.

    :param other: Another named_ndarray instance
    :param axes: List of axis name pairs (self_name, other_name)
        to be contracted
    :returns: Result as named_ndarray
    """
axes_self = [names[0] for names in axes]
axes_other = [names[1] for names in axes]
axespos_self = [self.axispos(name) for name in axes_self]
axespos_other = [other.axispos(name) for name in axes_other]
new_names = [name for name in self._axisnames if name not in axes_self]
new_names += (name for name in other._axisnames if name not in axes_other)
array = np.tensordot(self._array, other._array,
(axespos_self, axespos_other))
return named_ndarray(array, new_names)
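
# What the name bookkeeping above resolves to in plain numpy (shapes and
# axis names are illustrative): contracting the pairs [('j', 'j'), ('k', 'k')]
# becomes a positional axes tuple for np.tensordot.
import numpy as np
a = np.random.rand(2, 3, 4)   # axes named ('i', 'j', 'k')
b = np.random.rand(4, 3, 5)   # axes named ('k', 'j', 'm')
out = np.tensordot(a, b, axes=([1, 2], [1, 0]))
assert out.shape == (2, 5)    # remaining axes: ('i', 'm')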
def test_split(nr_sites, local_dim, rank, rgen):
if nr_sites < 2:
return
mpa = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen)
for pos in range(nr_sites - 1):
mpa_l, mpa_r = mpa.split(pos)
assert len(mpa_l) == pos + 1
assert len(mpa_l) + len(mpa_r) == nr_sites
assert_correct_normalization(mpa_l)
assert_correct_normalization(mpa_r)
recons = np.tensordot(mpa_l.to_array(), mpa_r.to_array(), axes=(-1, 0))
assert_array_almost_equal(mpa.to_array(), recons)
for (lnorm, rnorm) in it.product(range(nr_sites - 1), range(1, nr_sites)):
mpa_l, mpa_r = mpa.split(nr_sites // 2 - 1)
assert_correct_normalization(mpa_l)
assert_correct_normalization(mpa_r)
def transform(xi, cube):
'''Transform the points `xi` from the reference cube to `cube`.
'''
# For d==2, the result used to be computed with
#
# out = (
# + outer(0.25*(1.0-xi[0])*(1.0-xi[1]), cube[0, 0])
# + outer(0.25*(1.0+xi[0])*(1.0-xi[1]), cube[1, 0])
# + outer(0.25*(1.0-xi[0])*(1.0+xi[1]), cube[0, 1])
# + outer(0.25*(1.0+xi[0])*(1.0+xi[1]), cube[1, 1])
# )
#
# This array of multiplications and additions is reminiscent of dot(), and
# indeed tensordot() can handle the situation. We just need to compute the
# `1+-xi` products and align them with `cube`.
one_mp_xi = numpy.stack([
0.5 * (1.0 - xi),
0.5 * (1.0 + xi),
], axis=1)
a = helpers.n_outer(one_mp_xi)
# TODO kahan tensordot
# <https://stackoverflow.com/q/45372098/353337>
d = xi.shape[0]
return numpy.tensordot(a, cube, axes=(range(d), range(d)))
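
# Direct check of the d == 2 case against the four-term formula quoted in
# the comment above; the einsum stands in for helpers.n_outer (the axis
# layout assumed here is an illustration, not the library's exact code).
import numpy as np
xi = np.random.uniform(-1.0, 1.0, (2, 7))   # 7 points on the reference square
cube = np.random.rand(2, 2, 3)              # quad corners in 3-D space
expected = (
    np.multiply.outer(0.25 * (1.0 - xi[0]) * (1.0 - xi[1]), cube[0, 0])
    + np.multiply.outer(0.25 * (1.0 + xi[0]) * (1.0 - xi[1]), cube[1, 0])
    + np.multiply.outer(0.25 * (1.0 - xi[0]) * (1.0 + xi[1]), cube[0, 1])
    + np.multiply.outer(0.25 * (1.0 + xi[0]) * (1.0 + xi[1]), cube[1, 1])
)
one_mp_xi = np.stack([0.5 * (1.0 - xi), 0.5 * (1.0 + xi)], axis=1)
a = np.einsum('ip,jp->ijp', one_mp_xi[0], one_mp_xi[1])
assert np.allclose(np.tensordot(a, cube, axes=([0, 1], [0, 1])), expected)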
# Source: polarisation.py (project: algorithm-reference-library, author: SKA-ScienceDataProcessor)
def polmatrixmultiply(cm, vec, polaxis=1):
    """Matrix multiply the polarisation axis of vec [..., :] by cm.

    For an image, vec has axes [nchan, npol, ny, nx] and polaxis=1.
    For visibility data, vec has axes [row, nchan, npol] and polaxis=2.

    :param cm: matrix to apply
    :param vec: array to be multiplied [..., :]
    :param polaxis: which axis contains the polarisation
    :return: multiplied vec
    """
if len(vec.shape) == 1:
return numpy.dot(cm, vec)
else:
        # tensordot puts the surviving axis of cm first; move it back to
        # polaxis (a plain swap of axes 0 and polaxis would scramble the
        # leading axes whenever polaxis > 1, e.g. the visibility case).
        result = numpy.tensordot(cm, vec, axes=(1, polaxis))
        return numpy.moveaxis(result, 0, polaxis)
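
# Small check of the axis gymnastics for the visibility case (polaxis=2)
# against einsum; the shapes below are illustrative.
import numpy as np
cm = np.random.rand(4, 4)        # polarisation conversion matrix
vec = np.random.rand(10, 5, 4)   # [row, nchan, npol]
result = np.tensordot(cm, vec, axes=(1, 2))   # shape (npol, row, nchan)
out = np.moveaxis(result, 0, 2)               # back to (row, nchan, npol)
assert np.allclose(out, np.einsum('pq,rcq->rcp', cm, vec))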
def slice_recommendations(self, test_data, shape, start, end):
test_tensor_unfolded, slice_idx = self.get_test_tensor(test_data, shape, start, end)
num_users = end - start
num_items = shape[1]
num_fdbks = shape[2]
v = self._items_factors
w = self._feedback_factors
# assume that w.shape[1] < v.shape[1] (allows for more efficient calculations)
scores = test_tensor_unfolded.dot(w).reshape(num_users, num_items, w.shape[1])
scores = np.tensordot(scores, v, axes=(1, 0))
scores = np.tensordot(np.tensordot(scores, v, axes=(2, 1)), w, axes=(1, 1))
scores = self.flatten_scores(scores, self.flattener)
return scores, slice_idx
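
# The contraction chain above projects the unfolded test tensor onto the
# item and feedback factor spaces and back. A toy version of the same
# pattern (dimensions illustrative), tracking shapes mode by mode:
import numpy as np
T = np.random.rand(2, 4, 3)   # (users, items, feedbacks)
v = np.random.rand(4, 2)      # item factors
w = np.random.rand(3, 2)      # feedback factors
s = np.tensordot(T, w, axes=(2, 0))   # (users, items, r_w)
s = np.tensordot(s, v, axes=(1, 0))   # (users, r_w, r_v)
s = np.tensordot(s, v, axes=(2, 1))   # (users, r_w, items)
s = np.tensordot(s, w, axes=(1, 1))   # (users, items, feedbacks)
assert s.shape == T.shape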
# additional functionality: rating prediction
def test_convolve_generalization():
ag_convolve = autograd.scipy.signal.convolve
A_35 = R(3, 5)
A_34 = R(3, 4)
A_342 = R(3, 4, 2)
A_2543 = R(2, 5, 4, 3)
A_24232 = R(2, 4, 2, 3, 2)
for mode in ['valid', 'full']:
assert npo.allclose(ag_convolve(A_35, A_34, axes=([1], [0]), mode=mode)[1, 2],
sp_convolve(A_35[1,:], A_34[:, 2], mode))
assert npo.allclose(ag_convolve(A_35, A_34, axes=([],[]), dot_axes=([0], [0]), mode=mode),
npo.tensordot(A_35, A_34, axes=([0], [0])))
assert npo.allclose(ag_convolve(A_35, A_342, axes=([1],[2]),
dot_axes=([0], [0]), mode=mode)[2],
sum([sp_convolve(A_35[i, :], A_342[i, 2, :], mode)
for i in range(3)]))
assert npo.allclose(ag_convolve(A_2543, A_24232, axes=([1, 2],[2, 4]),
dot_axes=([0, 3], [0, 3]), mode=mode)[2],
sum([sum([sp_convolve(A_2543[i, :, :, j],
A_24232[i, 2, :, j, :], mode)
for i in range(2)]) for j in range(3)]))
def calcM(N, Co, U, V):
GK = U.shape[2]
Ci = U.shape[3]
tiles = V.shape[3]
GN = V.shape[2]
print('calcM cpu GN', GN, 'N', N)
    U = U.transpose(0, 1, 2, 4, 3).reshape(6, 6, GK * 32, Ci)[:, :, :Co, :]
    V = V.transpose(2, 6, 0, 1, 5, 3, 4).reshape(
        GN * 32, 6, 6, Ci, tiles, tiles)[:N]
M = np.zeros((N, Co, tiles, tiles, 6, 6), dtype=np.float32)
for n in range(N):
for xi in range(6):
for nu in range(6):
M[n,:, :, :, xi, nu] = np.tensordot(U[xi,nu], V[n,xi,nu], 1)
timecheck('calced M')
return M
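
# The inner call above uses the integer form axes=1, which contracts the
# last axis of the first array with the first axis of the second; for 2-D
# inputs that is a plain matmul. Toy shapes below are illustrative.
import numpy as np
A = np.random.rand(7, 4)       # e.g. (Co, Ci)
B = np.random.rand(4, 3, 3)    # e.g. (Ci, tiles, tiles)
out = np.tensordot(A, B, 1)    # contracts Ci; shape (7, 3, 3)
assert np.allclose(out, np.einsum('ij,jkl->ikl', A, B))
assert np.allclose(np.tensordot(A, B.reshape(4, 9), 1), A @ B.reshape(4, 9))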
def collapse(T, W, divisive=False):
if divisive: W = W / np.sum(np.square(W.reshape(W.shape[0], -1)), 1)[:,None,None,None]
if T.shape[-6] == W.shape[0]: # Z ONLY (after 2nd-stage expansion)
W = np.reshape (W, (1,)*(T.ndim-6) + (W.shape[0],1,1) + W.shape[1:])
T = ne.evaluate('T*W')
T = np.reshape (T, T.shape[:-3] + (np.prod(T.shape[-3:]),))
T = np.sum(T, -1)
else: # X ONLY (conv, before 2nd-stage expansion)
T = np.squeeze (T, -6)
T = np.tensordot(T, W, ([-3,-2,-1], [1,2,3]))
T = np.rollaxis (T, -1, 1)
return T
def backward(self, T, mode='X'):
if 'X' in mode and 'G' in mode:
D = T
X = np.squeeze (self.X, 1)
G = np.tensordot(D, X, ([0,2,3],[0,1,2]))
self.accumulate(G)
if 'X' in mode: T = uncollapse(T, self.W)
else : T = np.sum(T, 1)[:,None]
O = np.zeros((T.shape[0], 1) + (self.sh[2]-self.w[2]+1, self.sh[3]-self.w[3]+1) + tuple(self.w[1:]) + T.shape[7:], dtype='float32')
_ = ne.evaluate('T', out=O[self.S]) #O[self.S] = T
O = unexpand(O)
return O
def backward_cpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
h, w = x.shape[2:]
gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
gcol = numpy.tensordot(W, gy, (0, 1))
gcol = numpy.rollaxis(gcol, 3)
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)
if b is None:
return gx, gW
else:
gb = gy.sum(axis=(0, 2, 3))
return gx, gW, gb
def forward_cpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
kh, kw = W.shape[2:]
_, _, h, w = x.shape
gcol = numpy.tensordot(W, x, (0, 1))
# - k, m, n: shape of out_channel
# - b: number of inputs
# - h, w: height and width of kernels
# k, m, n, b, h, w -> b, k, m, n, h, w
gcol = numpy.rollaxis(gcol, 3)
if self.outh is None:
self.outh = conv.get_deconv_outsize(h, kh, self.sy, self.ph)
if self.outw is None:
self.outw = conv.get_deconv_outsize(w, kw, self.sx, self.pw)
y = conv.col2im_cpu(
gcol, self.sy, self.sx, self.ph, self.pw, self.outh, self.outw)
# b, k, h, w
if b is not None:
y += b.reshape(1, b.size, 1, 1)
return y,
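
# Toy illustration of the axis order noted in the comment above:
# tensordot(W, x, (0, 1)) leaves W's trailing axes first and x's remaining
# axes last; rollaxis then brings the batch axis to the front.
import numpy as np
W = np.random.rand(5, 3, 2, 2)   # (c_in, c_out, kh, kw)
x = np.random.rand(7, 5, 4, 4)   # (batch, c_in, h, w)
gcol = np.tensordot(W, x, (0, 1))
assert gcol.shape == (3, 2, 2, 7, 4, 4)   # k, m, n, b, h, w
gcol = np.rollaxis(gcol, 3)
assert gcol.shape == (7, 3, 2, 2, 4, 4)   # b, k, m, n, h, w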
def backward_cpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
kh, kw = W.shape[2:]
col = conv.im2col_cpu(
gy, kh, kw, self.sy, self.sx, self.ph, self.pw)
gW = numpy.tensordot(x, col, ([0, 2, 3], [0, 4, 5]))
gx = numpy.tensordot(col, W, ([1, 2, 3], [1, 2, 3]))
gx = numpy.rollaxis(gx, 3, 1)
if b is None:
return gx, gW
else:
gb = gy.sum(axis=(0, 2, 3))
return gx, gW, gb
def predictor(self, movie_id, user_id):
w = self.getW(user_movies[user_id])
    # making predictions; the part Vq is not given
data = copy.deepcopy(self.data[user_id])
probs = np.ones(5)
mx, index = -1, 0
for i in range(5):
calc = 1.0
for j in range(self.F):
            temp = np.tensordot(data, w[j]) + self.featureBias[j]
temp = 1.0 + np.exp(temp)
calc *= temp
probs[i] = calc
if mx < probs[i]:
index = i
mx = probs[i]
return index
# Source: function_binary_xnor_convolution_2d.py (project: XNOR-Net, author: rarilurelo)
def backward_cpu(self, inputs, grad_outputs):
x, W = inputs[:2]
Wb = binarize_cpu(W)
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
h, w = x.shape[2:]
gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
gcol = numpy.tensordot(Wb, gy, (0, 1))
gcol = numpy.rollaxis(gcol, 3)
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)
if b is None:
return gx, gW
else:
gb = gy.sum(axis=(0, 2, 3))
return gx, gW, gb
def forward_cpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
kh, kw = W.shape[2:]
self.col = conv.im2col_cpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
        Wb = numpy.where(W >= 0, 1, -1).astype(W.dtype, copy=False)
y = numpy.tensordot(
self.col, Wb, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
if b is not None:
y += b
return numpy.rollaxis(y, 3, 1),
def backward_cpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
h, w = x.shape[2:]
gW = numpy.tensordot(
gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype, copy=False)
        Wb = numpy.where(W >= 0, 1, -1).astype(W.dtype, copy=False)
gcol = numpy.tensordot(Wb, gy, (0, 1)).astype(x.dtype, copy=False)
gcol = numpy.rollaxis(gcol, 3)
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)
if b is None:
return gx, gW
else:
gb = gy.sum(axis=(0, 2, 3))
return gx, gW, gb
def forward_cpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
kh, kw = W.shape[2:]
self.col = conv.im2col_cpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
        Xb = numpy.where(self.col > 0, 1, self.col).astype(x.dtype, copy=False)
        Xb = numpy.where(self.col < 0, -1, Xb).astype(x.dtype, copy=False)
        Wb = numpy.where(W >= 0, 1, -1).astype(W.dtype, copy=False)
y = numpy.tensordot(
Xb, Wb, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
if b is not None:
y += b
return numpy.rollaxis(y, 3, 1),
def get_roto_translation_matrix(theta, rotation_axis=np.array([1,0,0]), translation=np.array([0, 0, 0])):
    n = np.linalg.norm(rotation_axis)
    assert np.abs(n) >= 0.001, 'rotation axis too close to zero.'
    rot = rotation_axis / n
    # Rodrigues' rotation formula:
    # R = cos(theta) * I + sin(theta) * [r]_x + (1 - cos(theta)) * r r^T
    cross_prod = np.array([[0, -rot[2], rot[1]],
                           [rot[2], 0, -rot[0]],
                           [-rot[1], rot[0], 0]])
    rot_part = (np.cos(theta) * np.identity(3)
                + np.sin(theta) * cross_prod
                + (1 - np.cos(theta)) * np.tensordot(rot, rot, axes=0))
# transformations parameters
rot_transl = np.identity(4)
rot_transl[:3, :3] = rot_part
rot_transl[:3, 3] = translation
return rot_transl
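
# Sanity checks for the Rodrigues formula above (calls the function just
# defined): the rotation block must be orthogonal with determinant one,
# and theta = 0 must give the identity.
import numpy as np
R = get_roto_translation_matrix(0.7, rotation_axis=np.array([0, 0, 1]))
rot = R[:3, :3]
assert np.allclose(rot @ rot.T, np.identity(3))
assert np.isclose(np.linalg.det(rot), 1.0)
assert np.allclose(get_roto_translation_matrix(0.0)[:3, :3], np.identity(3))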
def backward_cpu(self, inputs, grad_outputs):
x, W = inputs[:2]
Wb = numpy.where(W>=0, 1, -1).astype(numpy.float32, copy=False)
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
h, w = x.shape[2:]
gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
gcol = numpy.tensordot(Wb, gy, (0, 1))
gcol = numpy.rollaxis(gcol, 3)
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)
if b is None:
return gx, gW
else:
gb = gy.sum(axis=(0, 2, 3))
return gx, gW, gb
def g_def(self, v, t, vbar):
v = np.reshape(v, self.vshape)
id_m = np.array([[1, 0], [0, 1]])
ret = []
    for k in range(len(v)):
        t_p = np.tensordot(v[k], v[k], axes=0)   # outer product v_k v_k^T
        p_vk = id_m - t_p   # projects onto the complement of v_k (assumes |v_k| = 1)
        r_ori = 2 * np.pi * np.random.random()
        ret_s = self.nu * np.dot(p_vk, vbar[k]) + self.C * np.dot(p_vk, [np.sin(r_ori), np.cos(r_ori)])
ret_s = np.reshape(ret_s, [2, 1])
ret.append(ret_s)
ret = np.array(ret)
return np.reshape(ret, 2 * len(ret))
def predict(self, tree):
if tr.isleaf(tree):
# output = word vector
        try:
            tree.vector = self.L[:, self.word_map[tree[0]]]
        except KeyError:
            # out-of-vocabulary word: fall back to the UNK vector
            tree.vector = self.L[:, self.word_map[tr.UNK]]
else:
# calculate output of child nodes
self.predict(tree[0])
self.predict(tree[1])
# compute output
lr = np.hstack([tree[0].vector, tree[1].vector])
tree.vector = np.tanh(
np.tensordot(self.V, np.outer(lr, lr), axes=([1, 2], [0, 1])) +
np.dot(self.W, lr) + self.b)
# softmax
import util
tree.output = util.softmax(np.dot(self.Ws, tree.vector) + self.bs)
label = np.argmax(tree.output)
tree.set_label(str(label))
return tree
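
# The tensordot in predict() evaluates the RNTN bilinear form: component k
# of the result equals lr^T V[k] lr. Toy check (sizes illustrative):
import numpy as np
V = np.random.rand(3, 6, 6)   # one 6x6 slice per output component
lr = np.random.rand(6)        # concatenated child vectors
out = np.tensordot(V, np.outer(lr, lr), axes=([1, 2], [0, 1]))
assert np.allclose(out, np.array([lr @ V[k] @ lr for k in range(3)]))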
# Source: function_binary_convolution_2d.py (project: BinaryNetConvolution, author: rarilurelo)
def backward_cpu(self, inputs, grad_outputs):
x, W = inputs[:2]
Wb = numpy.where(W>=0, 1, -1).astype(numpy.float32, copy=False)
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
h, w = x.shape[2:]
gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
gcol = numpy.tensordot(Wb, gy, (0, 1))
gcol = numpy.rollaxis(gcol, 3)
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)
if b is None:
return gx, gW
else:
gb = gy.sum(axis=(0, 2, 3))
return gx, gW, gb
def __pow__(self, other):
if self.data is None:
raise ValueError("No power without ndarray data.")
numpy = import_module('numpy')
free = self.free
marray = self.data
for metric in free:
marray = numpy.tensordot(
marray,
numpy.tensordot(
metric[0]._tensortype.data,
marray,
(1, 0)
),
(0, 0)
)
pow2 = marray[()]
return pow2 ** (Rational(1, 2) * other)
def apply_grad_cartesian_tensor(grad_X, zmat_dist):
"""Apply the gradient for transformation to cartesian space onto zmat_dist.
Args:
grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
The mathematical details of the index layout is explained in
:meth:`~chemcoord.Cartesian.get_grad_zmat()`.
zmat_dist (:class:`~chemcoord.Zmat`):
Distortions in Zmatrix space.
Returns:
:class:`~chemcoord.Cartesian`: Distortions in cartesian space.
"""
columns = ['bond', 'angle', 'dihedral']
C_dist = zmat_dist.loc[:, columns].values.T
try:
C_dist = C_dist.astype('f8')
C_dist[[1, 2], :] = np.radians(C_dist[[1, 2], :])
except (TypeError, AttributeError):
C_dist[[1, 2], :] = sympy.rad(C_dist[[1, 2], :])
cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T
from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
return Cartesian(atoms=zmat_dist['atom'],
coords=cart_dist, index=zmat_dist.index)
def forward_cpu(self, inputs):
x, W = inputs[:2]
n_batch, c_in, N = x.shape
b = inputs[2] if len(inputs) == 3 else None
K = self.K
if x.dtype != self.LmI.dtype:
self.LmI = self.LmI.astype(x.dtype)
C = np.empty((n_batch, K, N, c_in), dtype=x.dtype)
chebyshev_matvec_cpu(C, x, K, n_batch, self.LmI)
C = C.transpose((0, 3, 1, 2))
self.C = C
y = np.tensordot(C, W, ((1, 2), (1, 2)))
if b is not None:
y += b
return np.rollaxis(y, 2, 1), # y.shape = (n_batch, c_out, N)
def forward_gpu(self, inputs):
x, W = inputs[:2]
n_batch, c_in, N = x.shape
b = inputs[2] if len(inputs) == 3 else None
xp = cuda.get_array_module(x)
with cuda.get_device(x.data):
K = self.K
LmI_data, LmI_indices, LmI_indptr = self.LmI_tuple
if x.dtype != LmI_data.dtype:
LmI_data = LmI_data.astype(x.dtype)
C = xp.empty((K, N, c_in, n_batch), dtype=x.dtype)
chebyshev_matvec_gpu(C, x, K, n_batch,
LmI_data, LmI_indices, LmI_indptr)
C = C.transpose((3, 2, 0, 1))
self.C = C
y = xp.tensordot(C, W, ((1, 2), (1, 2)))
if b is not None:
y += b
return xp.rollaxis(y, 2, 1), # y.shape = (n_batch, c_out, N)
def test_tensordot(a_shape, b_shape, axes):
a = random_x(a_shape)
b = random_x(b_shape)
sa = COO.from_numpy(a)
sb = COO.from_numpy(b)
assert_eq(np.tensordot(a, b, axes),
sparse.tensordot(sa, sb, axes))
assert_eq(np.tensordot(a, b, axes),
sparse.tensordot(sa, b, axes))
# assert isinstance(sparse.tensordot(sa, b, axes), COO)
assert_eq(np.tensordot(a, b, axes),
sparse.tensordot(a, sb, axes))
# assert isinstance(sparse.tensordot(a, sb, axes), COO)
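
# Minimal usage sketch of what the test exercises, assuming the pydata
# `sparse` package is installed: sparse.tensordot mirrors np.tensordot and
# accepts mixed sparse/dense operands.
import numpy as np
import sparse
a = np.zeros((3, 4)); a[0, 1] = 2.0
b = np.zeros((4, 5)); b[1, 2] = 3.0
sa, sb = sparse.COO.from_numpy(a), sparse.COO.from_numpy(b)
dense = np.tensordot(a, b, axes=1)
assert np.allclose(sparse.tensordot(sa, sb, axes=1).todense(), dense)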