def test_ormqr(self):
    mat1 = torch.randn(10, 10)
    mat2 = torch.randn(10, 10)
    q, r = torch.qr(mat1)
    m, tau = torch.geqrf(mat1)

    res1 = torch.mm(q, mat2)
    res2, _ = torch.ormqr(m, tau, mat2)
    self.assertEqual(res1, res2)

    res1 = torch.mm(mat2, q)
    res2, _ = torch.ormqr(m, tau, mat2, False)
    self.assertEqual(res1, res2)

    res1 = torch.mm(q.t(), mat2)
    res2, _ = torch.ormqr(m, tau, mat2, True, True)
    self.assertEqual(res1, res2)

    res1 = torch.mm(mat2, q.t())
    res2, _ = torch.ormqr(m, tau, mat2, False, True)
    self.assertEqual(res1, res2)
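For context, torch.ormqr applies the Q factor encoded in geqrf's (m, tau) output without ever materializing Q. If an explicit Q is needed, torch.orgqr can reconstruct it; a minimal sketch, assuming the same era's API:

# materialize Q from the geqrf output; should reproduce q from torch.qr above
q_explicit = torch.orgqr(m, tau)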
def test_inverse(self):
    M = torch.randn(5, 5)
    MI = torch.inverse(M)
    E = torch.eye(5)
    self.assertFalse(MI.is_contiguous(), 'MI is contiguous')
    self.assertEqual(E, torch.mm(M, MI), 1e-8, 'inverse value')
    self.assertEqual(E, torch.mm(MI, M), 1e-8, 'inverse value')

    MII = torch.Tensor(5, 5)
    torch.inverse(M, out=MII)
    self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
    self.assertEqual(MII, MI, 0, 'inverse value in-place')
    # second call, now that MII is transposed
    torch.inverse(M, out=MII)
    self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
    self.assertEqual(MII, MI, 0, 'inverse value in-place')
def test_cholesky(self):
    x = torch.rand(10, 10) + 1e-1
    A = torch.mm(x, x.t())

    # default case
    C = torch.potrf(A)
    B = torch.mm(C.t(), C)
    self.assertEqual(A, B, 1e-14)

    # test upper triangular
    U = torch.potrf(A, True)
    B = torch.mm(U.t(), U)
    self.assertEqual(A, B, 1e-14, 'potrf (upper) did not allow rebuilding the original matrix')

    # test lower triangular
    L = torch.potrf(A, False)
    B = torch.mm(L, L.t())
    self.assertEqual(A, B, 1e-14, 'potrf (lower) did not allow rebuilding the original matrix')
def test_potrs(self):
    a = torch.Tensor(((6.80, -2.11, 5.66, 5.97, 8.23),
                      (-6.05, -3.30, 5.36, -4.44, 1.08),
                      (-0.45, 2.58, -2.70, 0.27, 9.04),
                      (8.32, 2.71, 4.35, -7.17, 2.14),
                      (-9.67, -5.14, -7.26, 6.08, -6.87))).t()
    b = torch.Tensor(((4.02, 6.19, -8.22, -7.57, -3.03),
                      (-1.56, 4.00, -8.67, 1.75, 2.86),
                      (9.81, -4.09, -4.57, -8.61, 8.99))).t()

    # make sure 'a' is symmetric PSD
    a = torch.mm(a, a.t())

    # upper triangular test
    U = torch.potrf(a)
    x = torch.potrs(b, U)
    self.assertLessEqual(b.dist(torch.mm(a, x)), 1e-12)

    # lower triangular test
    L = torch.potrf(a, False)
    x = torch.potrs(b, L, False)
    self.assertLessEqual(b.dist(torch.mm(a, x)), 1e-12)
def forward(ctx, input1, input2, weight, bias=None):
    ctx.save_for_backward(input1, input2, weight, bias)
    output = input1.new(input1.size(0), weight.size(0))
    buff = input1.new()

    # compute output scores:
    for k, w in enumerate(weight):
        torch.mm(input1, w, out=buff)
        buff.mul_(input2)
        torch.sum(buff, 1, keepdim=True, out=output.narrow(1, k, 1))

    if bias is not None:
        output.add_(bias.expand_as(output))

    return output
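The loop above evaluates a bilinear form per output feature, output[i][k] = input1[i] · weight[k] · input2[i], before adding the bias. A minimal shape sketch that reproduces one entry with explicit torch.mm calls (all sizes below are illustrative assumptions):

import torch

batch, in1, in2, nout = 4, 5, 6, 3      # illustrative sizes
x1 = torch.randn(batch, in1)
x2 = torch.randn(batch, in2)
W = torch.randn(nout, in1, in2)         # one (in1 x in2) matrix per output feature

# entry (i, k) of the bilinear output: x1[i] @ W[k] @ x2[i]
i, k = 0, 1
entry = torch.mm(x1[i:i + 1], torch.mm(W[k], x2[i].unsqueeze(1)))  # shape (1, 1)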
def backward(ctx, grad_output):
    L, = ctx.saved_variables
    if ctx.upper:
        L = L.t()
        grad_output = grad_output.t()

    # make sure not to double-count variation, since
    # only half of output matrix is unique
    Lbar = grad_output.tril()

    P = Potrf.phi(torch.mm(L.t(), Lbar))
    S = torch.gesv(P + P.t(), L.t())[0]
    S = torch.gesv(S.t(), L.t())[0]
    S = Potrf.phi(S)

    return S, None
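Potrf.phi is referenced above but not shown. In the Cholesky backward it takes the lower triangle of a matrix and halves its diagonal, so the symmetric part of the variation is not double-counted. A minimal sketch of the assumed definition:

def phi(A):
    # lower triangle of A with the diagonal halved (assumed definition of Potrf.phi)
    B = A.tril()
    return B - 0.5 * torch.diag(torch.diag(B))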
def backward(ctx, grad_output):
    vector1, vector2 = ctx.saved_variables
    grad_add_matrix = grad_vector1 = grad_vector2 = None

    if ctx.needs_input_grad[0]:
        grad_add_matrix = maybe_unexpand(grad_output, ctx.add_matrix_size)
        if ctx.alpha != 1:
            grad_add_matrix = grad_add_matrix.mul(ctx.alpha)

    if ctx.needs_input_grad[1]:
        grad_vector1 = torch.mv(grad_output, vector2)
        if ctx.beta != 1:
            grad_vector1 *= ctx.beta

    if ctx.needs_input_grad[2]:
        # TODO: maybe it's better to do transpose + mv + transpose
        grad_vector2 = torch.mm(vector1.unsqueeze(0), grad_output).squeeze(0)
        if ctx.beta != 1:
            grad_vector2 *= ctx.beta

    return grad_add_matrix, grad_vector1, grad_vector2, None, None, None
def updateOutput(self, input):
    assert len(input) == 2
    a, b = input
    assert a.ndimension() == 2 or a.ndimension() == 3
    assert a.dim() == b.dim()

    if a.ndimension() == 2:
        if self.transA:
            a = a.t()
        if self.transB:
            b = b.t()
        self.output.resize_(a.size(0), b.size(1))
        torch.mm(a, b, out=self.output)
    else:
        if self.transA:
            a = a.transpose(1, 2)
        if self.transB:
            b = b.transpose(1, 2)
        self.output.resize_(a.size(0), a.size(1), b.size(2))
        torch.bmm(a, b, out=self.output)

    return self.output
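This is the updateOutput of the legacy MM container: it multiplies a pair of matrices with torch.mm (or batches with torch.bmm), optionally transposing either operand first. A hedged usage sketch, assuming the torch.legacy.nn module of this era:

import torch
from torch.legacy import nn   # legacy Lua-style modules; assumed available here

m = nn.MM(transA=False, transB=True)   # transpose the second operand
a = torch.randn(4, 5)
b = torch.randn(6, 5)
out = m.updateOutput([a, b])           # equivalent to torch.mm(a, b.t()); shape (4, 6)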
def updateOutput(self, input):
    self._assertInput(input)

    # set up buffer:
    if self.buff2 is None:
        self.buff2 = input[0].new()
    self.buff2.resize_as_(input[1])

    # compute output scores:
    self.output.resize_(input[0].size(0), self.weight.size(0))
    for k in range(self.weight.size(0)):
        torch.mm(input[0], self.weight[k], out=self.buff2)
        self.buff2.mul_(input[1])
        torch.sum(self.buff2, 1, True, out=self.output.narrow(1, k, 1))

    if self.bias is not None:
        self.output.add_(self.bias.view(1, self.bias.nelement()).expand_as(self.output))

    return self.output
def test_saddmm(self):
    def test_shape(di, dj, dk):
        x = self._gen_sparse(2, 20, [di, dj])[0]
        t = self._gen_sparse(2, 20, [di, dk])[0]
        y = torch.randn(dj, dk)
        alpha = random.random()
        beta = random.random()

        res = torch.saddmm(alpha, t, beta, x, y)
        expected = torch.addmm(alpha, t.to_dense(), beta, x.to_dense(), y)
        self.assertEqual(res.to_dense(), expected)

        res = torch.saddmm(t, x, y)
        expected = torch.addmm(t.to_dense(), x.to_dense(), y)
        self.assertEqual(res.to_dense(), expected)

        res = torch.smm(x, y)
        expected = torch.mm(x.to_dense(), y)
        self.assertEqual(res.to_dense(), expected)

    test_shape(7, 5, 3)
    test_shape(1000, 100, 100)
    test_shape(3000, 64, 300)
def test_forward_inv_mm():
    for n_cols in [2, 3, 4]:
        a = torch.Tensor([
            [5, -3, 0],
            [-3, 5, 0],
            [0, 0, 2],
        ])
        b = torch.randn(3, n_cols)
        actual = a.inverse().mm(b)

        a_var = Variable(a)
        b_var = Variable(b)
        out_var = gpytorch.inv_matmul(a_var, b_var)
        res = out_var.data

        assert torch.norm(actual - res) < 1e-4
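gpytorch.inv_matmul computes a.inverse().mm(b) without forming the explicit inverse. In plain torch of the same era the comparable primitive is a linear solve; a minimal sketch:

# solve a @ x = b instead of inverting a; torch.gesv returns (solution, LU factorization)
x, _ = torch.gesv(b, a)   # x approximates a.inverse().mm(b), more cheaply and stably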
def solve_kkt(Ks, K, Ktildes, Ktilde,
              rx, rs, rz, ry, niter=1):
    nBatch = len(Ks)
    nz = rx.size(1)
    nineq = rz.size(1)
    neq = ry.size(1)

    r = -torch.cat((rx, rs, rz, ry), 1)
    l = torch.spbqrfactsolve(*([r] + Ktilde))
    res = torch.stack([r[i] - torch.mm(Ks[i], l[i].unsqueeze(1))
                       for i in range(nBatch)])
    for k in range(niter):
        d = torch.spbqrfactsolve(*([res] + Ktilde))
        l = l + d
        res = torch.stack([r[i] - torch.mm(Ks[i], l[i].unsqueeze(1))
                           for i in range(nBatch)])

    solx = l[:, :nz]
    sols = l[:, nz:nz + nineq]
    solz = l[:, nz + nineq:nz + 2 * nineq]
    soly = l[:, nz + 2 * nineq:nz + 2 * nineq + neq]
    return solx, sols, solz, soly
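The loop above is plain iterative refinement: solve against an approximate factorization (Ktilde), measure the residual against the true matrix (Ks), and solve again on that residual. Since torch.spbqrfactsolve is a custom extension, here is a generic dense sketch of the same idea with torch.gesv standing in for the solve:

import torch

K = torch.randn(6, 6) + 6 * torch.eye(6)   # "true" system matrix (illustrative)
K_approx = K + 0.01 * torch.randn(6, 6)    # approximation we can solve against cheaply
r = torch.randn(6, 1)

l, _ = torch.gesv(r, K_approx)             # initial solve against the approximation
for _ in range(3):
    res = r - torch.mm(K, l)               # residual w.r.t. the true matrix
    d, _ = torch.gesv(res, K_approx)       # correction from another approximate solve
    l = l + d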
def getSocialTensor(self, grid, hidden_states):
    '''
    Computes the social tensor for a given grid mask and hidden states of all peds

    params:
    grid : Grid masks
    hidden_states : Hidden states of all peds
    '''
    # Number of peds
    numNodes = grid.size()[0]

    # Construct the variable
    social_tensor = Variable(torch.zeros(numNodes, self.grid_size*self.grid_size, self.rnn_size).cuda())

    # For each ped
    for node in range(numNodes):
        # Compute the social tensor
        social_tensor[node] = torch.mm(torch.t(grid[node]), hidden_states)

    # Reshape the social tensor
    social_tensor = social_tensor.view(numNodes, self.grid_size*self.grid_size*self.rnn_size)
    return social_tensor
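The torch.mm here does the pooling: grid[node] is a (numNodes, grid_size*grid_size) mask, so its transpose times the (numNodes, rnn_size) hidden states sums the hidden states of the pedestrians occupying each grid cell. A shape-only sketch (sizes are illustrative):

numNodes, grid_cells, rnn_size = 3, 4, 5       # illustrative sizes
grid_node = torch.zeros(numNodes, grid_cells)  # mask rows: which cell each ped occupies
hidden_states = torch.randn(numNodes, rnn_size)

# (grid_cells, numNodes) @ (numNodes, rnn_size) -> (grid_cells, rnn_size):
# row c is the sum of the hidden states of the peds in cell c
pooled = torch.mm(torch.t(grid_node), hidden_states)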
def forward(self, qu, w, cand):
    qu = Variable(qu)
    w = Variable(w)
    cand = Variable(cand)

    embed_q = self.embed_B(qu)
    embed_w1 = self.embed_A(w)
    embed_w2 = self.embed_C(w)
    embed_c = self.embed_C(cand)

    #pdb.set_trace()
    q_state = torch.sum(embed_q, 1).squeeze(1)
    w1_state = torch.sum(embed_w1, 1).squeeze(1)
    w2_state = torch.sum(embed_w2, 1).squeeze(1)

    for _ in range(self.config.hop):
        sent_dot = torch.mm(q_state, torch.transpose(w1_state, 0, 1))
        sent_att = F.softmax(sent_dot)

        a_dot = torch.mm(sent_att, w2_state)
        a_dot = self.H(a_dot)
        q_state = torch.add(a_dot, q_state)

    f_feat = torch.mm(q_state, torch.transpose(embed_c, 0, 1))
    score = F.log_softmax(f_feat)
    return score
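Each hop above is dot-product attention built from torch.mm: the query state is scored against input memory embeddings, softmaxed over memories, then used for a weighted read from the output memory. A stripped-down sketch of one hop (shapes are illustrative assumptions):

import torch
import torch.nn.functional as F

q = torch.randn(1, 20)          # query state (batch of 1, embedding dim 20)
mem_in = torch.randn(8, 20)     # input-side memory embeddings (8 sentences)
mem_out = torch.randn(8, 20)    # output-side memory embeddings

att = F.softmax(torch.mm(q, mem_in.t()), dim=1)  # attention over the 8 memories
q_next = torch.mm(att, mem_out) + q              # weighted read plus residual update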
def forward(self, input_, hx):
    """
    Args:
        input_: A (batch, input_size) tensor containing input
            features.
        hx: initial hidden, where the size of the state is
            (batch, hidden_size).

    Returns:
        newh: Tensors containing the next hidden state.
    """
    batch_size = hx.size(0)
    Ux = torch.mm(input_, self.U)
    hx = Ux + hx
    newh = self._EUNN(hx=hx, thetaA=self.thetaA, thetaB=self.thetaB)
    newh = self._modReLU(newh, self.bias)
    return newh