def pairwise_distance(x1, x2, p=2, eps=1e-6):
    r"""
    Computes the batchwise pairwise distance between vectors x1 and x2 using the p-norm:

    .. math ::
        \Vert x \Vert _p := \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}

    Args:
        x1: first input tensor
        x2: second input tensor
        p: the norm degree. Default: 2
        eps (float, optional): Small value to avoid division by zero. Default: 1e-6

    Shape:
        - Input: :math:`(N, D)` where `D = vector dimension`
        - Output: :math:`(N, 1)`

    Example::
        >>> input1 = autograd.Variable(torch.randn(100, 128))
        >>> input2 = autograd.Variable(torch.randn(100, 128))
        >>> output = F.pairwise_distance(input1, input2, p=2)
        >>> output.backward()
    """
    assert x1.size() == x2.size(), "Input sizes must be equal."
    assert x1.dim() == 2, "Input must be a 2D matrix."
    diff = torch.abs(x1 - x2)
    out = torch.pow(diff + eps, p).sum(dim=1, keepdim=True)
    return torch.pow(out, 1. / p)
def updateOutput(self, input):
    assert input.dim() == 2
    inputSize = self.weight.size(1)
    outputSize = self.weight.size(0)
    if self._weightNorm is None:
        self._weightNorm = self.weight.new()
    if self._inputNorm is None:
        self._inputNorm = self.weight.new()
    # y_j = (w_j * x) / ( || w_j || * || x || )
    # row-wise L2 norms of the weight matrix, with a small epsilon for stability
    torch.norm(self.weight, 2, 1, out=self._weightNorm, keepdim=True).add_(1e-12)
    batchSize = input.size(0)
    nelement = self.output.nelement()
    self.output.resize_(batchSize, outputSize)
    if self.output.nelement() != nelement:
        self.output.zero_()
    # output <- input @ weight^T, i.e. the dot products w_j . x
    self.output.addmm_(0., 1., input, self.weight.t())
    torch.norm(input, 2, 1, out=self._inputNorm, keepdim=True).add_(1e-12)
    # divide by both norms to turn the dot products into cosine similarities
    self.output.div_(self._weightNorm.view(1, outputSize).expand_as(self.output))
    self.output.div_(self._inputNorm.expand_as(self.output))
    return self.output
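For reference, a minimal sketch of the same cosine computation with modern, out-of-place tensor ops; the helper name and shapes below are illustrative, not part of the original module:

import torch

def cosine_layer_reference(input, weight, eps=1e-12):
    # input: (batchSize, inputSize), weight: (outputSize, inputSize)
    dots = input.matmul(weight.t())                    # w_j . x for every pair
    w_norm = weight.norm(p=2, dim=1).clamp(min=eps)    # || w_j ||
    x_norm = input.norm(p=2, dim=1).clamp(min=eps)     # || x ||
    return dots / (x_norm.unsqueeze(1) * w_norm.unsqueeze(0))

x = torch.randn(5, 8)
w = torch.randn(3, 8)
out = cosine_layer_reference(x, w)   # shape (5, 3), values in [-1, 1]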
def getMagnitudeAndDirection(*args):
    '''
    Gets the magnitude and unit direction of the vector pointing from position
    pos_j to position pos_i
    params:
    args: Either a single list [pos_i, pos_j] or the two positions themselves
    (variable-length argument)
    '''
if len(args) == 1:
pos_list = args[0]
pos_i = pos_list[0]
pos_j = pos_list[1]
vector = np.array(pos_i) - np.array(pos_j)
magnitude = np.linalg.norm(vector)
if abs(magnitude) > 1e-4:
direction = vector / magnitude
else:
direction = vector
return [magnitude] + direction.tolist()
elif len(args) == 2:
pos_i = args[0]
pos_j = args[1]
ret = torch.zeros(3)
vector = pos_i - pos_j
magnitude = torch.norm(vector)
if abs(magnitude) > 1e-4:
direction = vector / magnitude
else:
direction = vector
ret[0] = magnitude
ret[1:3] = direction
return ret
else:
raise NotImplementedError('getMagnitudeAndDirection: Function signature incorrect')
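A small usage sketch covering both call signatures; the 2-D positions below are made up:

import numpy as np
import torch

# list-of-positions form: returns [magnitude, dx, dy] as a Python list
print(getMagnitudeAndDirection([(3.0, 4.0), (0.0, 0.0)]))   # [5.0, 0.6, 0.8]

# two-tensor form: returns a length-3 tensor [magnitude, dx, dy]
pos_i = torch.Tensor([3.0, 4.0])
pos_j = torch.Tensor([0.0, 0.0])
print(getMagnitudeAndDirection(pos_i, pos_j))               # tensor([5.0, 0.6, 0.8])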
def get_mean_error(ret_nodes, nodes, assumedNodesPresent, trueNodesPresent):
    '''
    Computes average displacement error (ADE); see the toy example after
    get_final_error below
    Parameters
    ==========
    ret_nodes : A tensor of shape pred_length x numNodes x 2
    Contains the predicted positions for the nodes
    nodes : A tensor of shape pred_length x numNodes x 2
    Contains the true positions for the nodes
    assumedNodesPresent : A list of the nodeIDs assumed present over the whole prediction
    trueNodesPresent : A list of lists, of size pred_length
    Each list contains the nodeIDs of the nodes actually present at that time-step
    Returns
    =======
    Error : Mean Euclidean distance between the predicted and the true trajectories,
    averaged over time-steps
    '''
pred_length = ret_nodes.size()[0]
error = torch.zeros(pred_length).cuda()
counter = 0
for tstep in range(pred_length):
counter = 0
for nodeID in assumedNodesPresent:
if nodeID not in trueNodesPresent[tstep]:
continue
pred_pos = ret_nodes[tstep, nodeID, :]
true_pos = nodes[tstep, nodeID, :]
error[tstep] += torch.norm(pred_pos - true_pos, p=2)
counter += 1
if counter != 0:
error[tstep] = error[tstep] / counter
return torch.mean(error)
def get_final_error(ret_nodes, nodes, assumedNodesPresent, trueNodesPresent):
    '''
    Computes final displacement error (FDE)
    Parameters
    ==========
    ret_nodes : A tensor of shape pred_length x numNodes x 2
    Contains the predicted positions for the nodes
    nodes : A tensor of shape pred_length x numNodes x 2
    Contains the true positions for the nodes
    assumedNodesPresent : A list of the nodeIDs assumed present over the whole prediction
    trueNodesPresent : A list of lists, of size pred_length
    Each list contains the nodeIDs of the nodes actually present at that time-step
    Returns
    =======
    Error : Mean Euclidean distance at the final time-step between the predicted
    and the true positions
    '''
pred_length = ret_nodes.size()[0]
error = 0
counter = 0
# Last time-step
tstep = pred_length - 1
for nodeID in assumedNodesPresent:
if nodeID not in trueNodesPresent[tstep]:
continue
pred_pos = ret_nodes[tstep, nodeID, :]
true_pos = nodes[tstep, nodeID, :]
error += torch.norm(pred_pos - true_pos, p=2)
counter += 1
if counter != 0:
error = error / counter
return error
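A toy check of both metrics, a sketch assuming a CUDA build (get_mean_error allocates its error buffer with .cuda(), so the inputs are moved to the GPU as well):

import torch

# two time-steps, one node, 2-D positions (pred_length x numNodes x 2)
pred = torch.Tensor([[[0.0, 0.0]], [[1.0, 1.0]]]).cuda()
true = torch.Tensor([[[0.0, 1.0]], [[1.0, 3.0]]]).cuda()
assumed = [0]               # node 0 is assumed present throughout
actual = [[0], [0]]         # node 0 is actually present at both steps

# per-step errors are 1.0 and 2.0, so ADE = 1.5 and FDE = 2.0
print(get_mean_error(pred, true, assumed, actual))    # ~1.5
print(get_final_error(pred, true, assumed, actual))   # ~2.0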
def weight_proj_l2norm(param):
    # rescale the parameter toward unit L2 norm; the scaling coefficient is capped
    # at opt.wproj_upper (opt is a module-level options object; 1e-8 avoids division by zero)
    norm = torch.norm(param.data, p=2) + 1e-8
    coeff = min(opt.wproj_upper, 1.0 / norm)
    param.data.mul_(coeff)
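A minimal usage sketch; opt is assumed to be a module-level options object, mocked here with types.SimpleNamespace:

import types
import torch

opt = types.SimpleNamespace(wproj_upper=10.0)   # stand-in for the script's parsed options

w = torch.nn.Parameter(torch.randn(64, 64) * 3.0)
print(torch.norm(w.data, p=2))   # well above 1 for this initialization
weight_proj_l2norm(w)
print(torch.norm(w.data, p=2))   # ~1.0 after the projection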
# custom weights initialization called on netG and netD
def pose_loss(input, target):
    """Mean L2 (Euclidean) distance between input and target poses"""
    x = torch.norm(input - target, dim=1)
    x = torch.mean(x)
    return x
def rotation_error(input, target):
    """Mean angular error in degrees between two batches of quaternions"""
    # normalize both quaternions to unit length
    x1 = torch.norm(input, dim=1)
    x2 = torch.norm(target, dim=1)
    x1 = torch.div(input, torch.stack((x1, x1, x1, x1), dim=1))
    x2 = torch.div(target, torch.stack((x2, x2, x2, x2), dim=1))
    # |q1 . q2| = cos(theta / 2); clamp guards acos against numerical overshoot above 1
    d = torch.abs(torch.sum(x1 * x2, dim=1)).clamp(max=1.0)
    theta = 2 * torch.acos(d) * 180 / math.pi
    theta = torch.mean(theta)
    return theta
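A quick sanity check with hand-picked quaternions, assuming (w, x, y, z) component order (the function only needs the order to be consistent between the two inputs):

import math
import torch

q_identity = torch.Tensor([[1.0, 0.0, 0.0, 0.0]])
c, s = math.cos(math.pi / 4), math.sin(math.pi / 4)
q_z90 = torch.Tensor([[c, 0.0, 0.0, s]])   # 90-degree rotation about z

print(rotation_error(q_identity, q_identity))   # ~0 degrees
print(rotation_error(q_identity, q_z90))        # ~90 degrees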
def forward(self, inpt):
    batch_size = self.batch_size
    # extract CNN features for the two input frames and flatten them per sample
    f0 = self.features(inpt[:, 0])
    f0 = f0.view(batch_size, -1)
    f1 = self.features(inpt[:, 1])
    f1 = f1.view(batch_size, -1)
    # f2 = self.features(inpt[:, 2])
    # f2 = f2.view(batch_size, -1)
    #
    # f3 = self.features(inpt[:, 3])
    # f3 = f3.view(batch_size, -1)
    #
    # f4 = self.features(inpt[:, 4])
    # f4 = f4.view(batch_size, -1)
    #
    # f = torch.stack((f0, f1, f2, f3, f4), dim=0).view(self.seq_length, batch_size, -1)
    # concatenate the two frame features into a single vector per sample
    f = torch.cat((f0, f1), dim=1)
    # _, hn = self.rnn(f, self.hidden)
    # hn = hn[self.gru_layer - 1].view(batch_size, -1)
    # hn = self.relu(hn)
    # hn = self.dropout(hn)
    # hn = self.regressor(hn)
    hn = self.regressor(f)
    # separate regression heads for translation, scale and rotation
    trans = self.trans_regressor(hn)
    # trans_norm = torch.norm(trans, dim=1)
    # trans = torch.div(trans, torch.cat((trans_norm, trans_norm, trans_norm), dim=1))
    scale = self.scale_regressor(hn)
    rotation = self.rotation_regressor(hn)
    return trans, scale, rotation
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
r"""Returns cosine similarity between x1 and x2, computed along dim.
.. math ::
\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}
Args:
x1 (Variable): First input.
x2 (Variable): Second input (of size matching x1).
dim (int, optional): Dimension of vectors. Default: 1
eps (float, optional): Small value to avoid division by zero.
Default: 1e-8
Shape:
- Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`.
- Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`.
Example::
>>> input1 = autograd.Variable(torch.randn(100, 128))
>>> input2 = autograd.Variable(torch.randn(100, 128))
>>> output = F.cosine_similarity(input1, input2)
>>> print(output)
"""
w12 = torch.sum(x1 * x2, dim)
w1 = torch.norm(x1, 2, dim)
w2 = torch.norm(x2, 2, dim)
return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
def test_computes_radial_basis_function():
    a = torch.Tensor([4, 2, 8]).view(3, 1)
    b = torch.Tensor([0, 2, 2]).view(3, 1)
    lengthscale = 2
    kernel = RBFKernel().initialize(log_lengthscale=math.log(lengthscale))
    kernel.eval()
    # entries are the pairwise squared distances (a_i - b_j)^2, so the expected
    # kernel is exp(-(a_i - b_j)^2 / lengthscale)
    actual = torch.Tensor([
        [16, 4, 4],
        [4, 0, 0],
        [64, 36, 36],
    ]).mul_(-1).div_(lengthscale).exp()
    res = kernel(Variable(a), Variable(b)).data
    assert(torch.norm(res - actual) < 1e-5)
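The hard-coded expected matrix can be reproduced with plain tensor ops; a sketch using modern PyTorch broadcasting:

import torch

a = torch.Tensor([4, 2, 8]).view(3, 1)
b = torch.Tensor([0, 2, 2]).view(3, 1)
lengthscale = 2.0

sq_dist = (a - b.t()) ** 2                 # [[16, 4, 4], [4, 0, 0], [64, 36, 36]]
expected = torch.exp(-sq_dist / lengthscale)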
def test_inv_matmul():
mat = torch.randn(4, 4)
res = make_mul_lazy_var()[0].inv_matmul(Variable(mat))
assert torch.norm(res.data - (t1_t2_t3_eval + added_diag.diag()).inverse().matmul(mat)) < 1e-3
def test_matmul_deterministic():
mat = torch.randn(4, 4)
res = make_mul_lazy_var()[0].matmul(Variable(mat))
assert torch.norm(res.data - (t1_t2_t3_eval + added_diag.diag()).matmul(mat)) < 1e-3
def test_matmul_approx():
class KissGPModel(gpytorch.GridInducingPointModule):
def __init__(self):
super(KissGPModel, self).__init__(grid_size=300, grid_bounds=[(0, 1)])
self.mean_module = ConstantMean(constant_bounds=(-1, 1))
covar_module = RBFKernel(log_lengthscale_bounds=(-100, 100))
covar_module.log_lengthscale.data = torch.FloatTensor([-2])
self.covar_module = covar_module
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return GaussianRandomVariable(mean_x, covar_x)
model = KissGPModel()
n = 100
d = 4
lazy_var_list = []
lazy_var_eval_list = []
for i in range(d):
x = Variable(torch.rand(n))
y = Variable(torch.rand(n))
model.condition(x, y)
toeplitz_var = model(x).covar()
lazy_var_list.append(toeplitz_var)
lazy_var_eval_list.append(toeplitz_var.evaluate().data)
    mul_lazy_var = MulLazyVariable(*lazy_var_list, matmul_mode='approximate', max_iter=30)
    # dense reference: elementwise (Hadamard) product of the d evaluated kernels
    mul_lazy_var_eval = torch.ones(n, n)
    for i in range(d):
        mul_lazy_var_eval *= (lazy_var_eval_list[i].matmul(torch.eye(lazy_var_eval_list[i].size()[0])))
    vec = torch.randn(n)
    actual = mul_lazy_var_eval.matmul(vec)
    res = mul_lazy_var.matmul(Variable(vec)).data
    # the approximate matmul should agree with the dense reference to within 1% relative error
    assert torch.norm(actual - res) / torch.norm(actual) < 1e-2
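The reference quantity in this test is an elementwise (Hadamard) product of several kernel matrices applied to a vector; a dense sketch of that computation with stand-in random matrices:

import torch

n = 6
mats = [torch.randn(n, n) for _ in range(3)]   # stand-ins for the evaluated kernels

prod = torch.ones(n, n)
for m in mats:
    prod = prod * m                 # Hadamard product accumulates elementwise
vec = torch.randn(n)
reference = prod.matmul(vec)        # what MulLazyVariable.matmul approximates lazily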
def test_trace_log_det_quad_form():
mu_diffs_var = Variable(torch.arange(1, 5, 1))
chol_covar_1_var = Variable(torch.eye(4))
# Test case
c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
c2_var = Variable(torch.Tensor([[6, 0], [1, -1]]), requires_grad=True)
c3_var = Variable(torch.Tensor([7, 2, 1, 0]), requires_grad=True)
diag_var = Variable(torch.Tensor([1]), requires_grad=True)
diag_var_expand = diag_var.expand(4)
toeplitz_1 = ToeplitzLazyVariable(c1_var).evaluate()
kronecker_product = KroneckerProductLazyVariable(c2_var).evaluate()
toeplitz_2 = ToeplitzLazyVariable(c3_var).evaluate()
actual = toeplitz_1 * kronecker_product * toeplitz_2 + diag_var_expand.diag()
# Actual case
mul_lv, diag = make_mul_lazy_var()
t1, t2, t3 = mul_lv.lazy_vars
# Test forward
tldqf_res = mul_lv.trace_log_det_quad_form(mu_diffs_var, chol_covar_1_var)
tldqf_actual = gpytorch._trace_logdet_quad_form_factory_class()(mu_diffs_var, chol_covar_1_var, actual)
assert(math.fabs(tldqf_res.data.squeeze()[0] - tldqf_actual.data.squeeze()[0]) < 1.5)
# Test backwards
tldqf_res.backward()
tldqf_actual.backward()
assert((c1_var.grad.data - t1.column.grad.data).abs().norm() / c1_var.grad.data.abs().norm() < 1e-1)
assert((c2_var.grad.data - t2.columns.grad.data).abs().norm() / c2_var.grad.data.abs().norm() < 1e-1)
assert((c3_var.grad.data - t3.column.grad.data).abs().norm() / c3_var.grad.data.abs().norm() < 1e-1)
assert((diag_var.grad.data - diag.grad.data).abs().norm() / diag_var.grad.data.abs().norm() < 1e-1)
def test_getitem():
res = make_mul_lazy_var()[0][1, 1]
assert torch.norm(res.evaluate().data - (t1_t2_t3_eval + torch.ones(4))[1, 1]) < 1e-3