def test_dot(self):
types = {
'torch.DoubleTensor': 1e-8,
'torch.FloatTensor': 1e-4,
}
for tname, _prec in types.items():
v1 = torch.randn(100).type(tname)
v2 = torch.randn(100).type(tname)
res1 = torch.dot(v1, v2)
res2 = 0
for i, j in zip(v1, v2):
res2 += i * j
self.assertEqual(res1, res2)
# Test 0-strided
for tname, _prec in types.items():
v1 = torch.randn(1).type(tname).expand(100)
v2 = torch.randn(100).type(tname)
res1 = torch.dot(v1, v2)
res2 = 0
for i, j in zip(v1, v2):
res2 += i * j
self.assertEqual(res1, res2)
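For reference, torch.dot reduces two 1-D tensors of equal length to a single scalar inner product. A minimal standalone sketch of the identity the test checks (plain PyTorch, no test harness assumed):

import torch

v1 = torch.randn(100)
v2 = torch.randn(100)

res = torch.dot(v1, v2)                                  # scalar inner product
manual = sum(float(a) * float(b) for a, b in zip(v1, v2))
assert abs(float(res) - manual) < 1e-4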
Example source code for Python dot()
def test(self, dataset):
self.model.eval()
total_loss = 0
predictions = torch.zeros(len(dataset))
indices = torch.arange(1, dataset.num_classes + 1)
    for idx in tqdm(range(len(dataset)), desc='Testing epoch ' + str(self.epoch)):
ltree, lsent, rtree, rsent, label = dataset[idx]
linput, rinput = Var(lsent, volatile=True), Var(rsent, volatile=True)
target = Var(map_label_to_target(label, dataset.num_classes), volatile=True)
if self.args.cuda:
linput, rinput = linput.cuda(), rinput.cuda()
target = target.cuda()
output = self.model(ltree, linput, rtree, rinput)
loss = self.criterion(output, target)
total_loss += loss.data[0]
output = output.data.squeeze().cpu()
predictions[idx] = torch.dot(indices, torch.exp(output))
return total_loss / len(dataset), predictions
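The prediction line computes the expected class index under the model's distribution: the model emits log-probabilities, exp recovers probabilities, and the dot product with indices = (1, ..., K) yields sum_k k * p(k). A minimal sketch of that reduction (modern API; the tensors are illustrative stand-ins):

import torch

log_probs = torch.log_softmax(torch.randn(5), dim=0)   # stand-in for a model output
indices = torch.arange(1, 6, dtype=log_probs.dtype)    # class labels 1..5

expected = torch.dot(indices, torch.exp(log_probs))    # expected label, sum_k k * p(k)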
def test(self, dataset):
self.model.eval()
self.embedding_model.eval()
loss = 0
accuracies = torch.zeros(len(dataset))
output_trees = []
outputs = []
    for idx in tqdm(range(len(dataset)), desc='Testing epoch ' + str(self.epoch)):
tree, sent, label = dataset[idx]
input = Var(sent, volatile=True)
target = Var(torch.LongTensor([int(label)]), volatile=True)
if self.args.cuda:
input = input.cuda()
target = target.cuda()
        emb = torch.unsqueeze(self.embedding_model(input), 1)
output, _, acc, tree = self.model(tree, emb)
err = self.criterion(output, target)
loss += err.data[0]
accuracies[idx] = acc
output_trees.append(tree)
outputs.append(tree.output_softmax.data.numpy())
# predictions[idx] = torch.dot(indices,torch.exp(output.data.cpu()))
return loss/len(dataset), accuracies, outputs, output_trees
def test(self, dataset):
self.model.eval()
loss = 0
predictions = torch.zeros(len(dataset))
indices = torch.arange(1, dataset.num_classes + 1)
    for idx in tqdm(range(len(dataset)), desc='Testing epoch ' + str(self.epoch)):
ltree, lsent, rtree, rsent, label = dataset[idx]
linput, rinput = Var(lsent, volatile=True), Var(rsent, volatile=True)
target = Var(map_label_to_target(label, dataset.num_classes), volatile=True)
if self.args.cuda:
linput, rinput = linput.cuda(), rinput.cuda()
target = target.cuda()
output = self.model(ltree, linput, rtree, rinput)
err = self.criterion(output, target)
loss += err.data[0]
predictions[idx] = torch.dot(indices, torch.exp(output.data.cpu()))
return loss / len(dataset), predictions
def dice_error(input, target):
eps = 0.000001
_, result_ = input.max(1)
result_ = torch.squeeze(result_)
if input.is_cuda:
result = torch.cuda.FloatTensor(result_.size())
target_ = torch.cuda.FloatTensor(target.size())
else:
result = torch.FloatTensor(result_.size())
target_ = torch.FloatTensor(target.size())
result.copy_(result_.data)
target_.copy_(target.data)
target = target_
intersect = torch.dot(result, target)
result_sum = torch.sum(result)
target_sum = torch.sum(target)
union = result_sum + target_sum + 2*eps
    intersect = max(eps, intersect)  # avoid 0/0 without needing numpy here
    # the target volume can be empty - so we still want to
    # end up with a score of 1 if the result is 0/0
    IoU = intersect / union
    # print('union: {:.3f}\t intersect: {:.6f}\t target_sum: {:.0f}\t result_sum: {:.0f}\t dice: {:.7f}'.format(
    #     union, intersect, target_sum, result_sum, 2*IoU))
    return 2*IoU  # 2 * intersect / (|result| + |target|) is the Dice coefficient, despite the IoU name
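Because of the hard argmax, this score is not differentiable. Where a trainable variant is wanted, a common substitute is a soft Dice computed directly on probabilities; a minimal sketch, not taken from this codebase:

import torch

def soft_dice(probs, target, eps=1e-6):
    # differentiable Dice coefficient on flattened probability/label tensors
    probs = probs.contiguous().view(-1)
    target = target.contiguous().view(-1).float()
    intersect = torch.dot(probs, target)
    return (2 * intersect + eps) / (probs.sum() + target.sum() + eps)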
def test(self, dataset):
self.model.eval()
self.embedding_model.eval()
loss = 0
predictions = torch.zeros(len(dataset))
    indices = torch.arange(1, dataset.num_classes + 1)
    for idx in tqdm(range(len(dataset)), desc='Testing epoch ' + str(self.epoch)):
tree, sent, label = dataset[idx]
input = Var(sent, volatile=True)
target = Var(map_label_to_target_sentiment(label,dataset.num_classes, fine_grain=self.args.fine_grain), volatile=True)
if self.args.cuda:
input = input.cuda()
target = target.cuda()
        emb = torch.unsqueeze(self.embedding_model(input), 1)
output, _ = self.model(tree, emb) # size(1,5)
err = self.criterion(output, target)
loss += err.data[0]
        output[:, 1] = -9999  # mask out the middle (neutral) class
val, pred = torch.max(output, 1)
predictions[idx] = pred.data.cpu()[0][0]
# predictions[idx] = torch.dot(indices,torch.exp(output.data.cpu()))
return loss/len(dataset), predictions
def test(self, dataset):
self.model.eval()
loss = 0
predictions = torch.zeros(len(dataset))
    indices = torch.arange(1, dataset.num_classes + 1)
    for idx in tqdm(range(len(dataset)), desc='Testing epoch ' + str(self.epoch)):
        ltree, lsent, rtree, rsent, label = dataset[idx]
        linput, rinput = Var(lsent, volatile=True), Var(rsent, volatile=True)
        target = Var(map_label_to_target(label, dataset.num_classes), volatile=True)
        if self.args.cuda:
            linput, rinput = linput.cuda(), rinput.cuda()
            target = target.cuda()
        output = self.model(ltree, linput, rtree, rinput)
        err = self.criterion(output, target)
        loss += err.data[0]
        predictions[idx] = torch.dot(indices, torch.exp(output.data.cpu()))
    return loss / len(dataset), predictions
def updateOutput(self, input, target):
# - log(input) * target - log(1 - input) * (1 - target)
if input.nelement() != target.nelement():
raise RuntimeError("input and target size mismatch")
    if self.buffer is None:
        self.buffer = input.new()
buffer = self.buffer
weights = self.weights
buffer.resize_as_(input)
if weights is not None and target.dim() != 1:
weights = self.weights.view(1, target.size(1)).expand_as(target)
# log(input) * target
    torch.add(input, self.eps, out=buffer).log_()
if weights is not None:
buffer.mul_(weights)
output = torch.dot(target, buffer)
# log(1 - input) * (1 - target)
    torch.mul(input, -1, out=buffer).add_(1 + self.eps).log_()
if weights is not None:
buffer.mul_(weights)
output = output + torch.sum(buffer)
output = output - torch.dot(target, buffer)
if self.sizeAverage:
output = output / input.nelement()
self.output = - output
return self.output
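The two dot products implement the weighted binary cross-entropy -(sum t*log(x+eps) + sum (1-t)*log(1-x+eps)); the second term is expanded as sum log(1-x+eps) - sum t*log(1-x+eps) so the buffer can be reused. The same reduction in plain tensor code (modern API, illustrative only):

import torch

x = torch.rand(8)                           # predicted probabilities
t = torch.randint(0, 2, (8,)).float()       # binary targets
eps = 1e-12

log_p = torch.log(x + eps)
log_q = torch.log(1 - x + eps)
# -sum(t * log(x)) - sum((1 - t) * log(1 - x)), written with dot products
bce = -(torch.dot(t, log_p) + log_q.sum() - torch.dot(t, log_q))
bce = bce / x.nelement()                    # sizeAverage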
def test_conv2(self):
    # ported from Lua Torch: torch.uniform and 1-based indexing do not exist in
    # Python, so this assumes `import random` and uses 0-based indexing
    x = torch.rand(math.floor(random.uniform(50, 100)), math.floor(random.uniform(50, 100)))
    k = torch.rand(math.floor(random.uniform(10, 20)), math.floor(random.uniform(10, 20)))
    imvc = torch.conv2(x, k)
    imvc2 = torch.conv2(x, k, 'V')
    imfc = torch.conv2(x, k, 'F')
    ki = k.clone()
    ks = k.storage()
    kis = ki.storage()
    # reverse the kernel storage; convolution equals correlation with a flipped kernel
    # (Lua original: for i=ks.size(), 1, -1 do kis[ks.size()-i+1]=ks[i] end)
    for i in range(ks.size()):
        kis[ks.size() - 1 - i] = ks[i]
    imvx = torch.xcorr2(x, ki)
    imvx2 = torch.xcorr2(x, ki, 'V')
    imfx = torch.xcorr2(x, ki, 'F')
    self.assertEqual(imvc, imvc2, 0, 'torch.conv2')
    self.assertEqual(imvc, imvx, 0, 'torch.conv2')
    self.assertEqual(imvc, imvx2, 0, 'torch.conv2')
    self.assertEqual(imfc, imfx, 0, 'torch.conv2')
    self.assertLessEqual(abs(x.dot(x) - torch.xcorr2(x, x)[0][0]), 1e-10, 'torch.conv2')
    xx = torch.Tensor(2, x.size(0), x.size(1))
    xx[0].copy_(x)
    xx[1].copy_(x)
    kk = torch.Tensor(2, k.size(0), k.size(1))
    kk[0].copy_(k)
    kk[1].copy_(k)
    immvc = torch.conv2(xx, kk)
    immvc2 = torch.conv2(xx, kk, 'V')
    immfc = torch.conv2(xx, kk, 'F')
    self.assertEqual(immvc[0], immvc[1], 0, 'torch.conv2')
    self.assertEqual(immvc[0], imvc, 0, 'torch.conv2')
    self.assertEqual(immvc2[0], imvc2, 0, 'torch.conv2')
    self.assertEqual(immfc[0], immfc[1], 0, 'torch.conv2')
    self.assertEqual(immfc[0], imfc, 0, 'torch.conv2')
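The storage-reversal loop encodes the identity conv(x, k) = xcorr(x, flip(k)). In modern PyTorch, F.conv2d actually computes cross-correlation, so the identity can be cross-checked against SciPy like this (a sketch, not part of the original test):

import numpy as np
import torch
import torch.nn.functional as F
from scipy.signal import convolve2d, correlate2d

x = np.random.randn(16, 16).astype(np.float32)
k = np.random.randn(3, 3).astype(np.float32)

# F.conv2d computes cross-correlation, so it matches correlate2d directly,
# and matches convolve2d only after flipping the kernel
xt = torch.from_numpy(x).view(1, 1, 16, 16)
kt = torch.from_numpy(k).view(1, 1, 3, 3)
out = F.conv2d(xt, kt)[0, 0].numpy()

assert np.allclose(out, correlate2d(x, k, mode='valid'), atol=1e-4)
assert np.allclose(out, convolve2d(x, k[::-1, ::-1], mode='valid'), atol=1e-4)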
def symbolic_kernel(self, X):
    if self.kernel_type == 'linear':
        # torch.mm, not torch.dot: both operands are 2-D and a full kernel matrix is wanted
        K = self.alpha * torch.mm(X, self.X_kernel.transpose(0, 1)) + self.c
    elif self.kernel_type == 'poly':
        K = (self.alpha * torch.mm(X, self.X_kernel.transpose(0, 1)) + self.c) ** self.degree
elif self.kernel_type == 'rbf':
D = sym_distance_matrix(X, self.X_kernel, self_similarity=False)
K = torch.exp(-D ** 2 / (self.sigma_kernel ** 2))
else:
raise Exception('Unknown kernel type: ', self.kernel_type)
return K
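For the RBF branch, the same kernel can be sketched self-contained with modern PyTorch, where torch.cdist plays the role that sym_distance_matrix presumably has here (the names are illustrative assumptions):

import torch

def rbf_kernel(X, Y, sigma):
    # K[i, j] = exp(-||x_i - y_j||^2 / sigma^2); a sketch of the 'rbf' branch
    D = torch.cdist(X, Y)                    # pairwise Euclidean distances
    return torch.exp(-D ** 2 / (sigma ** 2))

K = rbf_kernel(torch.randn(5, 3), torch.randn(7, 3), sigma=1.0)  # -> (5, 7)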
def forward(self, input):
    # (outp, inp * kw): one row of flattened window weights per output feature
    weights = self.weight.view(self.outp, self.inp * self.kw)
    bias = self.bias
    nOutputFrame = (input.size(0) - self.kw) // self.dw + 1
    output = Variable(torch.FloatTensor(nOutputFrame, self.outp))
    for i in range(nOutputFrame):  # one output frame per kernel window
        # flatten the kw frames of the window starting at i * dw
        window = input[i * self.dw:i * self.dw + self.kw].contiguous().view(-1)
        output[i] = torch.mv(weights, window) + bias
    return output
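The per-window loop can also be vectorized: Tensor.unfold extracts every kw-frame window at stride dw, turning the layer into one matrix multiply. A standalone sketch under the same shape assumptions (all names here are illustrative):

import torch

seq_len, inp, outp, kw, dw = 10, 4, 6, 3, 1
input = torch.randn(seq_len, inp)
weight = torch.randn(outp, kw * inp)
bias = torch.randn(outp)

windows = input.unfold(0, kw, dw)                        # (nOutputFrame, inp, kw)
windows = windows.transpose(1, 2).contiguous().view(-1, kw * inp)
output = windows @ weight.t() + bias                     # (nOutputFrame, outp)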
def updateOutput(self, input, target):
# - log(input) * target - log(1 - input) * (1 - target)
if input.nelement() != target.nelement():
raise RuntimeError("input and target size mismatch")
if self.buffer is None:
self.buffer = input.new()
buffer = self.buffer
weights = self.weights
buffer.resize_as_(input)
if weights is not None and target.dim() != 1:
weights = self.weights.view(1, target.size(1)).expand_as(target)
# log(input) * target
torch.add(input, self.eps, out=buffer).log_()
if weights is not None:
buffer.mul_(weights)
target_1d = target.contiguous().view(-1)
# don't save a 1-d view of buffer: it should already be contiguous, and it's
# used as non-1d tensor later.
output = torch.dot(target_1d, buffer.contiguous().view(-1))
# log(1 - input) * (1 - target)
torch.mul(input, -1, out=buffer).add_(1 + self.eps).log_()
if weights is not None:
buffer.mul_(weights)
output = output + torch.sum(buffer)
output = output - torch.dot(target_1d, buffer.contiguous().view(-1))
if self.sizeAverage:
output = output / input.nelement()
self.output = - output
return self.output
def after_apply(self):
# compute running average of gradient and norm of gradient
beta = self._beta
global_state = self._global_state
if self._iter == 0:
global_state["grad_norm_squared_avg"] = 0.0
global_state["grad_norm_squared"] = 0.0
for group in self._optimizer.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
            # torch.dot requires 1-D inputs, so accumulate the squared gradient
            # norm with an elementwise product (grad may be multi-dimensional)
            global_state['grad_norm_squared'] += torch.sum(grad * grad)
global_state['grad_norm_squared_avg'] = \
global_state['grad_norm_squared_avg'] * beta + (1 - beta) * global_state['grad_norm_squared']
# global_state['grad_norm_squared_avg'].mul_(beta).add_(1 - beta, global_state['grad_norm_squared'] )
self.curvature_range()
self.grad_variance()
self.dist_to_opt()
if self._iter > 0:
self.get_mu()
self.get_lr()
self._lr = beta * self._lr + (1 - beta) * self._lr_t
self._mu = beta * self._mu + (1 - beta) * self._mu_t
return
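torch.dot only accepts 1-D inputs, which is why the squared gradient norm above is accumulated with an elementwise product; flattening restores the dot-product form. A quick sketch of the equivalence:

import torch

grad = torch.randn(3, 4, 5)
a = torch.sum(grad * grad)
b = torch.dot(grad.view(-1), grad.view(-1))   # same squared norm, flat dot product
assert torch.allclose(a, b)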
def test_net_forward(self):
model = Net()
print(model)
self.assertEqual(model.conv1.out_channels, model.conv2.out_channels)
self.assertEqual(model.conv1.out_channels, model.conv3.in_channels)
self.assertEqual(model.conv2.out_channels, model.conv3.in_channels)
self.assertEqual(model.conv3.out_channels, model.conv4.in_channels)
# simple forward pass
input = Variable(torch.rand(1, 1, 4) * 2 - 1)
output = model(input)
self.assertEqual(output.size(), (1, 2, 4))
# feature split
model.conv1.split_feature(feature_i=1)
model.conv2.split_feature(feature_i=3)
print(model)
self.assertEqual(model.conv1.out_channels, model.conv2.out_channels)
self.assertEqual(model.conv1.out_channels, model.conv3.in_channels)
self.assertEqual(model.conv2.out_channels, model.conv3.in_channels)
self.assertEqual(model.conv3.out_channels, model.conv4.in_channels)
output2 = model(input)
diff = output - output2
dot = torch.dot(diff.view(-1), diff.view(-1))
    # the split should preserve the function, so the squared error should be ~0
    # self.assertTrue(np.isclose(dot.data[0], 0., atol=1e-2))
    print("sum of squared differences:", dot.data[0])
def score(self, hidden, encoder_output):
if self.method == 'dot':
energy = torch.dot(hidden.view(-1), encoder_output.view(-1))
return energy
    elif self.method == 'general':
        energy = self.attn(encoder_output)
        # score against the transformed encoder output, not the raw one
        energy = torch.dot(hidden.view(-1), energy.view(-1))
        return energy
def cos_distance(self, a, b):
    # despite the name, this returns the cosine similarity; 1 minus it is the distance
    return torch.dot(a, b) / (torch.norm(a) * torch.norm(b))
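For 1-D inputs this matches PyTorch's built-in helper; a quick cross-check (modern API):

import torch
import torch.nn.functional as F

a, b = torch.randn(16), torch.randn(16)
manual = torch.dot(a, b) / (torch.norm(a) * torch.norm(b))
builtin = F.cosine_similarity(a.unsqueeze(0), b.unsqueeze(0)).squeeze()
assert torch.allclose(manual, builtin, atol=1e-6)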
def dot(x, y):
def _dot(X):
x, y = X
x_ndim = ndim(x)
y_ndim = ndim(y)
if x_ndim == 2 and y_ndim == 2:
return torch.mm(x, y)
if x_ndim == 2 and y_ndim == 1:
return torch.mv(x, y)
        if x_ndim == 1 and y_ndim == 2:
            # numpy-style dot: x (n,) with y (n, m) -> (m,)
            return torch.mv(y.transpose(0, 1), x)
if x_ndim == 1 and y_ndim == 1:
return torch.dot(x, y)
else:
raise Exception('Unsupported tensor ranks for dot operation : ' + str(x_ndim) + ' and ' + str(y_ndim) + '.')
def _compute_output_shape(X):
x, y = _get_shape(X[0]), _get_shape(X[1])
x_ndim = len(x)
y_ndim = len(y)
if x_ndim == 2 and y_ndim == 2:
return (x[0], y[1])
if x_ndim == 2 and y_ndim == 1:
return (x[0],)
        if x_ndim == 1 and y_ndim == 2:
            return (y[1],)
if x_ndim == 1 and y_ndim == 1:
return (0,)
return get_op(_dot, output_shape=_compute_output_shape)([x, y])
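For reference, the rank dispatch the wrapper performs maps onto these plain torch calls (shapes annotated; the 1-D by 2-D case follows the numpy-style convention noted above):

import torch

A, B = torch.randn(2, 3), torch.randn(3, 4)
v, w = torch.randn(3), torch.randn(3)

torch.mm(A, B)          # (2, 3) @ (3, 4) -> (2, 4)
torch.mv(A, v)          # (2, 3) @ (3,)   -> (2,)
torch.mv(B.t(), v)      # (3,)   @ (3, 4) -> (4,)   vector on the left
torch.dot(v, w)         # (3,)   . (3,)   -> scalar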
def forward(self, input, target, save=True):
if save:
self.save_for_backward(input, target)
eps = 0.000001
_, result_ = input.max(1)
result_ = torch.squeeze(result_)
if input.is_cuda:
result = torch.cuda.FloatTensor(result_.size())
self.target_ = torch.cuda.FloatTensor(target.size())
else:
result = torch.FloatTensor(result_.size())
self.target_ = torch.FloatTensor(target.size())
result.copy_(result_)
self.target_.copy_(target)
target = self.target_
# print(input)
intersect = torch.dot(result, target)
# binary values so sum the same as sum of squares
result_sum = torch.sum(result)
target_sum = torch.sum(target)
union = result_sum + target_sum + (2*eps)
# the target volume can be empty - so we still want to
# end up with a score of 1 if the result is 0/0
IoU = intersect / union
    print('union: {:.3f}\t intersect: {:.6f}\t target_sum: {:.0f}\t result_sum: {:.0f}\t dice: {:.7f}'.format(
        union, intersect, target_sum, result_sum, 2*IoU))
    out = torch.FloatTensor(1).fill_(2*IoU)  # 2 * intersect / (|result| + |target|) is the Dice coefficient
self.intersect, self.union = intersect, union
return out
def lovasz_binary(margins, label, prox=False, max_steps=20, debug={}):
    # 1-d vector inputs
    # workaround: Variables cannot be sorted directly, so sort the underlying data
    # prox: False, or a lambda regularization value for the proximal variant
_, perm = torch.sort(margins.data, dim=0, descending=True)
margins_sorted = margins[perm]
grad = gamma_fast(label, perm)
loss = torch.dot(F.relu(margins_sorted), Variable(grad))
if prox is not False:
xp, gam = find_proximal(margins_sorted.data, grad, prox, max_steps=max_steps, eps=1e-6, debug=debug)
hook = margins_sorted.register_hook(lambda grad: Variable(margins_sorted.data - xp))
return loss, hook, gam
else:
return loss
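gamma_fast is not shown on this page; for reference, the gradient of the Jaccard loss with respect to the sorted errors is usually computed as below (a sketch following the published Lovász-Softmax derivation, where gt_sorted is the label vector permuted like the margins):

import torch

def lovasz_grad(gt_sorted):
    # gradient of the Jaccard loss w.r.t. sorted errors (reference sketch)
    p = len(gt_sorted)
    gts = gt_sorted.float().sum()
    intersection = gts - gt_sorted.float().cumsum(0)
    union = gts + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - intersection / union
    if p > 1:
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard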