def test_conv3(self):
    """Check torch.conv3 against torch.xcorr3 with a flipped kernel.

    conv3(x, k) must equal xcorr3(x, reverse(k)), 'V' (valid) must be the
    default mode, and batched inputs must reproduce the single-sample
    result in every slice.
    """
    import random  # local import: sizes come from Python's RNG, not torch

    # Random 3-D input volume and kernel (torch.uniform is not a Python
    # API; the Lua original's torch.uniform maps to random.uniform here).
    x = torch.rand(math.floor(random.uniform(20, 40)),
                   math.floor(random.uniform(20, 40)),
                   math.floor(random.uniform(20, 40)))
    k = torch.rand(math.floor(random.uniform(5, 10)),
                   math.floor(random.uniform(5, 10)),
                   math.floor(random.uniform(5, 10)))
    imvc = torch.conv3(x, k)
    imvc2 = torch.conv3(x, k, 'V')
    imfc = torch.conv3(x, k, 'F')
    # Build ki = k with its underlying storage fully reversed, so that
    # cross-correlation with ki equals convolution with k.
    ki = k.clone()
    ks = k.storage()
    kis = ki.storage()
    for i in range(ks.size()):
        # 0-based reversal (the Lua original used 1-based indices)
        kis[ks.size() - 1 - i] = ks[i]
    imvx = torch.xcorr3(x, ki)
    imvx2 = torch.xcorr3(x, ki, 'V')
    imfx = torch.xcorr3(x, ki, 'F')
    self.assertEqual(imvc, imvc2, 0, 'torch.conv3')
    self.assertEqual(imvc, imvx, 0, 'torch.conv3')
    self.assertEqual(imvc, imvx2, 0, 'torch.conv3')
    self.assertEqual(imfc, imfx, 0, 'torch.conv3')
    # Autocorrelation at full overlap equals x . x (abs, not math.abs,
    # which does not exist in Python's math module).
    self.assertLessEqual(abs(x.dot(x) - torch.xcorr3(x, x)[0][0][0]), 4e-10, 'torch.conv3')
    # Batched check: stack two copies of x/k and verify each output slice.
    xx = torch.Tensor(2, x.size(0), x.size(1), x.size(2))
    xx[0].copy_(x)
    xx[1].copy_(x)
    kk = torch.Tensor(2, k.size(0), k.size(1), k.size(2))
    kk[0].copy_(k)
    kk[1].copy_(k)
    immvc = torch.conv3(xx, kk)
    immvc2 = torch.conv3(xx, kk, 'V')
    immfc = torch.conv3(xx, kk, 'F')
    self.assertEqual(immvc[0], immvc[1], 0, 'torch.conv3')
    self.assertEqual(immvc[0], imvc, 0, 'torch.conv3')
    self.assertEqual(immvc2[0], imvc2, 0, 'torch.conv3')
    self.assertEqual(immfc[0], immfc[1], 0, 'torch.conv3')
    self.assertEqual(immfc[0], imfc, 0, 'torch.conv3')
Python source-code examples of dot()
def __init__(self, data, input_dimensionality, output_dimensionality, kernel_type='rbf',
             degree=2, sigma=0, kernel_scaling=1, c=1, scaler=None):
    """
    Creates a Kernel SEF object
    :param data: the data to be used by the kernel
    :param input_dimensionality: dimensionality of the input space
    :param output_dimensionality: dimensionality of the target space
    :param kernel_type: supported kernel: 'rbf', 'poly', and 'linear'
    :param degree: degree of the polynomial kernel
    :param sigma: the sigma value for the RBF kernel (0 = estimate from data)
    :param kernel_scaling: scaling parameter for the kernel
    :param c: constant kernel param for linear and poly kernels
    :param scaler: the sklearn-compatible scaler (or None)
    """
    # Call base constructor
    SEF_Base.__init__(self, input_dimensionality, output_dimensionality, scaler=scaler)

    # Adjustable parameters
    self.kernel_type = kernel_type
    self.degree = degree
    self.sigma_kernel = np.float32(sigma)
    self.alpha = kernel_scaling
    self.c = c

    # If a scaler is used, fit it!
    if self.scaler is None:
        data = np.float32(data)
    else:
        data = np.float32(self.scaler.fit_transform(data))

    # Only pick a sigma when the caller did not supply one: estimate it
    # from the data for the RBF kernel, otherwise fall back to 1 so the
    # gamma computed below stays finite. (Previously a user-supplied
    # sigma was unconditionally overwritten with 1.)
    if sigma == 0:
        if self.kernel_type == 'rbf':
            self.sigma_kernel = np.float32(mean_data_distance(data))
        else:
            self.sigma_kernel = np.float32(1)

    # Use kPCA for initialization
    kpca = KernelPCA(kernel=self.kernel_type, n_components=self.output_dimensionality,
                     gamma=(1.0 / (self.sigma_kernel ** 2)), degree=self.degree,
                     eigen_solver='dense')
    kpca.fit(data)
    A = kpca.alphas_
    # Scale the coefficients to have unit norm (avoid rescaling)
    A = A / np.sqrt(np.diag(np.dot(A.T, np.dot(np.dot(data, data.T), A))))

    # Model parameters
    self.X_kernel = Variable(torch.from_numpy(np.float32(data)), requires_grad=False)
    self.A = Variable(torch.from_numpy(np.float32(A)), requires_grad=True)
    self.trainable_params = [self.A]
    self.non_trainable_params = [self.X_kernel]
def test_conv3(self):
    """Check torch.conv3 against torch.xcorr3 with a flipped kernel.

    conv3(x, k) must equal xcorr3(x, reverse(k)), 'V' (valid) must be the
    default mode, and batched inputs must reproduce the single-sample
    result in every slice.
    """
    import random  # local import: sizes come from Python's RNG, not torch

    # Random 3-D input volume and kernel (torch.uniform is not a Python
    # API; the Lua original's torch.uniform maps to random.uniform here).
    x = torch.rand(math.floor(random.uniform(20, 40)),
                   math.floor(random.uniform(20, 40)),
                   math.floor(random.uniform(20, 40)))
    k = torch.rand(math.floor(random.uniform(5, 10)),
                   math.floor(random.uniform(5, 10)),
                   math.floor(random.uniform(5, 10)))
    imvc = torch.conv3(x, k)
    imvc2 = torch.conv3(x, k, 'V')
    imfc = torch.conv3(x, k, 'F')
    # Build ki = k with its underlying storage fully reversed, so that
    # cross-correlation with ki equals convolution with k.
    ki = k.clone()
    ks = k.storage()
    kis = ki.storage()
    for i in range(ks.size()):
        # 0-based reversal (the Lua original used 1-based indices)
        kis[ks.size() - 1 - i] = ks[i]
    imvx = torch.xcorr3(x, ki)
    imvx2 = torch.xcorr3(x, ki, 'V')
    imfx = torch.xcorr3(x, ki, 'F')
    self.assertEqual(imvc, imvc2, 0, 'torch.conv3')
    self.assertEqual(imvc, imvx, 0, 'torch.conv3')
    self.assertEqual(imvc, imvx2, 0, 'torch.conv3')
    self.assertEqual(imfc, imfx, 0, 'torch.conv3')
    # Autocorrelation at full overlap equals x . x (abs, not math.abs,
    # which does not exist in Python's math module).
    self.assertLessEqual(abs(x.dot(x) - torch.xcorr3(x, x)[0][0][0]), 4e-10, 'torch.conv3')
    # Batched check: stack two copies of x/k and verify each output slice.
    xx = torch.Tensor(2, x.size(0), x.size(1), x.size(2))
    xx[0].copy_(x)
    xx[1].copy_(x)
    kk = torch.Tensor(2, k.size(0), k.size(1), k.size(2))
    kk[0].copy_(k)
    kk[1].copy_(k)
    immvc = torch.conv3(xx, kk)
    immvc2 = torch.conv3(xx, kk, 'V')
    immfc = torch.conv3(xx, kk, 'F')
    self.assertEqual(immvc[0], immvc[1], 0, 'torch.conv3')
    self.assertEqual(immvc[0], imvc, 0, 'torch.conv3')
    self.assertEqual(immvc2[0], imvc2, 0, 'torch.conv3')
    self.assertEqual(immfc[0], immfc[1], 0, 'torch.conv3')
    self.assertEqual(immfc[0], imfc, 0, 'torch.conv3')
def test_conv3(self):
    """Check torch.conv3 against torch.xcorr3 with a flipped kernel.

    conv3(x, k) must equal xcorr3(x, reverse(k)), 'V' (valid) must be the
    default mode, and batched inputs must reproduce the single-sample
    result in every slice.
    """
    import random  # local import: sizes come from Python's RNG, not torch

    # Random 3-D input volume and kernel (torch.uniform is not a Python
    # API; the Lua original's torch.uniform maps to random.uniform here).
    x = torch.rand(math.floor(random.uniform(20, 40)),
                   math.floor(random.uniform(20, 40)),
                   math.floor(random.uniform(20, 40)))
    k = torch.rand(math.floor(random.uniform(5, 10)),
                   math.floor(random.uniform(5, 10)),
                   math.floor(random.uniform(5, 10)))
    imvc = torch.conv3(x, k)
    imvc2 = torch.conv3(x, k, 'V')
    imfc = torch.conv3(x, k, 'F')
    # Build ki = k with its underlying storage fully reversed, so that
    # cross-correlation with ki equals convolution with k.
    ki = k.clone()
    ks = k.storage()
    kis = ki.storage()
    for i in range(ks.size()):
        # 0-based reversal (the Lua original used 1-based indices)
        kis[ks.size() - 1 - i] = ks[i]
    imvx = torch.xcorr3(x, ki)
    imvx2 = torch.xcorr3(x, ki, 'V')
    imfx = torch.xcorr3(x, ki, 'F')
    self.assertEqual(imvc, imvc2, 0, 'torch.conv3')
    self.assertEqual(imvc, imvx, 0, 'torch.conv3')
    self.assertEqual(imvc, imvx2, 0, 'torch.conv3')
    self.assertEqual(imfc, imfx, 0, 'torch.conv3')
    # Autocorrelation at full overlap equals x . x (abs, not math.abs,
    # which does not exist in Python's math module).
    self.assertLessEqual(abs(x.dot(x) - torch.xcorr3(x, x)[0][0][0]), 4e-10, 'torch.conv3')
    # Batched check: stack two copies of x/k and verify each output slice.
    xx = torch.Tensor(2, x.size(0), x.size(1), x.size(2))
    xx[0].copy_(x)
    xx[1].copy_(x)
    kk = torch.Tensor(2, k.size(0), k.size(1), k.size(2))
    kk[0].copy_(k)
    kk[1].copy_(k)
    immvc = torch.conv3(xx, kk)
    immvc2 = torch.conv3(xx, kk, 'V')
    immfc = torch.conv3(xx, kk, 'F')
    self.assertEqual(immvc[0], immvc[1], 0, 'torch.conv3')
    self.assertEqual(immvc[0], imvc, 0, 'torch.conv3')
    self.assertEqual(immvc2[0], imvc2, 0, 'torch.conv3')
    self.assertEqual(immfc[0], immfc[1], 0, 'torch.conv3')
    self.assertEqual(immfc[0], imfc, 0, 'torch.conv3')
def backward(self, gradient, image):
    """Backpropagate `gradient` (w.r.t. the model output) to the input image.

    :param gradient: 1-D numpy array, one entry per output logit
    :param image: single unbatched input image (numpy array)
    :return: gradient w.r.t. `image`, same shape as `image`
    """
    # lazy import
    import torch
    from torch.autograd import Variable

    assert gradient.ndim == 1
    gradient = torch.from_numpy(gradient)
    if self.cuda:  # pragma: no cover
        gradient = gradient.cuda()
    gradient = Variable(gradient)

    image = self._process_input(image)
    assert image.ndim == 3
    # Add a batch axis of size 1 before feeding the model.
    images = image[np.newaxis]
    images = torch.from_numpy(images)
    if self.cuda:  # pragma: no cover
        images = images.cuda()
    images = Variable(images, requires_grad=True)

    predictions = self._model(images)
    predictions = predictions[0]
    assert gradient.dim() == 1
    assert predictions.dim() == 1
    assert gradient.size() == predictions.size()

    # dot(predictions, gradient) makes d(loss)/d(images) equal to the
    # requested vector-Jacobian product; should be the same as
    # predictions.backward(gradient=gradient).
    loss = torch.dot(predictions, gradient)
    loss.backward()

    grad = images.grad
    grad = grad.data
    if self.cuda:  # pragma: no cover
        grad = grad.cpu()
    grad = grad.numpy()
    grad = self._process_gradient(grad)
    # Drop the batch axis added above.
    grad = np.squeeze(grad, axis=0)
    assert grad.shape == image.shape
    return grad
def test_conv3(self):
    """Check torch.conv3 against torch.xcorr3 with a flipped kernel.

    conv3(x, k) must equal xcorr3(x, reverse(k)), 'V' (valid) must be the
    default mode, and batched inputs must reproduce the single-sample
    result in every slice.
    """
    import random  # local import: sizes come from Python's RNG, not torch

    # Random 3-D input volume and kernel (torch.uniform is not a Python
    # API; the Lua original's torch.uniform maps to random.uniform here).
    x = torch.rand(math.floor(random.uniform(20, 40)),
                   math.floor(random.uniform(20, 40)),
                   math.floor(random.uniform(20, 40)))
    k = torch.rand(math.floor(random.uniform(5, 10)),
                   math.floor(random.uniform(5, 10)),
                   math.floor(random.uniform(5, 10)))
    imvc = torch.conv3(x, k)
    imvc2 = torch.conv3(x, k, 'V')
    imfc = torch.conv3(x, k, 'F')
    # Build ki = k with its underlying storage fully reversed, so that
    # cross-correlation with ki equals convolution with k.
    ki = k.clone()
    ks = k.storage()
    kis = ki.storage()
    for i in range(ks.size()):
        # 0-based reversal (the Lua original used 1-based indices)
        kis[ks.size() - 1 - i] = ks[i]
    imvx = torch.xcorr3(x, ki)
    imvx2 = torch.xcorr3(x, ki, 'V')
    imfx = torch.xcorr3(x, ki, 'F')
    self.assertEqual(imvc, imvc2, 0, 'torch.conv3')
    self.assertEqual(imvc, imvx, 0, 'torch.conv3')
    self.assertEqual(imvc, imvx2, 0, 'torch.conv3')
    self.assertEqual(imfc, imfx, 0, 'torch.conv3')
    # Autocorrelation at full overlap equals x . x (abs, not math.abs,
    # which does not exist in Python's math module).
    self.assertLessEqual(abs(x.dot(x) - torch.xcorr3(x, x)[0][0][0]), 4e-10, 'torch.conv3')
    # Batched check: stack two copies of x/k and verify each output slice.
    xx = torch.Tensor(2, x.size(0), x.size(1), x.size(2))
    xx[0].copy_(x)
    xx[1].copy_(x)
    kk = torch.Tensor(2, k.size(0), k.size(1), k.size(2))
    kk[0].copy_(k)
    kk[1].copy_(k)
    immvc = torch.conv3(xx, kk)
    immvc2 = torch.conv3(xx, kk, 'V')
    immfc = torch.conv3(xx, kk, 'F')
    self.assertEqual(immvc[0], immvc[1], 0, 'torch.conv3')
    self.assertEqual(immvc[0], imvc, 0, 'torch.conv3')
    self.assertEqual(immvc2[0], imvc2, 0, 'torch.conv3')
    self.assertEqual(immfc[0], immfc[1], 0, 'torch.conv3')
    self.assertEqual(immfc[0], imfc, 0, 'torch.conv3')
def forward(self, qu, w, e_p):
    """Score candidate answers for a question over a batch of documents.

    :param qu: question token indices (LongTensor)
    :param w: document token indices (LongTensor), one row per document
    :param e_p: per-document entity position (iterable of int indices)
    :return: log-softmax scores over the embedding vocabulary, shape (1, vocab)
    """
    qu = Variable(qu)
    w = Variable(w)
    embed_q = self.embed(qu)
    embed_w = self.embed(w)
    s_ = embed_w.size()
    b_size = s_[0]
    # Replicate the initial document hidden state across the batch.
    h0_doc = Variable(torch.cat([self.h0_doc for _ in range(b_size)], 1))
    out_qus, h_qus = self.rnn_qus(embed_q, self.h0_q)
    out_doc, h_doc = self.rnn_doc(embed_w, h0_doc)
    # Bidirectional question summary: forward state at the last step
    # concatenated with backward state at the first step.
    q_state = torch.cat([out_qus[0, -1, :self.config.rnn_fea_size],
                         out_qus[0, 0, self.config.rnn_fea_size:]], 0)
    # Token-level attention over the title token (position 1 -- TODO
    # confirm intended; position 0 looks unused) and the entity token.
    doc_tit_ent = []
    doc_states = []
    for i, k in enumerate(e_p):
        # memory: title and entity hidden states stacked
        t_e_v = self.cat(out_doc[i, 1], out_doc[i, k])
        # dot product of each with the question summary
        title = torch.dot(out_doc[i, 1], q_state)
        entity = torch.dot(out_doc[i, k], q_state)
        token_att = torch.cat([title, entity], 0).unsqueeze(0)
        s_m = F.softmax(token_att)
        # attention-weighted mix of title/entity vectors
        att_v = torch.mm(s_m, t_e_v)
        doc_tit_ent.append(att_v)
        # concatenate start and end states as the document summary
        state_ = torch.cat([out_doc[i, -1, :self.config.rnn_fea_size],
                            out_doc[i, 0, self.config.rnn_fea_size:]], 0)
        doc_states.append(state_.unsqueeze(0))
    t_e_vecs = torch.cat(doc_tit_ent, 0)
    # Sentence-level attention: weight each document by its similarity
    # to the question summary.
    doc_states_v = torch.cat(doc_states, 0)
    doc_dot = torch.mm(doc_states_v, q_state.unsqueeze(1))
    doc_sm = F.softmax(doc_dot)
    t_doc_feat = torch.add(doc_states_v, t_e_vecs)
    doc_feat = torch.mm(doc_sm.view(1, -1), t_doc_feat)
    # Score every vocabulary embedding against the pooled document feature.
    score = torch.mm(self.embed.weight, doc_feat.view(-1, 1)).view(1, -1)
    score_n = F.log_softmax(score)
    return score_n
def test_conv3(self):
    """Check torch.conv3 against torch.xcorr3 with a flipped kernel.

    conv3(x, k) must equal xcorr3(x, reverse(k)), 'V' (valid) must be the
    default mode, and batched inputs must reproduce the single-sample
    result in every slice.
    """
    import random  # local import: sizes come from Python's RNG, not torch

    # Random 3-D input volume and kernel (torch.uniform is not a Python
    # API; the Lua original's torch.uniform maps to random.uniform here).
    x = torch.rand(math.floor(random.uniform(20, 40)),
                   math.floor(random.uniform(20, 40)),
                   math.floor(random.uniform(20, 40)))
    k = torch.rand(math.floor(random.uniform(5, 10)),
                   math.floor(random.uniform(5, 10)),
                   math.floor(random.uniform(5, 10)))
    imvc = torch.conv3(x, k)
    imvc2 = torch.conv3(x, k, 'V')
    imfc = torch.conv3(x, k, 'F')
    # Build ki = k with its underlying storage fully reversed, so that
    # cross-correlation with ki equals convolution with k.
    ki = k.clone()
    ks = k.storage()
    kis = ki.storage()
    for i in range(ks.size()):
        # 0-based reversal (the Lua original used 1-based indices)
        kis[ks.size() - 1 - i] = ks[i]
    imvx = torch.xcorr3(x, ki)
    imvx2 = torch.xcorr3(x, ki, 'V')
    imfx = torch.xcorr3(x, ki, 'F')
    self.assertEqual(imvc, imvc2, 0, 'torch.conv3')
    self.assertEqual(imvc, imvx, 0, 'torch.conv3')
    self.assertEqual(imvc, imvx2, 0, 'torch.conv3')
    self.assertEqual(imfc, imfx, 0, 'torch.conv3')
    # Autocorrelation at full overlap equals x . x (abs, not math.abs,
    # which does not exist in Python's math module).
    self.assertLessEqual(abs(x.dot(x) - torch.xcorr3(x, x)[0][0][0]), 4e-10, 'torch.conv3')
    # Batched check: stack two copies of x/k and verify each output slice.
    xx = torch.Tensor(2, x.size(0), x.size(1), x.size(2))
    xx[0].copy_(x)
    xx[1].copy_(x)
    kk = torch.Tensor(2, k.size(0), k.size(1), k.size(2))
    kk[0].copy_(k)
    kk[1].copy_(k)
    immvc = torch.conv3(xx, kk)
    immvc2 = torch.conv3(xx, kk, 'V')
    immfc = torch.conv3(xx, kk, 'F')
    self.assertEqual(immvc[0], immvc[1], 0, 'torch.conv3')
    self.assertEqual(immvc[0], imvc, 0, 'torch.conv3')
    self.assertEqual(immvc2[0], imvc2, 0, 'torch.conv3')
    self.assertEqual(immfc[0], immfc[1], 0, 'torch.conv3')
    self.assertEqual(immfc[0], imfc, 0, 'torch.conv3')
def find_proximal(x0, gam, lam, eps=1e-6, max_steps=20, debug={}):
    """Active-set proximal search along direction `gam` from sorted margins `x0`.

    Walks x along -gam in piecewise steps, merging coordinates into equality
    groups ("members") via `project` as they collide, until the proximal
    stopping condition (scaled by 1/lam) is met, the active set empties, or
    `max_steps` is exhausted. Returns the final `(x, gam)` pair; `gam` is
    modified in place by `project`.

    NOTE(review): `debug={}` is a mutable default argument shared across
    calls; callers opt in by pre-populating "path"/"step"/"finished" keys,
    which this function then fills. Confirm before changing to `debug=None`.
    NOTE(review): `project` and `compute_step_length` are defined elsewhere
    in this module; their exact semantics are assumed, not shown here.

    :param x0: sorted margins data (1-D tensor)
    :param gam: initial gamma_fast(target, perm) direction (1-D tensor)
    :param lam: regularisation parameter
    :param eps: tolerance for activity and coordinate-equality tests
    :param max_steps: cap on line-search iterations
    :param debug: optional dict; filled with "path"/"step"/"finished" if those keys exist
    """
    # x0: sorted margins data
    # gam: initial gamma_fast(target, perm)
    # regularisation parameter lam
    x = x0.clone()
    # Coordinates >= eps are "active"; act holds their indices.
    act = (x >= eps).nonzero()
    finished = False
    if not act.size():
        # Nothing active: skip the loop entirely.
        finished = True
    else:
        # Index of the last (largest-index) active coordinate.
        active = act[-1, 0]
        # members[i] is the equality group (set of indices) coordinate i belongs to.
        members = {i: {i} for i in range(active + 1)}
        if active > 0:
            # Adjacent coordinates closer than eps start out merged.
            equal = (x[:active] - x[1:active+1]) < eps
            for i, e in enumerate(equal):
                if e:
                    members[i].update(members[i + 1])
                    members[i + 1] = members[i]
            # Averages gam over each group (in place) -- see `project`.
            project(gam, active, members)
    step = 0
    while not finished and step < max_steps and active > -1:
        step += 1
        # delta: how far we can move before the next structural event;
        # ind: index where two groups collide, or -1 when the last group
        # leaves the active set.
        res = compute_step_length(x, gam, active, eps)
        delta, ind = res

        if ind == -1:
            # Last group deactivates: shrink the active boundary past it.
            active = active - len(members[active])

        # Proximal stopping point along this direction, from
        # <x - x0, gam>/<gam, gam> + 1/lam.
        stop = torch.dot(x - x0, gam) / torch.dot(gam, gam) + 1. / lam
        if 0 <= stop < delta:
            delta = stop
            finished = True

        x = x - delta * gam
        if not finished:
            if ind >= 0:
                # Merge the colliding group into its neighbor; `repr`
                # (NOTE(review): shadows the builtin) is the group's
                # smallest index, used as the canonical representative.
                repr = min(members[ind])
                members[repr].update(members[ind + 1])
                for m in members[ind]:
                    if m != repr:
                        members[m] = members[repr]
                # Re-average gam over the merged groups.
                project(gam, active, members)
    # Optional trace output, only written into keys the caller created.
    if "path" in debug:
        debug["path"].append(x.numpy())
    if "step" in debug:
        debug["step"] = step
    if "finished" in debug:
        debug["finished"] = finished
    return x, gam