def pad_batch(mini_batch):
mini_batch_size = len(mini_batch)
# print mini_batch.shape
# print mini_batch
max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
# print max_sent_len1, max_sent_len2
# max_token_len = int(np.mean([len(val) for sublist in mini_batch for val in sublist]))
main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int64)
main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int64)
for idx1, i in enumerate(mini_batch):
for idx2, j in enumerate(i[0]):
try:
main_matrix1[idx1, idx2] = j
except IndexError:
pass
for idx1, i in enumerate(mini_batch):
for idx2, j in enumerate(i[1]):
try:
main_matrix2[idx1, idx2] = j
except IndexError:
pass
main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
# print main_matrix1_t.size()
# print main_matrix2_t.size()
return [main_matrix1_t, main_matrix2_t]
# return [Variable(torch.cat((main_matrix1_t, main_matrix2_t), 0))
# def pad_batch(mini_batch):
# # print mini_batch
# # print type(mini_batch)
# # print mini_batch.shape
# # for i, _ in enumerate(mini_batch):
# # print i, _
# return [Variable(torch.from_numpy(np.asarray(_))) for _ in mini_batch[0]]
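# A minimal usage sketch for pad_batch above (assuming the indexing fix and that
# numpy, torch, and Variable are imported as in the original project; the token
# ids below are invented for illustration):
example_batch = [([1, 2, 3], [4, 5]), ([6], [7, 8, 9, 10])]
left_t, right_t = pad_batch(example_batch)
# left_t.size() == (2, 3) and right_t.size() == (2, 4); shorter rows stay zero-padded.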
Python zeros() example source code
def forward(self, x):
outputs = []
h_t = Variable(torch.zeros(x.size(0), self.hidden_size).cuda())
c_t = Variable(torch.zeros(x.size(0), self.hidden_size).cuda())
for i, input_t in enumerate(x.chunk(x.size(1), dim=1)):
input_t = input_t.contiguous().view(input_t.size()[0], 1)
h_t, c_t = self.lstm1(input_t, (h_t, c_t))
outputs += [c_t]
outputs = torch.stack(outputs, 1).squeeze(2)
shp = (outputs.size()[0], outputs.size()[1])
out = outputs.contiguous().view(shp[0] * shp[1], self.hidden_size)
out = self.fc(out)
out = out.view(shp[0], shp[1], self.num_classes)
return out
def forward(self, inputs):
# set up batch size
batch_size = inputs.size(0)
# compute hidden and cell
hidden = Variable(torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).cuda())
cell = Variable(torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).cuda())
hidden_cell = (hidden, cell)
# recurrent neural networks
outputs, _ = self.rnn.forward(inputs, hidden_cell)
outputs = outputs[:, -1, :].contiguous()
# compute features by outputs
features = self.feature.forward(outputs)
return features
def forward(self, inputs):
# set up batch size
batch_size = inputs.size(0)
# compute hidden and cell
hidden = Variable(torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).cuda())
cell = Variable(torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).cuda())
hidden_cell = (hidden, cell)
# recurrent neural networks
outputs, _ = self.rnn.forward(inputs, hidden_cell)
outputs = outputs.contiguous().view(-1, self.hidden_size * 2)
# compute classifications by outputs
outputs = self.classifier.forward(outputs)
outputs = F.softmax(outputs)
outputs = outputs.view(batch_size, -1, self.num_classes)
return outputs
def init_hidden(self, height, width):
self.height = height
self.width = width
self.batch = height * width
self.cell_state = Variable(
torch.zeros(
self.lstm_layer,
self.batch,
self.hidden_dim))
self.hidden_state = Variable(
torch.zeros(
self.lstm_layer,
self.batch,
self.hidden_dim))
if self.on_gpu:
self.cell_state = self.cell_state.cuda()
self.hidden_state = self.hidden_state.cuda()
def __init__(self, env_name, num_episodes, alpha, gamma, epsilon, policy, **kwargs):
"""
base class for RL algorithms that use a lookup table
:param env_name: name of the environment; currently only environments whose observation space and
action space are both Discrete are supported, see https://github.com/openai/gym/wiki/Table-of-environments
:param num_episodes: number of episodes for training
:param alpha: learning rate
:param gamma: discount factor
:param epsilon: exploration rate (e.g. for an epsilon-greedy policy)
:param kwargs: other arguments.
"""
super(TableBase, self).__init__(env_name, num_episodes, alpha, gamma, policy, epsilon=epsilon, **kwargs)
if not isinstance(self.env.action_space, gym.spaces.Discrete) or \
not isinstance(self.env.observation_space, gym.spaces.Discrete):
raise NotImplementedError("action_space and observation_space should be Discrete")
self.obs_size = self.env.observation_space.n
self.action_size = self.env.action_space.n
self.q_table = torch.zeros(self.obs_size, self.action_size)
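# A minimal sketch of the tabular Q-learning update that such a q_table supports;
# the state/action sizes and the transition below are illustrative, not taken
# from this class:
q_table = torch.zeros(16, 4)                   # e.g. 16 discrete states, 4 actions
obs, action, reward, next_obs = 0, 1, 0.0, 4   # one hypothetical transition
learning_rate, discount = 0.5, 0.99
td_target = reward + discount * q_table[next_obs].max()
q_table[obs, action] += learning_rate * (td_target - q_table[obs, action])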
def test(self, dataset):
self.model.eval()
total_loss = 0
predictions = torch.zeros(len(dataset))
indices = torch.arange(1, dataset.num_classes + 1)
for idx in tqdm(range(len(dataset)), desc='Testing epoch ' + str(self.epoch)):
ltree, lsent, rtree, rsent, label = dataset[idx]
linput, rinput = Var(lsent, volatile=True), Var(rsent, volatile=True)
target = Var(map_label_to_target(label, dataset.num_classes), volatile=True)
if self.args.cuda:
linput, rinput = linput.cuda(), rinput.cuda()
target = target.cuda()
output = self.model(ltree, linput, rtree, rinput)
loss = self.criterion(output, target)
total_loss += loss.data[0]
output = output.data.squeeze().cpu()
predictions[idx] = torch.dot(indices, torch.exp(output))
return total_loss / len(dataset), predictions
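# A sketch of the expected-rating trick used above: the model outputs
# log-probabilities over the rating classes, and the prediction is the dot
# product of the class indices with those probabilities (numbers invented):
indices = torch.arange(1, 6).float()                 # rating classes 1..5
probs = torch.Tensor([0.1, 0.2, 0.4, 0.2, 0.1])      # stand-in for torch.exp(output)
expected_rating = torch.dot(indices, probs)          # = 3.0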
def __init__(self, nOutput, eps=1e-5, momentum=0.1, affine=True):
super(BatchNormalization, self).__init__()
assert nOutput != 0
self.affine = affine
self.eps = eps
self.train = True
self.momentum = momentum
self.running_mean = torch.zeros(nOutput)
self.running_var = torch.ones(nOutput)
self.save_mean = None
self.save_std = None
if self.affine:
self.weight = torch.Tensor(nOutput)
self.bias = torch.Tensor(nOutput)
self.gradWeight = torch.Tensor(nOutput)
self.gradBias = torch.Tensor(nOutput)
self.reset()
else:
self.weight = None
self.bias = None
self.gradWeight = None
self.gradBias = None
def __init__(self, inputsize, outputsize, bias=True):
super(PartialLinear, self).__init__()
# define the layer as a small network:
pt = ParallelTable()
pt.add(Identity()).add(LookupTable(outputsize, inputsize))
self.network = Sequential().add(pt).add(MM(False, True))
if bias:
self.bias = torch.zeros(1, outputsize)
self.gradBias = torch.zeros(1, outputsize)
else:
self.bias = self.gradBias = None
# set partition:
self.inputsize = inputsize
self.outputsize = outputsize
self.allcolumns = torch.range(0, self.outputsize-1).long()
self.resetPartition()
self.addBuffer = None
self.buffer = None
def _test_sharing(self):
def do_test():
x = torch.zeros(5, 5)
q = mp.Queue()
e = mp.Event()
data = [x, x[:, 1]]
q.put(data)
p = mp.Process(target=simple_fill, args=(q, e))
lc.check_pid(p.pid)
p.start()
e.wait()
self.assertTrue(data[0].eq(4).all())
self.assertTrue(data[1].eq(4).all())
p.join(1)
self.assertFalse(p.is_alive())
with leak_checker(self) as lc:
do_test()
def _test_pool(self):
def do_test():
p = mp.Pool(2)
for proc in p._pool:
lc.check_pid(proc.pid)
buffers = (torch.zeros(2, 2) for i in range(4))
results = p.map(simple_pool_fill, buffers, 1)
for r in results:
self.assertEqual(r, torch.ones(2, 2) * 5, 0)
self.assertEqual(len(results), 4)
p.close()
p.join()
with leak_checker(self) as lc:
do_test()
def test_linspace(self):
_from = random.random()
to = _from + random.random()
res1 = torch.linspace(_from, to, 137)
res2 = torch.Tensor()
torch.linspace(res2, _from, to, 137)
self.assertEqual(res1, res2, 0)
self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, 1))
self.assertEqual(torch.linspace(0, 0, 1), torch.zeros(1), 0)
# Check linspace for generating with start > end.
self.assertEqual(torch.linspace(2, 0, 3), torch.Tensor((2, 1, 0)), 0)
# Check linspace for non-contiguous tensors.
x = torch.zeros(2, 3)
y = torch.linspace(x.narrow(1, 1, 2), 0, 3, 4)
self.assertEqual(x, torch.Tensor(((0, 0, 1), (0, 2, 3))), 0)
def test_logspace(self):
_from = random.random()
to = _from + random.random()
res1 = torch.logspace(_from, to, 137)
res2 = torch.Tensor()
torch.logspace(res2, _from, to, 137)
self.assertEqual(res1, res2, 0)
self.assertRaises(RuntimeError, lambda: torch.logspace(0, 1, 1))
self.assertEqual(torch.logspace(0, 0, 1), torch.ones(1), 0)
# Check logspace_ for generating with start > end.
self.assertEqual(torch.logspace(1, 0, 2), torch.Tensor((10, 1)), 0)
# Check logspace_ for non-contiguous tensors.
x = torch.zeros(2, 3)
y = torch.logspace(x.narrow(1, 1, 2), 0, 3, 4)
self.assertEqual(x, torch.Tensor(((0, 1, 10), (0, 100, 1000))), 0)
def test_newindex(self):
reference = self._consecutive((3, 3, 3))
# This relies on __index__() being correct - but we have separate tests for that
def checkPartialAssign(index):
reference = torch.zeros(3, 3, 3)
reference[index] = self._consecutive((3, 3, 3))[index]
self.assertEqual(reference[index], self._consecutive((3, 3, 3))[index], 0)
reference[index] = 0
self.assertEqual(reference, torch.zeros(3, 3, 3), 0)
checkPartialAssign(0)
checkPartialAssign(1)
checkPartialAssign(2)
checkPartialAssign((0, 1))
checkPartialAssign((1, 2))
checkPartialAssign((0, 2))
with self.assertRaises(RuntimeError):
reference[1, 1, 1, 1] = 1
with self.assertRaises(RuntimeError):
reference[1, 1, 1, (1, 1)] = 1
with self.assertRaises(RuntimeError):
reference[3, 3, 3, 3, 3, 3, 3, 3] = 1
def test_scatter(self):
m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
elems_per_row = random.randint(1, 10)
dim = random.randrange(3)
idx_size = [m, n, o]
idx_size[dim] = elems_per_row
idx = torch.LongTensor().resize_(*idx_size)
self._fill_indices(idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o)
src = torch.Tensor().resize_(*idx_size).normal_()
actual = torch.zeros(m, n, o).scatter_(dim, idx, src)
expected = torch.zeros(m, n, o)
for i in range(idx_size[0]):
for j in range(idx_size[1]):
for k in range(idx_size[2]):
ii = [i, j, k]
ii[dim] = idx[i,j,k]
expected[tuple(ii)] = src[i,j,k]
self.assertEqual(actual, expected, 0)
idx[0][0][0] = 34
self.assertRaises(RuntimeError, lambda: torch.zeros(m, n, o).scatter_(dim, idx, src))
def test_scatterFill(self):
m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
elems_per_row = random.randint(1, 10)
dim = random.randrange(3)
val = random.random()
idx_size = [m, n, o]
idx_size[dim] = elems_per_row
idx = torch.LongTensor().resize_(*idx_size)
self._fill_indices(idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o)
actual = torch.zeros(m, n, o).scatter_(dim, idx, val)
expected = torch.zeros(m, n, o)
for i in range(idx_size[0]):
for j in range(idx_size[1]):
for k in range(idx_size[2]):
ii = [i, j, k]
ii[dim] = idx[i,j,k]
expected[tuple(ii)] = val
self.assertEqual(actual, expected, 0)
idx[0][0][0] = 28
self.assertRaises(RuntimeError, lambda: torch.zeros(m, n, o).scatter_(dim, idx, val))
def make_A(As, ns, use_gpu=False):
"""Create the 3D tensor A as needed by gel_solve, given a list of feature
matrices.
Arguments:
As: list of feature matrices, one per group (size mxn_j).
ns: LongTensor of group sizes.
use_gpu: move the final tensor to GPU.
"""
A = torch.zeros(len(ns), ns.max(), As[0].size()[0])
for j, n_j in enumerate(ns):
# Fill A[j] with A_j.T
A_j = As[j]
A[j, :n_j, :] = A_j.t()
if use_gpu:
A = A.cuda()
return A
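# A minimal usage sketch for make_A (group sizes and sample count are invented):
# three groups with 2, 3, and 1 features, each observed over m = 4 samples.
As_example = [torch.randn(4, 2), torch.randn(4, 3), torch.randn(4, 1)]
ns_example = torch.LongTensor([2, 3, 1])
A_example = make_A(As_example, ns_example)   # size (3, 3, 4): group x max n_j x m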
def testAUCMeter(self):
mtr = meter.AUCMeter()
test_size = 1000
mtr.add(torch.rand(test_size), torch.zeros(test_size))
mtr.add(torch.rand(test_size), torch.Tensor(test_size).fill_(1))
val, tpr, fpr = mtr.value()
self.assertTrue(math.fabs(val - 0.5) < 0.1, msg="AUC Meter fails")
mtr.reset()
mtr.add(torch.Tensor(test_size).fill_(0), torch.zeros(test_size))
mtr.add(torch.Tensor(test_size).fill_(0.1), torch.zeros(test_size))
mtr.add(torch.Tensor(test_size).fill_(0.2), torch.zeros(test_size))
mtr.add(torch.Tensor(test_size).fill_(0.3), torch.zeros(test_size))
mtr.add(torch.Tensor(test_size).fill_(0.4), torch.zeros(test_size))
mtr.add(torch.Tensor(test_size).fill_(1),
torch.Tensor(test_size).fill_(1))
val, tpr, fpr = mtr.value()
self.assertEqual(val, 1.0, msg="AUC Meter fails")
def test_last_dim_softmax_does_softmax_on_last_dim(self):
batch_size = 1
length_1 = 5
length_2 = 3
num_options = 4
options_array = numpy.zeros((batch_size, length_1, length_2, num_options))
for i in range(length_1):
for j in range(length_2):
options_array[0, i, j] = [2, 4, 0, 1]
options_tensor = Variable(torch.from_numpy(options_array))
softmax_tensor = util.last_dim_softmax(options_tensor).data.numpy()
assert softmax_tensor.shape == (batch_size, length_1, length_2, num_options)
for i in range(length_1):
for j in range(length_2):
assert_almost_equal(softmax_tensor[0, i, j],
[0.112457, 0.830953, 0.015219, 0.041371],
decimal=5)
def test_last_dim_softmax_handles_mask_correctly(self):
batch_size = 1
length_1 = 4
length_2 = 3
num_options = 5
options_array = numpy.zeros((batch_size, length_1, length_2, num_options))
for i in range(length_1):
for j in range(length_2):
options_array[0, i, j] = [2, 4, 0, 1, 6]
mask = Variable(torch.IntTensor([[1, 1, 1, 1, 0]]))
options_tensor = Variable(torch.from_numpy(options_array).float())
softmax_tensor = util.last_dim_softmax(options_tensor, mask).data.numpy()
assert softmax_tensor.shape == (batch_size, length_1, length_2, num_options)
for i in range(length_1):
for j in range(length_2):
assert_almost_equal(softmax_tensor[0, i, j],
[0.112457, 0.830953, 0.015219, 0.041371, 0.0],
decimal=5)
def test_remove_sentence_boundaries(self):
tensor = Variable(torch.from_numpy(numpy.random.rand(3, 5, 7)))
mask = Variable(torch.from_numpy(
# The mask with two elements is to test the corner case
# of an empty sequence, so here we are removing boundaries
# from "<S> </S>"
numpy.array([[1, 1, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 0]]))).long()
new_tensor, new_mask = util.remove_sentence_boundaries(tensor, mask)
expected_new_tensor = Variable(torch.zeros(3, 3, 7))
expected_new_tensor[1, 0:3, :] = tensor[1, 1:4, :]
expected_new_tensor[2, 0:2, :] = tensor[2, 1:3, :]
assert_array_almost_equal(new_tensor.data.numpy(), expected_new_tensor.data.numpy())
expected_new_mask = Variable(torch.from_numpy(
numpy.array([[0, 0, 0],
[1, 1, 1],
[1, 1, 0]]))).long()
assert (new_mask.data.numpy() == expected_new_mask.data.numpy()).all()
def test_add_positional_features(self):
# This is hard to test, so we check that we get the same result as the
# original tensorflow implementation:
# https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py#L270
tensor2tensor_result = numpy.asarray([[0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 1.00000000e+00],
[8.41470957e-01, 9.99999902e-05, 5.40302277e-01, 1.00000000e+00],
[9.09297407e-01, 1.99999980e-04, -4.16146845e-01, 1.00000000e+00]])
tensor = Variable(torch.zeros([2, 3, 4]))
result = util.add_positional_features(tensor, min_timescale=1.0, max_timescale=1.0e4)
numpy.testing.assert_almost_equal(result[0].data.cpu().numpy(), tensor2tensor_result)
numpy.testing.assert_almost_equal(result[1].data.cpu().numpy(), tensor2tensor_result)
# Check case with odd number of dimensions.
tensor2tensor_result = numpy.asarray([[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[8.41470957e-01, 9.99983307e-03, 9.99999902e-05, 5.40302277e-01,
9.99949992e-01, 1.00000000e+00, 0.00000000e+00],
[9.09297407e-01, 1.99986659e-02, 1.99999980e-04, -4.16146815e-01,
9.99800026e-01, 1.00000000e+00, 0.00000000e+00]])
tensor = Variable(torch.zeros([2, 3, 7]))
result = util.add_positional_features(tensor, min_timescale=1.0, max_timescale=1.0e4)
numpy.testing.assert_almost_equal(result[0].data.cpu().numpy(), tensor2tensor_result)
numpy.testing.assert_almost_equal(result[1].data.cpu().numpy(), tensor2tensor_result)
def forward(self, hidden, encoder_outputs):
# hidden.size() = (B, H), encoder_outputs.size() = (B, S, H)
batch_size, encoder_outputs_len, _ = encoder_outputs.size()
# Create variable to store attention energies
# attn_energies.size() = (B, S)
attn_energies = Variable(torch.zeros((batch_size, encoder_outputs_len))) # B x S
if Config.use_cuda: attn_energies = attn_energies.cuda()
# Calculate energies for each encoder output
# attn_energies.size() = (B, S)
for i in range(encoder_outputs_len):
attn_energies[:, i] = self.score(hidden, encoder_outputs[:, i])
# print attn_energies[:, i]
# Normalize energies to weights in range 0 to 1
return F.softmax(attn_energies)
def forward(self, H_enc):
if torch.has_cudnn:
# Initialization of the hidden states
h_t_dec = Variable(torch.zeros(self._B, self._gruout).cuda(), requires_grad=False)
# Initialization of the decoder output
H_j_dec = Variable(torch.zeros(self._B, self._T - (self._L * 2), self._gruout).cuda(), requires_grad=False)
else:
# Initialization of the hidden states
h_t_dec = Variable(torch.zeros(self._B, self._gruout), requires_grad=False)
# Initialization of the decoder output
H_j_dec = Variable(torch.zeros(self._B, self._T - (self._L * 2), self._gruout), requires_grad=False)
for ts in range(self._T - (self._L * 2)):
# GRU Decoding
h_t_dec = self.gruDec(H_enc[:, ts, :], h_t_dec)
H_j_dec[:, ts, :] = h_t_dec
return H_j_dec
def make_batch(batch_size):
batch_idx = np.random.choice(len(data),batch_size)
batch_sequences = [data[idx] for idx in batch_idx]
strokes = []
lengths = []
indice = 0
for seq in batch_sequences:
len_seq = len(seq[:,0])
new_seq = np.zeros((Nmax,5))
new_seq[:len_seq,:2] = seq[:,:2]
new_seq[:len_seq-1,2] = 1-seq[:-1,2]
new_seq[:len_seq,3] = seq[:,2]
new_seq[(len_seq-1):,4] = 1
new_seq[len_seq-1,2:4] = 0
lengths.append(len(seq[:,0]))
strokes.append(new_seq)
indice += 1
if use_cuda:
batch = Variable(torch.from_numpy(np.stack(strokes,1)).cuda().float())
else:
batch = Variable(torch.from_numpy(np.stack(strokes,1)).float())
return batch, lengths
################################ adaptive lr
def forward(self, inputs, batch_size, hidden_cell=None):
if hidden_cell is None:
# then must init with zeros
if use_cuda:
hidden = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size).cuda())
cell = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size).cuda())
else:
hidden = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size))
cell = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size))
hidden_cell = (hidden, cell)
_, (hidden,cell) = self.lstm(inputs.float(), hidden_cell)
# hidden is (2, batch_size, hidden_size), we want (batch_size, 2*hidden_size):
hidden_forward, hidden_backward = torch.split(hidden,1,0)
hidden_cat = torch.cat([hidden_forward.squeeze(0), hidden_backward.squeeze(0)],1)
# mu and sigma:
mu = self.fc_mu(hidden_cat)
sigma_hat = self.fc_sigma(hidden_cat)
sigma = torch.exp(sigma_hat/2.)
# N ~ N(0,1)
z_size = mu.size()
if use_cuda:
N = Variable(torch.normal(torch.zeros(z_size),torch.ones(z_size)).cuda())
else:
N = Variable(torch.normal(torch.zeros(z_size),torch.ones(z_size)))
z = mu + sigma*N
# mu and sigma_hat are needed for LKL loss
return z, mu, sigma_hat
def make_target(self, batch, lengths):
if use_cuda:
eos = Variable(torch.stack([torch.Tensor([0,0,0,0,1])]\
*batch.size()[1]).cuda()).unsqueeze(0)
else:
eos = Variable(torch.stack([torch.Tensor([0,0,0,0,1])]\
*batch.size()[1])).unsqueeze(0)
batch = torch.cat([batch, eos], 0)
mask = torch.zeros(Nmax+1, batch.size()[1])
for indice,length in enumerate(lengths):
mask[:length,indice] = 1
if use_cuda:
mask = Variable(mask.cuda()).detach()
else:
mask = Variable(mask).detach()
dx = torch.stack([Variable(batch.data[:,:,0])]*hp.M,2).detach()
dy = torch.stack([Variable(batch.data[:,:,1])]*hp.M,2).detach()
p1 = Variable(batch.data[:,:,2]).detach()
p2 = Variable(batch.data[:,:,3]).detach()
p3 = Variable(batch.data[:,:,4]).detach()
p = torch.stack([p1,p2,p3],2)
return mask,dx,dy,p
# dataset.py, from project Video-Classification-Action-Recognition (author: qijiezhao)
def get_crop_ix(self,training_size):
rescale_sizes=self.rescale_size
crop_inds=[]
for size_pair in rescale_sizes:
mother_w,mother_h=size_pair
crop_ix=np.zeros([5,4],dtype=np.int16)
w_indices=(0,mother_w-training_size)
h_indices=(0,mother_h-training_size)
w_center=(mother_w-training_size)//2   # integer division keeps crop offsets integral
h_center=(mother_h-training_size)//2
crop_ix[4,:]=[w_center,h_center,training_size+w_center,training_size+h_center]
cnt=0
for i in w_indices:
for j in h_indices:
crop_ix[cnt,:]=[i,j,i+training_size,j+training_size]
cnt+=1
crop_inds.append(crop_ix)
return crop_inds
def classifier(self, xs):
"""
classify an image (or a batch of images)
:param xs: a batch of scaled vectors of pixels from an image
:return: a batch of the corresponding class labels (as one-hots)
"""
# use the trained model q(y|x) = categorical(alpha(x))
# compute all class probabilities for the image(s)
alpha = self.encoder_y.forward(xs)
# get the index (digit) that corresponds to
# the maximum predicted class probability
res, ind = torch.topk(alpha, 1)
# convert the digit(s) to one-hot tensor(s)
ys = Variable(torch.zeros(alpha.size()))
ys = ys.scatter_(1, ind, 1.0)
return ys
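# A sketch of the one-hot conversion used above, in isolation (the class
# probabilities are invented):
alpha_example = Variable(torch.Tensor([[0.1, 0.7, 0.2]]))
_, top_index = torch.topk(alpha_example, 1)           # index of the most likely class
one_hot = Variable(torch.zeros(alpha_example.size())).scatter_(1, top_index, 1.0)
# one_hot.data is [[0., 1., 0.]]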
def model(data):
# Create unit normal priors over the parameters
mu = Variable(torch.zeros(p, 1)).type_as(data)
sigma = Variable(torch.ones(p, 1)).type_as(data)
bias_mu = Variable(torch.zeros(1)).type_as(data)
bias_sigma = Variable(torch.ones(1)).type_as(data)
w_prior, b_prior = Normal(mu, sigma), Normal(bias_mu, bias_sigma)
priors = {'linear.weight': w_prior, 'linear.bias': b_prior}
# lift module parameters to random variables sampled from the priors
lifted_module = pyro.random_module("module", regression_model, priors)
# sample a regressor (which also samples w and b)
lifted_reg_model = lifted_module()
with pyro.iarange("map", N, subsample=data):
x_data = data[:, :-1]
y_data = data[:, -1]
# run the regressor forward conditioned on inputs
prediction_mean = lifted_reg_model(x_data).squeeze()
pyro.observe("obs", Normal(prediction_mean, Variable(torch.ones(data.size(0))).type_as(data)), y_data.squeeze())