def __getitem__(self, index):
    assert index <= len(self), 'index range error'
    index += 1  # LMDB keys are 1-based
    with self.env.begin(write=False) as txn:
        img_key = 'image-%09d' % index
        imgbuf = txn.get(img_key.encode())  # LMDB keys must be bytes under Python 3

        buf = six.BytesIO()
        buf.write(imgbuf)
        buf.seek(0)
        try:
            img = Image.open(buf).convert('L')  # grayscale
        except IOError:
            print('Corrupted image for %d' % index)
            return self[index + 1]  # fall through to the next sample

        if self.transform is not None:
            img = self.transform(img)

        label_key = 'label-%09d' % index
        label = str(txn.get(label_key.encode()))

        if self.target_transform is not None:
            label = self.target_transform(label)

    return (img, label)
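The method above reads one sample from an LMDB store whose keys follow the image-%09d / label-%09d convention. A minimal sketch of how such a dataset might be opened, assuming the store also holds a num-samples count; the class name and key scheme are inferred from the snippet, not shown in it:

import lmdb

class lmdbDataset(object):  # hypothetical wrapper around the snippet above
    def __init__(self, root, transform=None, target_transform=None):
        # read-only environment; safe to share across reader processes
        self.env = lmdb.open(root, readonly=True, lock=False,
                             readahead=False, meminit=False)
        with self.env.begin(write=False) as txn:
            self.nSamples = int(txn.get('num-samples'.encode()))  # assumed key
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return self.nSamples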
def __iter__(self):
    n_batch = len(self) // self.batch_size
    tail = len(self) % self.batch_size
    index = torch.LongTensor(len(self)).fill_(0)
    for i in range(n_batch):
        random_start = random.randint(0, len(self) - self.batch_size)
        batch_index = random_start + torch.range(0, self.batch_size - 1)
        index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
    # deal with tail
    if tail:
        random_start = random.randint(0, len(self) - self.batch_size)
        tail_index = random_start + torch.range(0, tail - 1)
        # use n_batch rather than the loop variable, which is undefined when n_batch == 0
        index[n_batch * self.batch_size:] = tail_index
    return iter(index)
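This sampler yields contiguous runs of batch_size indices starting at random offsets, so each batch is sequential internally but randomly placed in the dataset. A hypothetical hookup to a DataLoader (the sampler class name and a __len__ returning the dataset size are assumed):

sampler = randomSequentialSampler(dataset, batch_size=64)  # assumed class name
loader = torch.utils.data.DataLoader(dataset, batch_size=64,
                                     sampler=sampler, num_workers=2)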
def updateGradInput(self, input, gradOutput):
    input, mask = input
    if input.type() == 'torch.cuda.FloatTensor':
        # build the index buffer on the CPU, then copy it to the GPU
        torch.range(self._maskIndexBufferCPU, 0, mask.nelement() - 1).resize_(mask.size())
        self._maskIndexBuffer.resize_(self._maskIndexBufferCPU.size()).copy_(self._maskIndexBufferCPU)
    else:
        torch.range(self._maskIndexBuffer, 0, mask.nelement() - 1).resize_(mask.size())
    # flat indices of the positions selected by the forward pass
    torch.masked_select(self._maskIndices, self._maskIndexBuffer, mask)
    # scatter gradOutput back to those positions; everything else gets zero gradient
    self._gradBuffer.resize_(input.nelement()).zero_()
    self._gradBuffer.scatter_(0, self._maskIndices, gradOutput)
    self._gradBuffer.resize_(input.size())
    self.gradInput = [self._gradBuffer, self._gradMask.resize_(mask.size()).fill_(0)]
    return self.gradInput
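The scatter above is the inverse of the forward masked selection: gradients land back at the masked-in positions and every other position stays zero. A minimal sketch of that relationship, written against present-day tensor APIs rather than the legacy module above:

import torch

x = torch.arange(6.).view(2, 3)
mask = torch.tensor([[True, False, True], [False, True, False]])
y = x[mask]                    # forward: flatten the selected entries
grad_y = torch.ones_like(y)    # stand-in for the upstream gradient
grad_x = torch.zeros_like(x)
grad_x[mask] = grad_y          # backward: scatter back to the selected positions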
def _test_maxpool_indices(self, num_dim):
    def expected_indices(dim):
        if dim == 1:
            return torch.DoubleTensor([1, 3])
        lower_dim = expected_indices(dim - 1)
        lower_dim = lower_dim.view(1, *lower_dim.size())
        return torch.cat((lower_dim + 4, lower_dim + 12), 0)

    def expected_grad(dim):
        if dim == 1:
            return torch.DoubleTensor([0, 1, 0, 1])
        lower_dim_grad = expected_grad(dim - 1)
        grad = lower_dim_grad.view(1, *lower_dim_grad.size())
        zero = torch.zeros(grad.size())
        return torch.cat((zero, grad, zero, grad), 0)

    module_cls = getattr(nn, 'MaxPool{}d'.format(num_dim))
    module = module_cls(2, return_indices=True)
    numel = 4 ** num_dim
    input = torch.range(1, numel).view(1, 1, *repeat(4, num_dim))
    input_var = Variable(input, requires_grad=True)

    # Check forward
    output, indices = module(input_var)
    if num_dim != 3:
        expected_indices = expected_indices(num_dim)
        expected_output = expected_indices + 1
        self.assertEqual(indices.data.squeeze(), expected_indices)
        self.assertEqual(output.data.squeeze(), expected_output)
    self.assertTrue(output.requires_grad)
    self.assertFalse(indices.requires_grad)

    # Make sure backward works
    grad_output = torch.DoubleTensor(output.size()).fill_(1)
    output.backward(grad_output, retain_variables=True)
    expected_grad = expected_grad(num_dim)
    self.assertEqual(input_var.grad, expected_grad.view_as(input))

    # Make sure backward after changing indices will result in an error
    indices.add_(1)
    self.assertRaises(RuntimeError, lambda: output.backward(grad_output))
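The indices returned by return_indices are what make unpooling possible: MaxUnpool places each pooled value back at its recorded argmax location. A small illustrative pairing, written against the same era's nn API:

pool = nn.MaxPool2d(2, return_indices=True)
unpool = nn.MaxUnpool2d(2)
x = Variable(torch.range(1, 16).view(1, 1, 4, 4))
out, idx = pool(x)           # out holds the maxima, idx their flat positions
restored = unpool(out, idx)  # zeros everywhere except the max locations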
def test_ConvTranspose2d_output_size(self):
    m = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)
    i = Variable(torch.randn(2, 3, 6, 6))
    for h in range(15, 22):
        for w in range(15, 22):
            if 18 <= h <= 20 and 18 <= w <= 20:
                size = (h, w)
                if h == 19:
                    size = torch.LongStorage(size)
                elif h == 20:  # h == 2 was dead code: h is in [18, 20] here
                    size = torch.LongStorage((2, 4) + size)
                m(i, output_size=size)  # pass the computed size, not the raw tuple
            else:
                self.assertRaises(ValueError, lambda: m(i, (h, w)))
def test_RNN_cell(self):
    # this is just a smoke test; these modules are implemented through
    # autograd so no Jacobian test is needed
    for module in (nn.RNNCell, nn.GRUCell):
        for bias in (True, False):
            input = Variable(torch.randn(3, 10))
            hx = Variable(torch.randn(3, 20))
            cell = module(10, 20, bias=bias)
            for i in range(6):
                hx = cell(input, hx)
            hx.sum().backward()
def test_LSTM_cell(self):
    # this is just a smoke test; these modules are implemented through
    # autograd so no Jacobian test is needed
    for bias in (True, False):
        input = Variable(torch.randn(3, 10))
        hx = Variable(torch.randn(3, 20))
        cx = Variable(torch.randn(3, 20))
        lstm = nn.LSTMCell(10, 20, bias=bias)
        for i in range(6):
            hx, cx = lstm(input, (hx, cx))
        (hx + cx).sum().backward()
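The cell-level loop above is exactly how a sequence is unrolled by hand: feed one timestep at a time and carry (hx, cx) forward. A sketch with an explicit time dimension; the sequence length and sizes are arbitrary:

inputs = Variable(torch.randn(6, 3, 10))  # (seq_len, batch, input_size)
hx = Variable(torch.randn(3, 20))
cx = Variable(torch.randn(3, 20))
lstm = nn.LSTMCell(10, 20)
outputs = []
for t in range(inputs.size(0)):
    hx, cx = lstm(inputs[t], (hx, cx))
    outputs.append(hx)  # collect the hidden state at each step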
def small_3d_unique(t):
    return t(S, S, S).copy_(torch.range(1, S * S * S))

def small_1d_lapack(t):
    return torch.range(1, 3).view(3)

def small_2d_lapack(t):
    return torch.range(1, 9).view(3, 3)

def small_2d_lapack_fat(t):
    return torch.range(1, 12).view(4, 3)
def test_from_sequence(self):
    seq = [list(range(i * 4, i * 4 + 4)) for i in range(5)]
    reference = torch.range(0, 19).resize_(5, 4)
    for t in types:
        cuda_type = get_gpu_type(t)
        self.assertEqual(cuda_type(seq), reference)
def test_mul(self):
    m1 = torch.randn(10, 10)
    res1 = m1.clone()
    res1[:, 3].mul_(2)
    res2 = m1.clone()
    for i in range(res1.size(0)):
        res2[i, 3] = res2[i, 3] * 2
    self.assertEqual(res1, res2)
def test_fmod(self):
    m1 = torch.Tensor(10, 10).uniform_(-10., 10.)
    res1 = m1.clone()
    q = 2.1
    res1[:, 3].fmod_(q)
    res2 = m1.clone()
    for i in range(m1.size(0)):  # iterate over rows (was size(1); only worked because m1 is square)
        res2[i, 3] = math.fmod(res2[i, 3], q)
    self.assertEqual(res1, res2)
def test_remainder(self):
    m1 = torch.Tensor(10, 10).uniform_(-10., 10.)
    res1 = m1.clone()
    q = 2.1
    res1[:, 3].remainder_(q)
    res2 = m1.clone()
    for i in range(m1.size(0)):
        res2[i, 3] = res2[i, 3] % q
    self.assertEqual(res1, res2)
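The two tests above differ only in sign convention: fmod follows the sign of the dividend (like C's fmod), while remainder follows the sign of the divisor (like Python's % operator). For example:

import math
math.fmod(-3.0, 2.1)   # -> -0.9  (sign of the dividend)
-3.0 % 2.1             # ->  1.2  (sign of the divisor; remainder_ matches this)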
def test_bmm(self):
    num_batches = 10
    M, N, O = 23, 8, 12
    b1 = torch.randn(num_batches, M, N)
    b2 = torch.randn(num_batches, N, O)
    res = torch.bmm(b1, b2)
    for i in range(num_batches):
        r = torch.mm(b1[i], b2[i])
        self.assertEqual(r, res[i])
def test_pow(self):
    # [res] torch.pow([res,] x)
    # base - tensor, exponent - number
    # contiguous
    m1 = torch.randn(100, 100)
    res1 = torch.pow(m1[4], 3)
    res2 = res1.clone().zero_()
    for i in range(res2.size(0)):
        res2[i] = math.pow(m1[4][i], 3)
    self.assertEqual(res1, res2)

    # non-contiguous
    m1 = torch.randn(100, 100)
    res1 = torch.pow(m1[:, 4], 3)
    res2 = res1.clone().zero_()
    for i in range(res2.size(0)):
        res2[i] = math.pow(m1[i, 4], 3)
    self.assertEqual(res1, res2)

    # base - number, exponent - tensor
    # contiguous
    m1 = torch.randn(100, 100)
    res1 = torch.pow(3, m1[4])
    res2 = res1.clone().zero_()
    for i in range(res2.size(0)):
        res2[i] = math.pow(3, m1[4, i])
    self.assertEqual(res1, res2)

    # non-contiguous
    m1 = torch.randn(100, 100)
    res1 = torch.pow(3, m1[:, 4])
    res2 = res1.clone().zero_()
    for i in range(res2.size(0)):
        res2[i] = math.pow(3, m1[i][4])
    self.assertEqual(res1, res2)
def test_range(self):
    res1 = torch.range(0, 1)
    res2 = torch.Tensor()
    torch.range(res2, 0, 1)
    self.assertEqual(res1, res2, 0)

    # Check range for non-contiguous tensors.
    x = torch.zeros(2, 3)
    torch.range(x.narrow(1, 1, 2), 0, 3)
    res2 = torch.Tensor(((0, 0, 1), (0, 2, 3)))
    self.assertEqual(x, res2, 1e-16)

    # Check negative step
    res1 = torch.Tensor((1, 0))
    res2 = torch.Tensor()
    torch.range(res2, 1, 0, -1)
    self.assertEqual(res1, res2, 0)

    # Equal bounds
    res1 = torch.ones(1)
    res2 = torch.Tensor()
    torch.range(res2, 1, 1, -1)
    self.assertEqual(res1, res2, 0)
    torch.range(res2, 1, 1, 1)
    self.assertEqual(res1, res2, 0)

    # FloatTensor
    res1 = torch.range(torch.FloatTensor(), 0.6, 0.9, 0.1)
    self.assertEqual(res1.size(0), 4)
    res1 = torch.range(torch.FloatTensor(), 1, 10, 0.3)
    self.assertEqual(res1.size(0), 31)

    # DoubleTensor
    res1 = torch.range(torch.DoubleTensor(), 0.6, 0.9, 0.1)
    self.assertEqual(res1.size(0), 4)
    res1 = torch.range(torch.DoubleTensor(), 1, 10, 0.3)
    self.assertEqual(res1.size(0), 31)
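Note that torch.range is endpoint-inclusive, which is why the (0.6, 0.9, 0.1) case above yields 4 elements rather than 3. torch.arange, which later superseded it, follows Python's half-open convention:

import torch
a = torch.range(0, 3)    # 0, 1, 2, 3  -- endpoint included, 4 elements
b = torch.arange(0, 3)   # 0, 1, 2     -- endpoint excluded, 3 elements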
def test_mode(self):
    x = torch.range(1, SIZE * SIZE).clone().resize_(SIZE, SIZE)
    x[:2] = 1
    x[:, :2] = 1
    x0 = x.clone()

    # Pre-calculated results.
    res1val = torch.Tensor(SIZE, 1).fill_(1)
    # The indices are the position of the last appearance of the mode element.
    res1ind = torch.LongTensor(SIZE, 1).fill_(1)
    res1ind[0] = SIZE - 1
    res1ind[1] = SIZE - 1

    res2val, res2ind = torch.mode(x)
    self.assertEqual(res1val, res2val, 0)
    self.assertEqual(res1ind, res2ind, 0)

    # Test use of result tensor
    res2val = torch.Tensor()
    res2ind = torch.LongTensor()
    torch.mode(res2val, res2ind, x)
    self.assertEqual(res1val, res2val, 0)
    self.assertEqual(res1ind, res2ind, 0)

    # Test non-default dim
    res2val, res2ind = torch.mode(x, 0)
    self.assertEqual(res1val.view(1, SIZE), res2val, 0)
    self.assertEqual(res1ind.view(1, SIZE), res2ind, 0)

    # input unchanged
    self.assertEqual(x, x0, 0)
def test_xcorr3_xcorr2_eq(self):
    def reference(x, k, o3, o32):
        # accumulate 2D cross-correlations of matching slices in place;
        # the non-in-place add() discarded its result
        for i in range(o3.size(1)):
            for j in range(k.size(1)):
                o32[i].add_(torch.xcorr2(x[i + j - 1], k[j]))
    self._test_conv_corr_eq(lambda x, k: torch.xcorr3(x, k), reference)