def test_mark_non_differentiable(self):
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            output = input > 0
            ctx.mark_non_differentiable(output)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            return (grad_output * 0).type(torch.DoubleTensor)

    x = Variable(torch.randn(5, 5), requires_grad=True)
    mask = MyFunction.apply(x)
    self.assertFalse(mask.requires_grad)
    y = x.masked_fill(mask, 0)
    y.sum().backward()
def _test_barrier_helper(self, group, group_id, rank):
    WAIT_TIME = 0.3  # seconds

    for dest in group:
        expected_time = torch.DoubleTensor(1).fill_(0.0)
        if dest == rank:
            expected_time.fill_(time.time() + WAIT_TIME)
            dist.broadcast(expected_time, dest, group_id)
            time.sleep(WAIT_TIME + 0.1)  # sleep a little bit longer
            dist.barrier(group_id)
        else:
            dist.broadcast(expected_time, dest, group_id)
            dist.barrier(group_id)
            self.assertGreaterEqual(time.time(), expected_time[0])

    self._barrier()
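
A minimal standalone sketch of the broadcast/barrier pattern the helper above exercises; the gloo backend, the rendezvous address, and the RANK/WORLD_SIZE environment variables are placeholder assumptions, not part of the test suite:

import os
import time
import torch
import torch.distributed as dist

# run one copy of this script per rank; each process blocks in barrier()
# until every other process has received rank 0's timestamp
dist.init_process_group(backend='gloo',
                        init_method='tcp://127.0.0.1:23456',
                        rank=int(os.environ['RANK']),
                        world_size=int(os.environ['WORLD_SIZE']))
stamp = torch.DoubleTensor(1).fill_(time.time())
dist.broadcast(stamp, 0)   # all ranks now hold rank 0's start time
dist.barrier()             # synchronize before comparing local clocks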
def test_serialization_array_with_storage(self):
    x = torch.randn(5, 5).cuda()
    y = torch.IntTensor(2, 5).fill_(0).cuda()
    q = [x, y, x, y.storage()]
    with tempfile.NamedTemporaryFile() as f:
        torch.save(q, f)
        f.seek(0)
        q_copy = torch.load(f)
    self.assertEqual(q_copy, q, 0)
    q_copy[0].fill_(5)
    self.assertEqual(q_copy[0], q_copy[2], 0)
    self.assertTrue(isinstance(q_copy[0], torch.cuda.DoubleTensor))
    self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
    self.assertTrue(isinstance(q_copy[2], torch.cuda.DoubleTensor))
    self.assertTrue(isinstance(q_copy[3], torch.cuda.IntStorage))
    q_copy[1].fill_(10)
    self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
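
For reference, a CPU-only sketch of the same save/load round trip (no CUDA required), assuming the legacy tensor and storage classes used throughout these tests:

import tempfile
import torch

# a DoubleTensor and its storage survive a save/load round trip unchanged
x = torch.randn(5, 5).double()
with tempfile.NamedTemporaryFile() as f:
    torch.save([x, x.storage()], f)
    f.seek(0)
    x2, s2 = torch.load(f)
assert torch.typename(x2) == 'torch.DoubleTensor'
assert x2.storage().size() == s2.size()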
def test_broadcast_coalesced(self):
    numel = 5
    num_bytes = numel * 8
    tensors = [
        make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
        torch.randn(numel).long().cuda(),
        torch.randn(numel).cuda(),
        make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
        make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
        make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
        make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
        torch.randn(numel).long().cuda(),
        torch.randn(numel).long().cuda(),
        make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
        torch.randn(numel * 2).int().cuda(),  # int is 2x shorter
        torch.randn(numel).cuda(),
    ]
    self._test_broadcast_coalesced(self, tensors, num_bytes * 5 // 2)
def test_reduce_add_coalesced(self):
    numel = 5
    num_bytes = numel * 8
    tensors = [
        make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
        torch.randn(numel).long().cuda(),
        torch.randn(numel).cuda(),
        make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
        make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
        make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
        make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
        torch.randn(numel).long().cuda(),
        torch.randn(numel).long().cuda(),
        make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
        torch.randn(numel * 2).int().cuda(),  # int is 2x shorter
        torch.randn(numel).cuda(),
    ]
    self._test_reduce_add_coalesced(self, tensors, num_bytes * 5 // 2)
def test_cuda(self, test_case):
    if not TEST_CUDA or not self.should_test_cuda:
        raise unittest.SkipTest('Excluded from CUDA tests')
    try:
        cpu_input = self._get_input()
        type_map = {
            torch.DoubleTensor: torch.cuda.FloatTensor,
        }
        gpu_input = to_gpu(cpu_input, type_map=type_map)
        cpu_target = self._get_target()
        gpu_target = to_gpu(cpu_target, type_map=type_map)

        cpu_module = self.constructor(*self.constructor_args)
        gpu_module = self.constructor(*self.constructor_args).float().cuda()

        cpu_output = test_case._forward_criterion(cpu_module, cpu_input, cpu_target)
        gpu_output = test_case._forward_criterion(gpu_module, gpu_input, gpu_target)
        test_case.assertEqual(cpu_output, gpu_output, 4e-4)

        cpu_gradInput = test_case._backward_criterion(cpu_module, cpu_input, cpu_target)
        gpu_gradInput = test_case._backward_criterion(gpu_module, gpu_input, gpu_target)
        test_case.assertEqual(cpu_gradInput, gpu_gradInput, 4e-4)
    except NotImplementedError:
        pass
def test_Copy(self):
    input = torch.randn(3, 4).double()
    c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
    output = c.forward(input)
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    self.assertEqual(output, input.float(), 1e-6)
    gradInput = c.backward(input, output.fill_(1))
    self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
    self.assertEqual(gradInput, output.double(), 1e-6)
    c.dontCast = True
    c.double()
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')

    # Check that these don't raise errors
    c.__repr__()
    str(c)
def _test_neg(self, cast):
    float_types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor']
    int_types = ['torch.IntTensor', 'torch.ShortTensor', 'torch.ByteTensor',
                 'torch.CharTensor']

    for t in float_types + int_types:
        if t in float_types:
            a = cast(torch.randn(100, 90).type(t))
        else:
            a = cast(torch.Tensor(100, 90).type(t).random_())
        zeros = cast(torch.Tensor().type(t)).resize_as_(a).zero_()

        res_add = torch.add(zeros, -1, a)
        res_neg = a.clone()
        res_neg.neg_()
        self.assertEqual(res_neg, res_add)

        # test out of place as well
        res_neg_out_place = a.clone().neg()
        self.assertEqual(res_neg_out_place, res_add)

        # test via __neg__ operator
        res_neg_op = -a.clone()
        self.assertEqual(res_neg_op, res_add)
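
A small standalone check of the identity the loop above relies on, shown for DoubleTensor only:

import torch

# neg_(), neg() and the unary minus operator should all agree with 0 - a
a = torch.randn(4, 3).double()
expected = torch.zeros(a.size()).double() - a
assert torch.equal(a.clone().neg_(), expected)
assert torch.equal(a.neg(), expected)
assert torch.equal(-a, expected)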
def test_abs(self):
    size = 1000
    max_val = 1000
    original = torch.rand(size).mul(max_val)
    # Tensor filled with values from {-1, 1}
    switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

    types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
    for t in types:
        data = original.type(t)
        switch = switch.type(t)
        res = torch.mul(data, switch)
        # abs is used in assertEqual so we use the slow version instead
        self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

    # Check that the right abs function is called for LongTensor
    # (2 ** 31 + 1 does not fit in a signed 32-bit int)
    bignumber = 2 ** 31 + 1
    res = torch.LongTensor((-bignumber,))
    self.assertGreater(res.abs()[0], 0)
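
The same 64-bit check as a standalone snippet:

import torch

# a value just past the signed 32-bit range must keep its magnitude under abs()
big = 2 ** 31 + 1
assert torch.LongTensor([-big]).abs()[0] == big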
def test_type(self):
    l = nn.Linear(10, 20)
    net = nn.Container(
        l=l,
        l2=l,
        empty=None,
    )
    net.float()
    self.assertIsInstance(l.weight.data, torch.FloatTensor)
    self.assertIsInstance(l.bias.data, torch.FloatTensor)
    net.double()
    self.assertIsInstance(l.weight.data, torch.DoubleTensor)
    self.assertIsInstance(l.bias.data, torch.DoubleTensor)
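
The same behaviour without the Container wrapper, as a minimal sketch:

import torch
import torch.nn as nn

# casting a module casts the parameter tensors it owns in place
lin = nn.Linear(10, 20)
lin.double()
assert isinstance(lin.weight.data, torch.DoubleTensor)
lin.float()
assert isinstance(lin.bias.data, torch.FloatTensor)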
def _test_maxpool_indices(self, num_dim):
    def expected_indices(dim):
        if dim == 1:
            return torch.DoubleTensor([1, 3])
        lower_dim = expected_indices(dim - 1)
        lower_dim = lower_dim.view(1, *lower_dim.size())
        return torch.cat((lower_dim + 4, lower_dim + 12), 0)

    def expected_grad(dim):
        if dim == 1:
            return torch.DoubleTensor([0, 1, 0, 1])
        lower_dim_grad = expected_grad(dim - 1)
        grad = lower_dim_grad.view(1, *lower_dim_grad.size())
        zero = torch.zeros(grad.size())
        return torch.cat((zero, grad, zero, grad), 0)

    module_cls = getattr(nn, 'MaxPool{}d'.format(num_dim))
    module = module_cls(2, return_indices=True)
    numel = 4 ** num_dim
    input = torch.range(1, numel).view(1, 1, *repeat(4, num_dim))
    input_var = Variable(input, requires_grad=True)

    # Check forward
    output, indices = module(input_var)
    if num_dim != 3:
        expected_indices = expected_indices(num_dim)
        expected_output = expected_indices + 1
        self.assertEqual(indices.data.squeeze(), expected_indices)
        self.assertEqual(output.data.squeeze(), expected_output)
    self.assertTrue(output.requires_grad)
    self.assertFalse(indices.requires_grad)

    # Make sure backward works
    grad_output = torch.DoubleTensor(output.size()).fill_(1)
    output.backward(grad_output, retain_variables=True)
    expected_grad = expected_grad(num_dim)
    self.assertEqual(input_var.grad, expected_grad.view_as(input))

    # Make sure backward after changing indices will result in an error
    indices.add_(1)
    self.assertRaises(RuntimeError, lambda: output.backward(grad_output))
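
A concrete 2d instance of the pattern checked above, written against the same legacy API; the expected flat indices follow from taking the max of each 2x2 window of a 4x4 input filled with 1..16:

import torch
import torch.nn as nn
from torch.autograd import Variable

# window maxima are 6, 8, 14, 16, at flat positions 5, 7, 13, 15
x = Variable(torch.range(1, 16).view(1, 1, 4, 4), requires_grad=True)
out, idx = nn.MaxPool2d(2, return_indices=True)(x)
assert idx.data.view(-1).tolist() == [5, 7, 13, 15]
assert out.data.view(-1).tolist() == [6, 8, 14, 16]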
def drosenbrock(tensor):
    x, y = tensor
    return torch.DoubleTensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2)))
def is_floating(t):
    return type(t) in [torch.FloatTensor, torch.DoubleTensor,
                       torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
def test_type_conversions(self):
    x = torch.randn(5, 5)
    self.assertIs(type(x.float()), torch.FloatTensor)
    self.assertIs(type(x.cuda()), torch.cuda.DoubleTensor)
    self.assertIs(type(x.cuda().float()), torch.cuda.FloatTensor)
    self.assertIs(type(x.cuda().float().cpu()), torch.FloatTensor)
    self.assertIs(type(x.cuda().float().cpu().int()), torch.IntTensor)

    y = x.storage()
    self.assertIs(type(y.float()), torch.FloatStorage)
    self.assertIs(type(y.cuda()), torch.cuda.DoubleStorage)
    self.assertIs(type(y.cuda().float()), torch.cuda.FloatStorage)
    self.assertIs(type(y.cuda().float().cpu()), torch.FloatStorage)
    self.assertIs(type(y.cuda().float().cpu().int()), torch.IntStorage)
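
A CPU-only companion to the chain above, using torch.typename so it does not depend on a CUDA device:

import torch

# each conversion returns a new tensor of the requested type
x = torch.randn(5, 5).double()
assert torch.typename(x) == 'torch.DoubleTensor'
assert torch.typename(x.float()) == 'torch.FloatTensor'
assert torch.typename(x.int()) == 'torch.IntTensor'
assert torch.typename(x.long()) == 'torch.LongTensor'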
def drosenbrock(tensor):
    x, y = tensor
    return torch.DoubleTensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2)))
def test_dot(self):
    types = {
        'torch.DoubleTensor': 1e-8,
        'torch.FloatTensor': 1e-4,
    }
    for tname, prec in types.items():
        v1 = torch.randn(100).type(tname)
        v2 = torch.randn(100).type(tname)
        res1 = torch.dot(v1, v2)
        res2 = 0
        for i, j in zip(v1, v2):
            res2 += i * j
        self.assertEqual(res1, res2)
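
A standalone version of the same check at DoubleTensor precision:

import torch

# torch.dot should match an explicit elementwise sum to double precision
v1 = torch.randn(100).double()
v2 = torch.randn(100).double()
manual = sum(a * b for a, b in zip(v1, v2))
assert abs(torch.dot(v1, v2) - manual) < 1e-8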
def test_sigmoid(self):
    # TODO: why not simulate math.sigmoid like with rsqrt?
    inputValues = [-1000, -1, 0, 0.5, 1, 2, 1000]
    expectedOutput = [0.0000, 0.2689, 0.5, 0.6225, 0.7311, 0.8808, 1.000]
    precision_4dps = 0.0002

    def checkType(tensor):
        self.assertEqual(tensor(inputValues).sigmoid(), tensor(expectedOutput), precision_4dps)

    checkType(torch.FloatTensor)
    checkType(torch.DoubleTensor)
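
The expected values in the table above are just 1 / (1 + exp(-x)) rounded to four decimal places; a quick check at double precision:

import math
import torch

vals = [-1, 0, 0.5, 1, 2]
out = torch.DoubleTensor(vals).sigmoid()
for v, o in zip(vals, out):
    assert abs(o - 1.0 / (1.0 + math.exp(-v))) < 1e-6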
def test_range(self):
    res1 = torch.range(0, 1)
    res2 = torch.Tensor()
    torch.range(res2, 0, 1)
    self.assertEqual(res1, res2, 0)

    # Check range for non-contiguous tensors.
    x = torch.zeros(2, 3)
    torch.range(x.narrow(1, 1, 2), 0, 3)
    res2 = torch.Tensor(((0, 0, 1), (0, 2, 3)))
    self.assertEqual(x, res2, 1e-16)

    # Check negative
    res1 = torch.Tensor((1, 0))
    res2 = torch.Tensor()
    torch.range(res2, 1, 0, -1)
    self.assertEqual(res1, res2, 0)

    # Equal bounds
    res1 = torch.ones(1)
    res2 = torch.Tensor()
    torch.range(res2, 1, 1, -1)
    self.assertEqual(res1, res2, 0)
    torch.range(res2, 1, 1, 1)
    self.assertEqual(res1, res2, 0)

    # FloatTensor
    res1 = torch.range(torch.FloatTensor(), 0.6, 0.9, 0.1)
    self.assertEqual(res1.size(0), 4)
    res1 = torch.range(torch.FloatTensor(), 1, 10, 0.3)
    self.assertEqual(res1.size(0), 31)

    # DoubleTensor
    res1 = torch.range(torch.DoubleTensor(), 0.6, 0.9, 0.1)
    self.assertEqual(res1.size(0), 4)
    res1 = torch.range(torch.DoubleTensor(), 1, 10, 0.3)
    self.assertEqual(res1.size(0), 31)
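
The size assertions above follow from torch.range including its end point (unlike torch.arange, which excludes it): 0.6 to 0.9 in steps of 0.1 yields four values, and 1 to 10 in steps of 0.3 yields 31. A quick sketch:

import torch

# torch.range counts both endpoints, torch.arange only the lower one
assert torch.range(0, 3).size(0) == 4   # 0, 1, 2, 3
assert torch.arange(0, 3).size(0) == 3  # 0, 1, 2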
def test_element_size(self):
    byte = torch.ByteStorage().element_size()
    char = torch.CharStorage().element_size()
    short = torch.ShortStorage().element_size()
    int = torch.IntStorage().element_size()
    long = torch.LongStorage().element_size()
    float = torch.FloatStorage().element_size()
    double = torch.DoubleStorage().element_size()

    self.assertEqual(byte, torch.ByteTensor().element_size())
    self.assertEqual(char, torch.CharTensor().element_size())
    self.assertEqual(short, torch.ShortTensor().element_size())
    self.assertEqual(int, torch.IntTensor().element_size())
    self.assertEqual(long, torch.LongTensor().element_size())
    self.assertEqual(float, torch.FloatTensor().element_size())
    self.assertEqual(double, torch.DoubleTensor().element_size())

    self.assertGreater(byte, 0)
    self.assertGreater(char, 0)
    self.assertGreater(short, 0)
    self.assertGreater(int, 0)
    self.assertGreater(long, 0)
    self.assertGreater(float, 0)
    self.assertGreater(double, 0)

    # These tests are portable, not necessarily strict for your system.
    self.assertEqual(byte, 1)
    self.assertEqual(char, 1)
    self.assertGreaterEqual(short, 2)
    self.assertGreaterEqual(int, 2)
    self.assertGreaterEqual(int, short)
    self.assertGreaterEqual(long, 4)
    self.assertGreaterEqual(long, int)
    self.assertGreaterEqual(double, float)
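
Concretely, the per-element widths on a typical build (the test above only asserts the looser ordering constraints):

import torch

# element sizes in bytes
print(torch.ByteTensor().element_size())    # 1
print(torch.IntTensor().element_size())     # 4
print(torch.FloatTensor().element_size())   # 4
print(torch.LongTensor().element_size())    # 8
print(torch.DoubleTensor().element_size())  # 8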
def reset(self):
    self.scores = torch.DoubleTensor(torch.DoubleStorage()).numpy()
    self.targets = torch.LongTensor(torch.LongStorage()).numpy()
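
The reset above re-initialises scores and targets as zero-length numpy arrays; a sketch of what that construction produces, assuming the legacy storage classes:

import torch

# wrapping an empty DoubleStorage gives an empty tensor, and .numpy()
# turns it into a zero-length float64 array
scores = torch.DoubleTensor(torch.DoubleStorage()).numpy()
assert scores.dtype.name == 'float64' and scores.size == 0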