def test_cat(self):
    SIZE = 10
    for dim in range(-3, 3):
        pos_dim = dim if dim >= 0 else 3 + dim
        x = torch.rand(13, SIZE, SIZE).transpose(0, pos_dim)
        y = torch.rand(17, SIZE, SIZE).transpose(0, pos_dim)
        z = torch.rand(19, SIZE, SIZE).transpose(0, pos_dim)
        res1 = torch.cat((x, y, z), dim)
        self.assertEqual(res1.narrow(pos_dim, 0, 13), x, 0)
        self.assertEqual(res1.narrow(pos_dim, 13, 17), y, 0)
        self.assertEqual(res1.narrow(pos_dim, 30, 19), z, 0)

    x = torch.randn(20, SIZE, SIZE)
    self.assertEqual(torch.cat(torch.split(x, 7)), x)
    self.assertEqual(torch.cat(torch.chunk(x, 7)), x)

    y = torch.randn(1, SIZE, SIZE)
    z = torch.cat([x, y])
    self.assertEqual(z.size(), (21, SIZE, SIZE))

    self.assertRaises(RuntimeError, lambda: torch.cat([]))
def test_cholesky(self):
    x = torch.rand(10, 10) + 1e-1
    A = torch.mm(x, x.t())

    # default case
    C = torch.potrf(A)
    B = torch.mm(C.t(), C)
    self.assertEqual(A, B, 1e-14)

    # test upper triangular
    U = torch.potrf(A, True)
    B = torch.mm(U.t(), U)
    self.assertEqual(A, B, 1e-14, 'potrf (upper) did not allow rebuilding the original matrix')

    # test lower triangular
    L = torch.potrf(A, False)
    B = torch.mm(L, L.t())
    self.assertEqual(A, B, 1e-14, 'potrf (lower) did not allow rebuilding the original matrix')
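# Note: torch.potrf is the pre-1.0 API. A minimal sketch of the same round-trip
# check on newer PyTorch, using torch.linalg.cholesky (which returns the
# lower-triangular factor L with A = L @ L.T):
import torch

A = torch.rand(10, 10)
A = A @ A.t() + 0.1 * torch.eye(10)   # make A symmetric positive definite
L = torch.linalg.cholesky(A)
assert torch.allclose(A, L @ L.t(), atol=1e-6)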
def test_abs(self):
    size = 1000
    max_val = 1000
    original = torch.rand(size).mul(max_val)
    # Tensor filled with values from {-1, 1}
    switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

    types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
    for t in types:
        data = original.type(t)
        switch = switch.type(t)
        res = torch.mul(data, switch)
        # abs is used in assertEqual so we use the slow version instead
        self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

    # Check that the right abs function is called for LongTensor
    bignumber = 2 ** 31 + 1  # exceeds the 32-bit integer range
    res = torch.LongTensor((-bignumber,))
    self.assertGreater(res.abs()[0], 0)
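# Why the `2 ** 31 + 1` fix above matters: in Python, ^ is bitwise XOR, not
# exponentiation, so the original `2 ^ 31 + 1` evaluated to a small number and
# never exercised the 64-bit abs path:
assert 2 ^ 31 + 1 == 34            # parsed as 2 ^ (31 + 1), i.e. 2 XOR 32
assert 2 ** 31 + 1 == 2147483649   # the intended out-of-int32-range value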
def test_expand(self):
    tensor = torch.rand(1, 8, 1)
    tensor2 = torch.rand(5)
    template = torch.rand(4, 8, 5)
    target = template.size()
    self.assertEqual(tensor.expand_as(template).size(), target)
    self.assertEqual(tensor.expand(4, 8, 5).size(), target)
    self.assertEqual(tensor.expand(target).size(), target)
    self.assertEqual(tensor2.expand_as(template).size(), target)
    self.assertEqual(tensor2.expand(4, 8, 5).size(), target)
    self.assertEqual(tensor2.expand(target).size(), target)

    # test double expand
    self.assertEqual(tensor2.expand(1, 5).expand(2, 2, 5), tensor2.repeat(2, 2, 1))

    # test non-contiguous
    noncontig = torch.randn(5, 2, 1, 3)[:, 0]
    assert not noncontig.is_contiguous()
    self.assertEqual(noncontig.expand(2, 5, 4, 3), noncontig.contiguous().repeat(2, 1, 4, 1))

    # make sure it's compatible with unsqueeze
    expanded = tensor2.expand(1, 1, 5)
    unsqueezed = tensor2.unsqueeze(0).unsqueeze(1)
    self.assertEqual(expanded, unsqueezed)
    self.assertEqual(expanded.stride(), unsqueezed.stride())
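# A minimal sketch of the distinction this test relies on: expand broadcasts
# via zero strides and allocates nothing, while repeat materializes copies.
import torch

t = torch.rand(1, 8, 1)
e = t.expand(4, 8, 5)          # no copy; expanded dimensions get stride 0
r = t.repeat(4, 1, 5)          # real copy; 4 * 8 * 5 elements are written
assert e.stride() == (0, 1, 0)
assert e.shape == r.shape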
def test_MaxUnpool2d_output_size(self):
    m = nn.MaxPool2d(3, stride=2, return_indices=True)
    mu = nn.MaxUnpool2d(3, stride=2)
    big_t = torch.rand(1, 1, 6, 6)
    big_t[0][0][4][4] = 100
    output_big, indices_big = m(Variable(big_t))
    self.assertRaises(RuntimeError, lambda: mu(output_big, indices_big))

    small_t = torch.rand(1, 1, 5, 5)
    for i in range(0, 4, 2):
        for j in range(0, 4, 2):
            small_t[:, :, i, j] = 100
    output_small, indices_small = m(Variable(small_t))
    for h in range(3, 10):
        for w in range(3, 10):
            if 4 <= h <= 6 and 4 <= w <= 6:
                size = (h, w)
                if h == 5:
                    size = torch.LongStorage(size)
                elif h == 6:
                    size = torch.LongStorage((1, 1) + size)
                mu(output_small, indices_small, output_size=size)
            else:
                self.assertRaises(ValueError, lambda: mu(output_small, indices_small, (h, w)))
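# A minimal sketch of what output_size is for: pooling an odd spatial size is
# lossy, so the unpooler cannot infer the original size; passing output_size
# recovers it (it must stay within one stride of the default).
import torch
import torch.nn as nn

pool = nn.MaxPool2d(2, stride=2, return_indices=True)
unpool = nn.MaxUnpool2d(2, stride=2)
x = torch.rand(1, 1, 5, 5)
out, idx = pool(x)                            # out is 1 x 1 x 2 x 2
y = unpool(out, idx, output_size=x.size())    # default would be 1 x 1 x 4 x 4
assert y.shape == x.shape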
def test_batchnorm_eval(self):
    types = (torch.FloatTensor,)
    if TEST_CUDA:
        types += (torch.cuda.FloatTensor,)
    for tp in types:
        module = nn.BatchNorm1d(3).type(tp)
        module.eval()

        data = Variable(torch.rand(4, 3).type(tp), requires_grad=True)
        grad = torch.rand(4, 3).type(tp)

        # 1st pass
        res1 = module(data)
        res1.backward(grad)
        grad1 = data.grad.data.clone()

        # 2nd pass
        if data.grad is not None:
            data.grad.data.zero_()
        res2 = module(data)
        res2.backward(grad)
        grad2 = data.grad.data.clone()
        self.assertEqual(res1, res2)
        self.assertEqual(grad1, grad2)
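# The property under test: in eval() mode BatchNorm normalizes with its running
# statistics instead of per-batch statistics, so repeated passes over the same
# input are deterministic. A minimal sketch on current PyTorch:
import torch
import torch.nn as nn

bn = nn.BatchNorm1d(3)
bn.eval()                          # freeze: use running_mean / running_var
x = torch.rand(4, 3)
assert torch.equal(bn(x), bn(x))   # identical outputs across passes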
def _test_backward(self):
    v_t = torch.randn(5, 5)
    x_t = torch.randn(5, 5)
    y_t = torch.rand(5, 5) + 0.1
    z_t = torch.randn(5, 5)
    grad_output = torch.randn(5, 5)
    v = Variable(v_t, requires_grad=True)
    x = Variable(x_t, requires_grad=True)
    y = Variable(y_t, requires_grad=True)
    z = Variable(z_t, requires_grad=True)

    v.backward(grad_output)
    self.assertEqual(v.grad.data, grad_output)

    a = x + (y * z) + 4 * z ** 2 * x / y
    a.backward(grad_output)
    x_grad = 4 * z_t.pow(2) / y_t + 1
    y_grad = z_t - 4 * x_t * z_t.pow(2) / y_t.pow(2)
    z_grad = 8 * x_t * z_t / y_t + y_t
    self.assertEqual(x.grad.data, x_grad * grad_output)
    self.assertEqual(y.grad.data, y_grad * grad_output)
    self.assertEqual(z.grad.data, z_grad * grad_output)
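# The hand-derived gradients above can be cross-checked numerically. A minimal
# sketch using torch.autograd.gradcheck on current PyTorch (double precision,
# with y kept away from zero because the expression divides by y):
import torch
from torch.autograd import gradcheck

def f(x, y, z):
    return x + (y * z) + 4 * z ** 2 * x / y

x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
y = (torch.rand(5, 5, dtype=torch.double) + 0.1).requires_grad_()
z = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
assert gradcheck(f, (x, y, z))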
def test_L1Penalty(self):
    weight = 1
    m = nn.L1Penalty(weight, False, False)
    input = torch.rand(2, 10).add_(-0.5)
    input[0][0] = 0
    m.forward(input)
    grad = m.backward(input, torch.ones(input.size()))
    self.assertEqual(input.abs().sum() * weight, m.loss)
    true_grad = (input.gt(0).type_as(grad) +
                 input.lt(0).type_as(grad).mul_(-1)).mul_(weight)
    self.assertEqual(true_grad, grad)

    # Check that these don't raise errors
    m.__repr__()
    str(m)
def test_view(self):
    tensor = torch.rand(15)
    template = torch.rand(3, 5)
    empty = torch.Tensor()
    target = template.size()
    self.assertEqual(tensor.view_as(template).size(), target)
    self.assertEqual(tensor.view(3, 5).size(), target)
    self.assertEqual(tensor.view(torch.Size([3, 5])).size(), target)
    self.assertEqual(tensor.view(-1, 5).size(), target)
    self.assertEqual(tensor.view(3, -1).size(), target)

    tensor_view = tensor.view(5, 3)
    tensor_view.fill_(random.uniform(0, 1))
    self.assertEqual((tensor_view - tensor).abs().max(), 0)

    self.assertEqual(empty.view_as(empty), empty)
    self.assertEqual(empty.view(0), empty)
    self.assertRaises(RuntimeError, lambda: tensor.view(15, 0))
    self.assertRaises(RuntimeError, lambda: tensor.view(7, -1))
    self.assertRaises(RuntimeError, lambda: tensor.view(15, -1, -1))
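# What the fill_ check above exploits: view returns a tensor over the same
# storage, so in-place writes through the view show up in the original.
import torch

t = torch.rand(15)
v = t.view(3, 5)       # same storage, new shape; no copy is made
v[0, 0] = -1.0
assert t[0] == -1.0    # the write is visible through the original tensor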
def test_bernoulli(self):
    # SIZE is a module-level constant in the original test file
    # (100, matching t's element count)
    t = torch.ByteTensor(10, 10)

    def isBinary(t):
        return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum() == 0

    p = 0.5
    t.bernoulli_(p)
    self.assertTrue(isBinary(t))

    p = torch.rand(SIZE)
    t.bernoulli_(p)
    self.assertTrue(isBinary(t))

    q = torch.rand(5, 5)
    self.assertTrue(isBinary(q.bernoulli()))
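# A minimal sketch of the per-element form used above: torch.bernoulli draws
# each entry as 1 with the probability given at that position.
import torch

p = torch.full((5, 5), 0.3)
s = torch.bernoulli(p)               # s[i, j] is 1 with probability 0.3
assert ((s == 0) | (s == 1)).all()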
def feedforward_test():
    import os
    import uuid
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    from torch.autograd import Variable
    import torch2c

    fc1 = nn.Linear(10, 20)
    fc1.weight.data.normal_(0.0, 1.0)
    fc1.bias.data.normal_(0.0, 1.0)
    fc2 = nn.Linear(20, 2)
    fc2.weight.data.normal_(0.0, 1.0)
    fc2.bias.data.normal_(0.0, 1.0)
    model = lambda x: F.log_softmax(fc2(F.relu(fc1(x))))
    data = Variable(torch.rand(10, 10))

    out_path = 'out'
    if not os.path.isdir(out_path):
        os.mkdir(out_path)
    uid = str(uuid.uuid4())
    torch2c.compile(model(data), 'feedforward', os.path.join(out_path, uid), compile_test=True)
def act(self, state):
    return th.rand(self.output_size).numpy(), None
def _sample(self):
    if not self.processed:
        self._process()
        self.processed = True
    indices = (th.rand(self.batch_size) * len(self.rewards)).int()
    # TODO: Cleanup
    log_actions = []
    rewards = []
    critics = []
    entropies = []
    states = []
    advantages = []
    actions = []
    for i in indices:
        actions.append(self.actions[i].value)
        log_actions.append(self.actions[i].log_prob)
        rewards.append(self.rewards[i])
        critics.append(self.critics[i])
        entropies.append(self.entropies[i])
        states.append(self.states[i])
        advantages.append(self.advantages[i])
    actions = th.cat(actions, 0)
    log_actions = th.cat(log_actions, 0)
    rewards = th.cat(rewards, 0).view(-1)
    critics = th.cat(critics, 0).view(-1)
    entropies = th.cat(entropies, 0).view(-1)
    states = th.cat(states, 0)
    advantages = th.cat(advantages, 0).view(-1)
    return actions, log_actions, rewards, critics, entropies, states, advantages
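# The index draw above scales uniform floats into integer range. On current
# PyTorch the idiomatic equivalent is torch.randint; a minimal sketch:
import torch

batch_size, buffer_len = 32, 1000
indices = torch.randint(0, buffer_len, (batch_size,))   # uniform ints in [0, buffer_len)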
def bnparams(n):
    return cast({'weight': torch.rand(n), 'bias': torch.zeros(n)})
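# `cast` is a helper defined elsewhere in the source repository and not shown
# here. A hypothetical stand-in that converts every tensor in the dict to a
# target dtype might look like this (name and signature are assumptions):
import torch

def cast(params, dtype=torch.float32):
    return {k: v.to(dtype) for k, v in params.items()}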
def test_lerp(self):
    def TH_lerp(a, b, weight):
        return a + weight * (b - a)

    size = (100, 100)
    a = torch.rand(*size)
    b = torch.rand(*size)
    w = random.random()
    result = torch.lerp(a, b, w)
    expected = a.clone()
    expected.map2_(a, b, lambda _, a, b: TH_lerp(a, b, w))
    self.assertEqual(result, expected)
def test_clamp(self):
    m1 = torch.rand(100).mul(5).add(-2.5)  # uniform in [-2.5, 2.5]
    # pin one entry to each bound, just in case the random draw misses them
    min_val = -1
    max_val = 1
    m1[1] = min_val
    m1[2] = max_val

    res1 = m1.clone()
    res1.clamp_(min_val, max_val)
    res2 = m1.clone()
    for i in iter_indices(res2):
        res2[i] = max(min_val, min(max_val, res2[i]))
    self.assertEqual(res1, res2)
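# clamp is elementwise max-then-min, which is exactly what the reference loop
# above computes; a short sketch of the equivalence:
import torch

x = torch.rand(100) * 5 - 2.5
lo, hi = torch.tensor(-1.0), torch.tensor(1.0)
assert torch.equal(x.clamp(-1, 1), torch.min(torch.max(x, lo), hi))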