def test_sparse_repeat_2d():
    actual = sparse.to_dense().repeat(3, 2)
    res = sparse_repeat(sparse, 3, 2)
    assert torch.equal(actual, res.to_dense())

    actual = sparse.to_dense().repeat(1, 2)
    res = sparse_repeat(sparse, 1, 2)
    assert torch.equal(actual, res.to_dense())

    actual = sparse.to_dense().repeat(3, 1)
    res = sparse_repeat(sparse, 3, 1)
    assert torch.equal(actual, res.to_dense())

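# For context, a minimal sketch of the fixture the test above assumes:
# `sparse` would be a small 2D torch.sparse COO tensor defined at module
# scope, e.g. (hypothetical indices/values, not the original fixture):
#
#     indices = torch.LongTensor([[0, 1, 1], [2, 0, 2]])
#     values = torch.FloatTensor([3., 4., 5.])
#     sparse = torch.sparse.FloatTensor(indices, values, torch.Size([2, 3]))
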
def test_trace_components_normal_matrices():
    a_mat = torch.randn(3, 4)
    b_mat = torch.randn(3, 4)
    a_res, b_res = trace_components(a_mat, b_mat)
    assert torch.equal(a_res, a_mat)
    assert torch.equal(b_res, b_mat)

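# The identity behavior checked here: for ordinary dense matrices there is
# nothing to decompose, so trace_components is expected to hand its inputs
# back unchanged (the interesting cases presumably involve structured/lazy
# matrices elsewhere in the suite).
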
def test_sym_toeplitz_constructs_tensor_from_vector():
    c = torch.Tensor([1, 6, 4, 5])
    res = utils.toeplitz.sym_toeplitz(c)
    actual = torch.Tensor([
        [1, 6, 4, 5],
        [6, 1, 6, 4],
        [4, 6, 1, 6],
        [5, 4, 6, 1],
    ])
    assert torch.equal(res, actual)

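# The property asserted above: a symmetric Toeplitz matrix built from column
# c satisfies sym_toeplitz(c)[i, j] == c[abs(i - j)], which is why every
# diagonal of `actual` is constant.
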
def test_reverse():
    input = torch.Tensor([
        [1, 2, 3],
        [4, 5, 6],
    ])
    res = torch.Tensor([
        [3, 2, 1],
        [6, 5, 4],
    ])
    assert torch.equal(utils.reverse(input, dim=1), res)

def test_rcumsum():
    input = torch.Tensor([
        [1, 2, 3],
        [4, 5, 6],
    ])
    res = torch.Tensor([
        [6, 5, 3],
        [15, 11, 6],
    ])
    assert torch.equal(utils.rcumsum(input, dim=1), res)

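# A minimal sketch of the semantics under test, assuming utils.rcumsum is a
# reversed cumulative sum (flip along dim, accumulate, flip back); the helper
# below is hypothetical, not the library implementation:
def rcumsum_sketch(x, dim):
    idx = torch.arange(x.size(dim) - 1, -1, -1).long()  # reversing index
    return x.index_select(dim, idx).cumsum(dim).index_select(dim, idx)
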
def test_input_dropout_WITH_PROB_ZERO(self):
    rnn = EncoderRNN(self.vocab_size, 50, 16, input_dropout_p=0)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)
    output1, _ = rnn(self.input_var, self.lengths)
    output2, _ = rnn(self.input_var, self.lengths)
    self.assertTrue(torch.equal(output1.data, output2.data))

def test_input_dropout_WITH_NON_ZERO_PROB(self):
    rnn = EncoderRNN(self.vocab_size, 50, 16, input_dropout_p=0.5)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    equal = True
    for _ in range(50):
        output1, _ = rnn(self.input_var, self.lengths)
        output2, _ = rnn(self.input_var, self.lengths)
        if not torch.equal(output1.data, output2.data):
            equal = False
            break
    self.assertFalse(equal)

def test_dropout_WITH_PROB_ZERO(self):
    rnn = EncoderRNN(self.vocab_size, 50, 16, dropout_p=0)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)
    output1, _ = rnn(self.input_var, self.lengths)
    output2, _ = rnn(self.input_var, self.lengths)
    self.assertTrue(torch.equal(output1.data, output2.data))

def test_dropout_WITH_NON_ZERO_PROB(self):
    # It's critical to set n_layers=2 here, since dropout has no effect
    # when the RNN has only one layer, according to PyTorch's docs.
    rnn = EncoderRNN(self.vocab_size, 50, 16, n_layers=2, dropout_p=0.5)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    equal = True
    for _ in range(50):
        output1, _ = rnn(self.input_var, self.lengths)
        output2, _ = rnn(self.input_var, self.lengths)
        if not torch.equal(output1.data, output2.data):
            equal = False
            break
    self.assertFalse(equal)

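# Quick illustration of the constraint noted above: recent PyTorch versions
# emit a UserWarning when inter-layer dropout is requested on a single-layer
# RNN, since dropout is only applied between stacked layers (a sketch
# assuming torch.nn's warning behavior):
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         nn.GRU(10, 20, num_layers=1, dropout=0.5)
#     assert any("dropout" in str(w.message) for w in caught)
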
def test_k_1(self):
    """ When k=1, the output of topk decoder should be the same as a normal decoder. """
    batch_size = 1
    eos = 1

    for _ in range(10):
        # Repeat the randomized test multiple times
        decoder = DecoderRNN(self.vocab_size, 50, 16, 0, eos)
        for param in decoder.parameters():
            param.data.uniform_(-1, 1)
        topk_decoder = TopKDecoder(decoder, 1)

        output, _, other = decoder()
        output_topk, _, other_topk = topk_decoder()
        self.assertEqual(len(output), len(output_topk))

        finished = [False] * batch_size
        seq_scores = [0] * batch_size

        for t_step, t_output in enumerate(output):
            score, _ = t_output.topk(1)
            symbols = other['sequence'][t_step]
            for b in range(batch_size):
                seq_scores[b] += score[b].data[0]
                symbol = symbols[b].data[0]
                if not finished[b] and symbol == eos:
                    finished[b] = True
                    self.assertEqual(other_topk['length'][b], t_step + 1)
                    self.assertTrue(np.isclose(seq_scores[b], other_topk['score'][b][0]))
                if not finished[b]:
                    symbol_topk = other_topk['topk_sequence'][t_step][b].data[0][0]
                    self.assertEqual(symbol, symbol_topk)
                    self.assertTrue(torch.equal(t_output.data, output_topk[t_step].data))
            if sum(finished) == batch_size:
                break

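# Rationale for the equality checks above: beam search with beam width k=1
# keeps only the single best-scoring symbol at every step, which is exactly
# greedy decoding, so TopKDecoder(decoder, 1) must reproduce the plain
# DecoderRNN outputs step for step.
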
def test_input_dropout_WITH_NON_ZERO_PROB(self):
    rnn = DecoderRNN(self.vocab_size, 50, 16, 0, 1, input_dropout_p=0.5)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    equal = True
    for _ in range(50):
        output1, _, _ = rnn()
        output2, _, _ = rnn()
        if not torch.equal(output1[0].data, output2[0].data):
            equal = False
            break
    self.assertFalse(equal)

def test_dropout_WITH_PROB_ZERO(self):
    rnn = DecoderRNN(self.vocab_size, 50, 16, 0, 1, dropout_p=0)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)
    output1, _, _ = rnn()
    output2, _, _ = rnn()
    for prob1, prob2 in zip(output1, output2):
        self.assertTrue(torch.equal(prob1.data, prob2.data))

def test_dropout_WITH_NON_ZERO_PROB(self):
    rnn = DecoderRNN(self.vocab_size, 50, 16, 0, 1, n_layers=2, dropout_p=0.5)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    equal = True
    for _ in range(50):
        output1, _, _ = rnn()
        output2, _, _ = rnn()
        if not torch.equal(output1[0].data, output2[0].data):
            equal = False
            break
    self.assertFalse(equal)

def word_dropout_mask(X, dropout_rate, reserved_codes=()):
    """
    Computes a binary mask across batch examples based on a
    Bernoulli distribution with mean equal to dropout_rate.
    """
    probs = torch.zeros_like(X).float() + dropout_rate
    # zero out reserved_codes (avoid dropping reserved symbols)
    if len(reserved_codes) > 0:
        probs[sum((X == x) for x in reserved_codes)] = 0
    # return binary mask
    return torch.bernoulli(probs).byte()

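# Hypothetical usage of word_dropout_mask: drop roughly 20% of tokens while
# never touching the reserved padding (0) and <eos> (1) codes; the inputs
# below are illustrative, not from any original test data:
def _demo_word_dropout_mask():
    X = torch.LongTensor([[5, 2, 0], [7, 1, 0]])
    mask = word_dropout_mask(X, dropout_rate=0.2, reserved_codes=(0, 1))
    # entries with mask == 1 would typically be replaced by an <unk> code;
    # reserved positions are guaranteed to stay 0 in the mask
    assert not mask[(X == 0) | (X == 1)].any()
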
def test_python_ir(self):
    x = Variable(torch.Tensor([0.4]), requires_grad=True)
    y = Variable(torch.Tensor([0.7]), requires_grad=True)

    def doit(x, y):
        return torch.sigmoid(torch.tanh(x * (x + y)))

    traced, _ = torch.jit.trace(doit, (x, y))
    g = torch._C._jit_get_graph(traced)
    g2 = torch._C.Graph()
    g_to_g2 = {}
    for node in g.inputs():
        g_to_g2[node] = g2.addInput()
    for node in g.nodes():
        n_ = g2.createClone(node, lambda x: g_to_g2[x])
        g2.appendNode(n_)
        for o, no in zip(node.outputs(), n_.outputs()):
            g_to_g2[o] = no
    for node in g.outputs():
        g2.registerOutput(g_to_g2[node])

    t_node = g2.create("TensorTest").t_("a", torch.ones([2, 2]))
    assert t_node.attributeNames() == ["a"]
    g2.appendNode(t_node)
    assert torch.equal(torch.ones([2, 2]), t_node.t("a"))
    self.assertExpected(str(g2))

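# What the loops above exercise: a manual deep copy of the traced IR graph,
# remapping every input and output value through g_to_g2 so that the clone's
# edges point at nodes of the new graph rather than the original one.
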
def test_vector_to_parameters(self):
    conv1 = nn.Conv2d(3, 10, 5)
    fc1 = nn.Linear(10, 20)
    model = nn.Sequential(conv1, fc1)

    # total parameter count: conv1 has 3*10*5*5 + 10 = 760, fc1 has 10*20 + 20 = 220
    vec = Variable(torch.arange(0, 980))
    vector_to_parameters(vec, model.parameters())

    sample = next(model.parameters())[0, 0, 0]
    self.assertTrue(torch.equal(sample.data, vec.data[:5]))

def _test_InstanceNorm(self, cls, input):
    b, c = input.size(0), input.size(1)
    input_var = Variable(input)

    IN = cls(c, eps=0)
    output = IN(input_var)

    out_reshaped = output.transpose(1, 0).contiguous().view(c, -1)
    mean = out_reshaped.mean(1)
    var = out_reshaped.var(1, unbiased=False)

    self.assertAlmostEqual(torch.abs(mean.data).mean(), 0, delta=1e-5)
    self.assertAlmostEqual(torch.abs(var.data).mean(), 1, delta=1e-5)

    # If momentum==1 running_mean/var should be
    # equal to mean/var of the input
    IN = cls(c, momentum=1, eps=0)
    output = IN(input_var)

    input_reshaped = input_var.transpose(1, 0).contiguous().view(c, -1)
    mean = input_reshaped.mean(1)

    input_reshaped = input_var.transpose(1, 0).contiguous().view(c, b, -1)
    var = input_reshaped.var(2, unbiased=True)

    self.assertAlmostEqual(torch.abs(mean.data - IN.running_mean).mean(), 0, delta=1e-5)
    self.assertAlmostEqual(torch.abs(var.data.mean(1) - IN.running_var).mean(), 0, delta=1e-5)

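# Why momentum=1 behaves as asserted: PyTorch updates running statistics as
# running = (1 - momentum) * running + momentum * batch_stat, so momentum=1
# makes the running buffers an exact copy of the last batch's statistics.
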
def test_orthogonal(self):
    for as_variable in [True, False]:
        for use_gain in [True, False]:
            for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]:
                input_tensor = torch.zeros(tensor_size)
                gain = 1.0

                if as_variable:
                    input_tensor = Variable(input_tensor)

                if use_gain:
                    gain = self._random_float(0.1, 2)
                    init.orthogonal(input_tensor, gain=gain)
                else:
                    init.orthogonal(input_tensor)

                if as_variable:
                    input_tensor = input_tensor.data

                rows, cols = tensor_size[0], reduce(mul, tensor_size[1:])
                flattened_tensor = input_tensor.view(rows, cols)
                if rows > cols:
                    self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor),
                                     torch.eye(cols) * gain ** 2, prec=1e-6)
                else:
                    self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()),
                                     torch.eye(rows) * gain ** 2, prec=1e-6)

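# Property checked above: for an orthogonal init scaled by gain g, the
# flattened matrix Q satisfies Q^T Q = g^2 * I when rows > cols (orthonormal
# columns) and Q Q^T = g^2 * I otherwise (orthonormal rows).
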
def assertNotEqual(self, x, y, prec=None, message=''):
    if prec is None:
        prec = self.precision

    x, y = self.unwrapVariables(x, y)

    if torch.is_tensor(x) and torch.is_tensor(y):
        if x.size() != y.size():
            super(TestCase, self).assertNotEqual(x.size(), y.size())
        self.assertGreater(x.numel(), 0)

        y = y.type_as(x)
        y = y.cuda(device=x.get_device()) if x.is_cuda else y.cpu()

        nan_mask = x != x
        if torch.equal(nan_mask, y != y):
            diff = x - y
            if diff.is_signed():
                diff = diff.abs()
            diff[nan_mask] = 0
            max_err = diff.max()
            self.assertGreaterEqual(max_err, prec, message)
    elif type(x) == str and type(y) == str:
        super(TestCase, self).assertNotEqual(x, y)
    elif is_iterable(x) and is_iterable(y):
        super(TestCase, self).assertNotEqual(x, y)
    else:
        try:
            self.assertGreaterEqual(abs(x - y), prec, message)
            return
        except (TypeError, AssertionError):
            pass
        super(TestCase, self).assertNotEqual(x, y, message)

def test_erfinv(self):
    def checkType(tensor):
        inputValues = torch.randn(4, 4, out=tensor()).clamp(-2., 2.)
        self.assertEqual(tensor(inputValues).erf().erfinv(), tensor(inputValues))
        # test inf
        self.assertTrue(torch.equal(tensor([-1, 1]).erfinv(), tensor([float('-inf'), float('inf')])))
        # test nan
        self.assertEqual(tensor([-2, 2]).erfinv(), tensor([float('nan'), float('nan')]))

    checkType(torch.FloatTensor)
    checkType(torch.DoubleTensor)
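
# Mathematical background for the edge cases above: erfinv is the inverse of
# the error function, which maps R onto (-1, 1); hence erfinv(±1) diverges to
# ±inf and inputs outside [-1, 1] yield nan.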