# Assumes the usual module-level imports of the surrounding test file:
#   import torch
#   from itertools import product
#   from torch.autograd import Variable
def test_accumulate_grad(self):
    import sys

    grad_output = Variable(torch.ones(5, 5))
    # Exercise every combination of passing the gradient as a raw tensor
    # (grad_output.data, the "volatile" case) or as a Variable.
    for start_volatile, end_volatile in product((True, False), repeat=2):
        go1 = grad_output.data if start_volatile else grad_output
        go2 = grad_output.data if end_volatile else grad_output

        x = Variable(torch.randn(5, 5), requires_grad=True)
        y = x + 2
        y.backward(go1, retain_variables=True)
        x_grad = x.grad                      # keep a reference to the grad Variable
        x_grad_clone = x.grad.data.clone()   # snapshot of the first gradient

        del x
        y.backward(go2)

        # That's the only case when we can accumulate in-place, so only then
        # does the saved x_grad reference see the doubled gradient.
        if start_volatile and end_volatile:
            expected_grad = x_grad_clone * 2
        else:
            expected_grad = x_grad_clone
        self.assertEqual(x_grad.data, expected_grad)
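The loop calls backward twice through the same graph and checks how the gradient is accumulated: only when both grad_outputs are passed as raw tensors (the volatile case) can autograd add the second gradient in place into the existing .grad Variable, so only then does the saved x_grad reference end up holding twice the original gradient; otherwise accumulation produces a fresh Variable and the stale reference keeps the first value. The self argument and self.assertEqual indicate this method lives in a TestCase subclass; below is a minimal harness sketch for running it standalone, assuming a legacy (pre-0.4) PyTorch where Variable, volatile semantics and the retain_variables keyword still exist. The class name is hypothetical, not the original test file's.

# Minimal, hypothetical harness for the method above (legacy pre-0.4 PyTorch).
import unittest
from itertools import product

import torch
from torch.autograd import Variable


class TestAccumulateGrad(unittest.TestCase):
    # Paste the test_accumulate_grad method from above into this class.
    pass


if __name__ == '__main__':
    unittest.main()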