def test_sparse_default_std(self):
    for as_variable in [True, False]:
        for use_random_std in [True, False]:
            input_tensor = self._create_random_nd_tensor(2, size_min=30, size_max=35, as_variable=as_variable)
            rows, cols = input_tensor.size(0), input_tensor.size(1)
            sparsity = self._random_float(0.1, 0.2)

            std = 0.01  # default std
            if use_random_std:
                std = self._random_float(0.01, 0.2)
                init.sparse(input_tensor, sparsity=sparsity, std=std)
            else:
                init.sparse(input_tensor, sparsity=sparsity)

            if as_variable:
                input_tensor = input_tensor.data

            # every column must contain at least ceil(sparsity * cols) zeros,
            # and the surviving entries should look like samples from N(0, std)
            for col_idx in range(input_tensor.size(1)):
                column = input_tensor[:, col_idx]
                assert column[column == 0].nelement() >= math.ceil(sparsity * cols)

            assert self._is_normal(input_tensor[input_tensor != 0], 0, std)
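# A standalone sketch of what the test above exercises, assuming a recent PyTorch
# where the in-place initializer is spelled torch.nn.init.sparse_ (the snippets on
# this page use the pre-0.4 name init.sparse, which behaves the same way).
import math
import torch
from torch.nn import init

w = torch.empty(30, 35)                    # init.sparse_ only accepts 2D tensors
init.sparse_(w, sparsity=0.2, std=0.01)    # zero a fraction of every column, rest ~ N(0, std)

zeros_per_column = (w == 0).sum(dim=0)
assert int(zeros_per_column.min()) >= math.ceil(0.2 * w.size(0))   # at least 20% zeros per column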
def test_data_parallel_sparse(self):
    l = nn.Embedding(10, 5, sparse=True).cuda(1)
    i = Variable(torch.LongTensor(20, 5).random_(0, 10).cuda(1))
    expected_out = l(i)
    loss = expected_out.sum()
    loss.backward()
    expected_grads = []
    for param in l.parameters():
        expected_grads.append(param.grad.clone())
    dev_ids_list = [(0, 1), (1, 0)]
    for dev_id in dev_ids_list:
        with torch.cuda.device(dev_id[0]):
            l.cuda()
            l.zero_grad()
            out = dp.data_parallel(l, i, dev_id)
            loss = out.sum()
            loss.backward()
            self.assertEqual(out.get_device(), dev_id[0])
            self.assertEqual(out.data, expected_out.data)
            for expected, param in zip(expected_grads, l.parameters()):
                self.assertEqual(param.grad.data, expected.data)

    # Check for None device_ids
    l = l.cuda()
    out = dp.data_parallel(l, i)
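# Hedged sketch of the same call pattern outside the test harness: replicate a
# sparse nn.Embedding across two GPUs with torch.nn.parallel.data_parallel.
# Guarded so it is a no-op on machines without at least two CUDA devices.
import torch
import torch.nn as nn
from torch.nn.parallel import data_parallel

if torch.cuda.device_count() >= 2:
    emb = nn.Embedding(10, 5, sparse=True).cuda(0)
    idx = torch.randint(0, 10, (20, 5), device="cuda:0")
    out = data_parallel(emb, idx, device_ids=[0, 1])   # scatter idx, run replicas, gather on device 0
    out.sum().backward()                               # gradients accumulate back onto emb.weight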
def test_sparse_only_works_on_2d_inputs(self):
    for as_variable in [True, False]:
        for dims in [1, 3]:
            with self.assertRaises(ValueError):
                sparsity = self._random_float(0.1, 0.9)
                tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3, as_variable=as_variable)
                init.sparse(tensor, sparsity)
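# Quick demonstration of the 2D restriction checked above (again assuming the
# current init.sparse_ spelling): anything that is not a 2D tensor is rejected.
import torch
from torch.nn import init

try:
    init.sparse_(torch.empty(5), sparsity=0.5)   # 1D tensor
except ValueError as exc:
    print(exc)                                   # complains that only 2D tensors are supported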
def sparse(w, sparsity, std=0.01):
    # thin convenience wrapper around torch.nn.init.sparse
    return init.sparse(w, sparsity=sparsity, std=std)
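# Possible use of the wrapper above on a hypothetical Linear layer's weight matrix:
import torch.nn as nn

fc = nn.Linear(128, 64)
sparse(fc.weight.data, sparsity=0.1)   # zero roughly 10% of each column, rest ~ N(0, 0.01)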
def test_embedding_padding_idx(self):
    embedding = nn.Embedding(10, 20, padding_idx=0)
    input = Variable(torch.LongTensor([[0, 2, 4, 5], [4, 3, 0, 9]]))
    output = embedding(input)
    self.assertEqual(output[0][0].sum().data[0], 0)
    self.assertEqual(output[1][2].sum().data[0], 0)

    # same check with sparse gradients enabled
    embedding = nn.Embedding(10, 20, padding_idx=0, sparse=True)
    input = Variable(torch.LongTensor([[0, 2, 4, 5], [4, 3, 0, 9]]))
    output = embedding(input)
    self.assertEqual(output[0][0].sum().data[0], 0)
    self.assertEqual(output[1][2].sum().data[0], 0)
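# Hedged sketch tying the two behaviours above together in current PyTorch (no
# Variable wrapper needed): padding_idx keeps row 0 at zero and excludes it from
# gradients, while sparse=True makes weight.grad a sparse tensor that only a few
# optimizers (e.g. optim.SGD, optim.SparseAdam) can consume.
import torch
import torch.nn as nn

emb = nn.Embedding(10, 20, padding_idx=0, sparse=True)
idx = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]])
out = emb(idx)
assert out[0, 0].abs().sum().item() == 0      # padded positions embed to all-zeros
out.sum().backward()
print(emb.weight.grad.is_sparse)              # True: the gradient arrives as a sparse tensor

opt = torch.optim.SparseAdam(emb.parameters(), lr=1e-3)
opt.step()                                    # SparseAdam accepts sparse gradients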