def test_linspace(self):
    _from = random.random()
    to = _from + random.random()
    res1 = torch.linspace(_from, to, 137)
    res2 = torch.Tensor()
    torch.linspace(res2, _from, to, 137)
    self.assertEqual(res1, res2, 0)
    self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, 1))
    self.assertEqual(torch.linspace(0, 0, 1), torch.zeros(1), 0)
    # Check linspace for generating with start > end.
    self.assertEqual(torch.linspace(2, 0, 3), torch.Tensor((2, 1, 0)), 0)
    # Check linspace for non-contiguous tensors.
    x = torch.zeros(2, 3)
    y = torch.linspace(x.narrow(1, 1, 2), 0, 3, 4)
    self.assertEqual(x, torch.Tensor(((0, 0, 1), (0, 2, 3))), 0)
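# A minimal sketch of the torch.linspace call forms these tests exercise, assuming a
# reasonably recent PyTorch. The snippet above passes the output tensor as the first
# positional argument (an older call style); the later copies use the `out=` keyword:
import torch

vals = torch.linspace(0.0, 1.0, 5)        # tensor([0.00, 0.25, 0.50, 0.75, 1.00])
out = torch.empty(5)
torch.linspace(0.0, 1.0, 5, out=out)      # writes the same values into `out`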
def test_linspace(self):
    _from = random.random()
    to = _from + random.random()
    res1 = torch.linspace(_from, to, 137)
    res2 = torch.Tensor()
    torch.linspace(_from, to, 137, out=res2)
    self.assertEqual(res1, res2, 0)
    self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, 1))
    self.assertEqual(torch.linspace(0, 0, 1), torch.zeros(1), 0)
    # Check linspace for generating with start > end.
    self.assertEqual(torch.linspace(2, 0, 3), torch.Tensor((2, 1, 0)), 0)
    # Check linspace for non-contiguous tensors.
    x = torch.zeros(2, 3)
    y = torch.linspace(0, 3, 4, out=x.narrow(1, 1, 2))
    self.assertEqual(x, torch.Tensor(((0, 0, 1), (0, 2, 3))), 0)
def forward(ctx, theta, size):
    assert type(size) == torch.Size
    N, C, H, W = size
    ctx.size = size
    if theta.is_cuda:
        ctx.is_cuda = True
        AffineGridGenerator._enforce_cudnn(theta)
        grid = theta.new(N, H, W, 2)
        theta = theta.contiguous()
        torch._C._cudnn_affine_grid_generator_forward(theta, grid, N, C, H, W)
    else:
        ctx.is_cuda = False
        base_grid = theta.new(N, H, W, 3)
        linear_points = torch.linspace(-1, 1, W) if W > 1 else torch.Tensor([-1])
        base_grid[:, :, :, 0] = torch.ger(torch.ones(H), linear_points).expand_as(base_grid[:, :, :, 0])
        linear_points = torch.linspace(-1, 1, H) if H > 1 else torch.Tensor([-1])
        base_grid[:, :, :, 1] = torch.ger(linear_points, torch.ones(W)).expand_as(base_grid[:, :, :, 1])
        base_grid[:, :, :, 2] = 1
        ctx.base_grid = base_grid
        grid = torch.bmm(base_grid.view(N, H * W, 3), theta.transpose(1, 2))
        grid = grid.view(N, H, W, 2)
    return grid
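# For context, a minimal sketch of how such an affine grid is driven from user code in
# current PyTorch; torch.nn.functional.affine_grid / grid_sample are the public entry
# points, and the identity theta below is purely illustrative:
import torch
import torch.nn.functional as F

theta = torch.tensor([[[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]]])                    # (N, 2, 3) identity transform
x = torch.arange(48, dtype=torch.float32).view(1, 3, 4, 4)   # (N, C, H, W)
grid = F.affine_grid(theta, x.size(), align_corners=True)    # (N, H, W, 2), values in [-1, 1]
y = F.grid_sample(x, grid, align_corners=True)               # identity theta -> y approximately x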
def __init__(self, grid_size, grid_bounds):
    grid = torch.zeros(len(grid_bounds), grid_size)
    for i in range(len(grid_bounds)):
        grid_diff = float(grid_bounds[i][1] - grid_bounds[i][0]) / (grid_size - 2)
        grid[i] = torch.linspace(grid_bounds[i][0] - grid_diff,
                                 grid_bounds[i][1] + grid_diff,
                                 grid_size)
    inducing_points = torch.zeros(int(pow(grid_size, len(grid_bounds))), len(grid_bounds))
    prev_points = None
    for i in range(len(grid_bounds)):
        for j in range(grid_size):
            inducing_points[j * grid_size ** i:(j + 1) * grid_size ** i, i].fill_(grid[i, j])
            if prev_points is not None:
                inducing_points[j * grid_size ** i:(j + 1) * grid_size ** i, :i].copy_(prev_points)
        prev_points = inducing_points[:grid_size ** (i + 1), :(i + 1)]
    super(GridInducingPointModule, self).__init__(inducing_points)
    self.grid_size = grid_size
    self.grid_bounds = grid_bounds
    self.register_buffer('grid', grid)
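# The nested loops above enumerate the Cartesian product of the per-dimension grids; a
# minimal sketch of the same construction with torch.cartesian_prod (rows may come out
# in a different order than the loop produces; grid_size and grid_bounds are illustrative):
import torch

grid_size = 4
grid_bounds = [(-1.0, 1.0), (0.0, 2.0)]
grids = []
for lo, hi in grid_bounds:
    pad = (hi - lo) / (grid_size - 2)                 # extend one cell past each bound, as above
    grids.append(torch.linspace(lo - pad, hi + pad, grid_size))
inducing_points = torch.cartesian_prod(*grids)        # (grid_size ** len(grid_bounds), len(grid_bounds))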
def forward(ctx, theta, size):
    assert type(size) == torch.Size
    N, C, H, W = size
    ctx.size = size
    if theta.is_cuda:
        AffineGridGenerator._enforce_cudnn(theta)
        assert False
    ctx.is_cuda = False
    base_grid = theta.new(N, H, W, 3)
    linear_points = torch.linspace(-1, 1, W) if W > 1 else torch.Tensor([-1])
    base_grid[:, :, :, 0] = torch.ger(torch.ones(H), linear_points).expand_as(base_grid[:, :, :, 0])
    linear_points = torch.linspace(-1, 1, H) if H > 1 else torch.Tensor([-1])
    base_grid[:, :, :, 1] = torch.ger(linear_points, torch.ones(W)).expand_as(base_grid[:, :, :, 1])
    base_grid[:, :, :, 2] = 1
    ctx.base_grid = base_grid
    grid = torch.bmm(base_grid.view(N, H * W, 3), theta.transpose(1, 2))
    grid = grid.view(N, H, W, 2)
    return grid
def __init__(self, policy, target_policy, cmdl):
    self.name = "Categorical-PI"
    self.cmdl = cmdl
    self.policy = policy
    self.target_policy = target_policy
    self.lr = cmdl.lr
    self.gamma = cmdl.gamma
    self.optimizer = optim_factory(self.policy.parameters(), cmdl)
    self.optimizer.zero_grad()
    self.lr_generator = lr_schedule(cmdl.lr, 0.00001, cmdl.training_steps)
    self.dtype = dtype = TorchTypes(cmdl.cuda)
    self.v_min, self.v_max = v_min, v_max = cmdl.v_min, cmdl.v_max
    self.atoms_no = atoms_no = cmdl.atoms_no
    self.support = torch.linspace(v_min, v_max, atoms_no)
    self.support = self.support.type(dtype.FT)
    self.delta_z = (cmdl.v_max - cmdl.v_min) / (cmdl.atoms_no - 1)
    self.m = torch.zeros(cmdl.batch_size, self.atoms_no).type(dtype.FT)
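# The support spacing equals delta_z by construction; a quick numeric check using
# illustrative C51-style settings (not necessarily this agent's cmdl values):
import torch

v_min, v_max, atoms_no = -10.0, 10.0, 51
support = torch.linspace(v_min, v_max, atoms_no)
delta_z = (v_max - v_min) / (atoms_no - 1)
assert torch.allclose(support[1:] - support[:-1], torch.full((atoms_no - 1,), delta_z))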
def __init__(self, savefolder, imgdim, args, network):
    super(ManifoldVisualizer, self).__init__(savefolder, imgdim, args)
    self.network = network
    self.name = "manifold"
    self.parts = args.parts
    z_dim = [int(np.prod(self.network.code_dims))]
    self.flat_flag = z_dim[0] >= 2 * self.parts
    self.hierachical_flag = z_dim[0] >= self.parts and len(z_dim) > 1 and z_dim[1] >= 2
    assert self.flat_flag or self.hierachical_flag
    z_dim.insert(0, self.args.num_rows * self.args.num_rows)
    num_rows = self.args.num_rows
    code_x = torch.linspace(-2, 2, steps=num_rows).view(1, num_rows).repeat(num_rows, 1)
    code_y = code_x.t()
    if self.args.ngpus > 0:
        self.z = torch.cuda.FloatTensor(*z_dim).normal_()
        self.code = torch.stack([code_x, code_y], dim=2).view(-1, 2).cuda()
    else:
        self.z = torch.FloatTensor(*z_dim).normal_()
        self.code = torch.stack([code_x, code_y], dim=2).view(-1, 2)
def test_from_numpy(self):
    dtypes = [
        np.double,
        np.float,
        np.int64,
        np.int32,
        np.int16,
        np.uint8
    ]
    for dtype in dtypes:
        array = np.array([1, 2, 3, 4], dtype=dtype)
        self.assertEqual(torch.from_numpy(array), torch.Tensor([1, 2, 3, 4]))
    # check storage offset
    x = np.linspace(1, 125, 125)
    x.shape = (5, 5, 5)
    x = x[1]
    expected = torch.arange(1, 126).view(5, 5, 5)[1]
    self.assertEqual(torch.from_numpy(x), expected)
    # check noncontiguous
    x = np.linspace(1, 25, 25)
    x.shape = (5, 5)
    expected = torch.arange(1, 26).view(5, 5).t()
    self.assertEqual(torch.from_numpy(x.T), expected)
    # check noncontiguous with holes
    x = np.linspace(1, 125, 125)
    x.shape = (5, 5, 5)
    x = x[:, 1]
    expected = torch.arange(1, 126).view(5, 5, 5)[:, 1]
    self.assertEqual(torch.from_numpy(x), expected)
def test_from_numpy(self):
    dtypes = [
        np.double,
        np.float,
        np.int64,
        np.int32,
        np.int16,
        np.uint8
    ]
    for dtype in dtypes:
        array = np.array([1, 2, 3, 4], dtype=dtype)
        self.assertEqual(torch.from_numpy(array), torch.Tensor([1, 2, 3, 4]))
    # check storage offset
    x = np.linspace(1, 125, 125)
    x.shape = (5, 5, 5)
    x = x[1]
    expected = torch.arange(1, 126).view(5, 5, 5)[1]
    self.assertEqual(torch.from_numpy(x), expected)
    # check noncontiguous
    x = np.linspace(1, 25, 25)
    x.shape = (5, 5)
    expected = torch.arange(1, 26).view(5, 5).t()
    self.assertEqual(torch.from_numpy(x.T), expected)
    # check noncontiguous with holes
    x = np.linspace(1, 125, 125)
    x.shape = (5, 5, 5)
    x = x[:, 1]
    expected = torch.arange(1, 126).view(5, 5, 5)[:, 1]
    self.assertEqual(torch.from_numpy(x), expected)
    # check zero dimensional
    x = np.zeros((0, 2))
    self.assertRaises(RuntimeError, lambda: torch.from_numpy(x))
def __init__(self, n_bins=15):
    """
    n_bins (int): number of confidence interval bins
    """
    super(_ECELoss, self).__init__()
    bin_boundaries = torch.linspace(0, 1, n_bins + 1)
    self.bin_lowers = bin_boundaries[:-1]
    self.bin_uppers = bin_boundaries[1:]
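# For reference, a minimal sketch of how such bin edges are typically consumed when
# computing expected calibration error; `confidences` and `accuracies` are illustrative
# names, and this is the standard ECE formula rather than this class's exact forward pass:
import torch

def expected_calibration_error(confidences, accuracies, n_bins=15):
    bin_boundaries = torch.linspace(0, 1, n_bins + 1)
    ece = torch.zeros(1)
    for lower, upper in zip(bin_boundaries[:-1], bin_boundaries[1:]):
        in_bin = (confidences > lower) & (confidences <= upper)
        prop_in_bin = in_bin.float().mean()
        if prop_in_bin.item() > 0:
            acc_in_bin = accuracies[in_bin].float().mean()
            conf_in_bin = confidences[in_bin].mean()
            ece = ece + torch.abs(conf_in_bin - acc_in_bin) * prop_in_bin
    return ece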
def train_data(cuda=False):
    train_x = Variable(torch.linspace(0, 1, 10))
    train_y = Variable(torch.sign(torch.cos(train_x.data * (4 * math.pi))))
    if cuda:
        return train_x.cuda(), train_y.cuda()
    else:
        return train_x, train_y
def make_data(cuda=False):
    train_x = Variable(torch.linspace(0, 1, 100))
    train_y = Variable(torch.sin(train_x.data * (2 * math.pi)))
    test_x = Variable(torch.linspace(0, 1, 51))
    test_y = Variable(torch.sin(test_x.data * (2 * math.pi)))
    if cuda:
        train_x = train_x.cuda()
        train_y = train_y.cuda()
        test_x = test_x.cuda()
        test_y = test_y.cuda()
    return train_x, train_y, test_x, test_y
# All tests that pass with the exact kernel should pass with the interpolated kernel.
def test_interpolation():
    x = torch.linspace(0.01, 1, 100)
    grid = torch.linspace(-0.05, 1.05, 50)
    J, C = Interpolation().interpolate(grid, x)
    W = utils.toeplitz.index_coef_to_sparse(J, C, len(grid))
    test_func_grid = grid.pow(2)
    test_func_x = x.pow(2)
    interp_func_x = torch.dsmm(W, test_func_grid.unsqueeze(1)).squeeze()
    assert all(torch.abs(interp_func_x - test_func_x) / (test_func_x + 1e-10) < 1e-5)
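# For intuition, a hedged sketch of what an interpolation matrix W accomplishes here: it
# maps values known on a coarse grid onto arbitrary inputs x. This uses plain piecewise
# linear interpolation as an illustration, not the actual scheme of the library under test:
import torch

grid = torch.linspace(-0.05, 1.05, 50)
x = torch.linspace(0.01, 1, 100)
step = grid[1] - grid[0]
lo = ((x - grid[0]) / step).floor().long().clamp(0, len(grid) - 2)
frac = (x - grid[lo]) / step
f_grid = grid.pow(2)
f_interp = (1 - frac) * f_grid[lo] + frac * f_grid[lo + 1]
assert torch.max(torch.abs(f_interp - x.pow(2))) < 1e-3   # linear interp of a smooth function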
def __init__(self, policy, cmdl):
    """Assumes policy returns an autograd.Variable"""
    self.name = "CP"
    self.cmdl = cmdl
    self.policy = policy
    self.dtype = dtype = TorchTypes(cmdl.cuda)
    self.support = torch.linspace(cmdl.v_min, cmdl.v_max, cmdl.atoms_no)
    self.support = self.support.type(dtype.FT)
def make_code(self, num_rows):
    z_dim = [int(np.prod(self.network.code_dims))]
    code_x = torch.linspace(-2, 2, steps=num_rows).view(1, num_rows).repeat(num_rows, 1)
    code_y = code_x.t()
    if self.args.ngpus > 0:
        z = torch.cuda.FloatTensor(*z_dim).normal_()
        code = torch.stack([code_x, code_y], dim=2).view(-1, 2).cuda()
    else:
        z = torch.FloatTensor(*z_dim).normal_()
        code = torch.stack([code_x, code_y], dim=2).view(-1, 2)
    return code
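# The code_x / code_y construction above is a 2-D grid over [-2, 2]; a minimal equivalent
# sketch using torch.meshgrid (the indexing keyword requires PyTorch >= 1.10, and
# num_rows is illustrative):
import torch

num_rows = 8
xs = torch.linspace(-2, 2, steps=num_rows)
code_x, code_y = torch.meshgrid(xs, xs, indexing='xy')     # code_x varies along columns, code_y along rows
code = torch.stack([code_x, code_y], dim=2).view(-1, 2)    # (num_rows ** 2, 2)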
def test_from_numpy(self):
    dtypes = [
        np.double,
        np.float,
        np.float16,
        np.int64,
        np.int32,
        np.int16,
        np.uint8
    ]
    for dtype in dtypes:
        array = np.array([1, 2, 3, 4], dtype=dtype)
        tensor_from_array = torch.from_numpy(array)
        # TODO: change to tensor equality check once HalfTensor
        # implements `==`
        for i in range(len(array)):
            self.assertEqual(tensor_from_array[i], array[i])
    # check storage offset
    x = np.linspace(1, 125, 125)
    x.shape = (5, 5, 5)
    x = x[1]
    expected = torch.arange(1, 126).view(5, 5, 5)[1]
    self.assertEqual(torch.from_numpy(x), expected)
    # check noncontiguous
    x = np.linspace(1, 25, 25)
    x.shape = (5, 5)
    expected = torch.arange(1, 26).view(5, 5).t()
    self.assertEqual(torch.from_numpy(x.T), expected)
    # check noncontiguous with holes
    x = np.linspace(1, 125, 125)
    x.shape = (5, 5, 5)
    x = x[:, 1]
    expected = torch.arange(1, 126).view(5, 5, 5)[:, 1]
    self.assertEqual(torch.from_numpy(x), expected)
    # check zero dimensional
    x = np.zeros((0, 2))
    self.assertEqual(torch.from_numpy(x).shape, tuple())
    self.assertEqual(torch.autograd.Variable.from_numpy(x).shape, [0])
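# A minimal sketch of the memory-sharing behaviour these tests rely on: torch.from_numpy
# wraps the existing NumPy buffer instead of copying it, so writes are visible on both sides:
import numpy as np
import torch

a = np.linspace(1, 5, 5)          # float64 array [1., 2., 3., 4., 5.]
t = torch.from_numpy(a)           # shares memory with `a`
a[0] = 100.0
assert t[0].item() == 100.0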
def _get_categorical(self, next_states, rewards, mask):
    batch_sz = next_states.size(0)
    gamma = self.gamma
    # Compute probabilities p(x, a)
    probs = self.target_policy(next_states).data
    qs = torch.mul(probs, self.support.expand_as(probs))
    argmax_a = qs.sum(2).max(1)[1].unsqueeze(1).unsqueeze(1)
    action_mask = argmax_a.expand(batch_sz, 1, self.atoms_no)
    qa_probs = probs.gather(1, action_mask).squeeze()
    # Mask gamma and reshape it together with rewards to fit p(x, a).
    rewards = rewards.expand_as(qa_probs)
    gamma = (mask.float() * gamma).expand_as(qa_probs)
    # Compute projection of the application of the Bellman operator.
    bellman_op = rewards + gamma * self.support.unsqueeze(0).expand_as(rewards)
    bellman_op = torch.clamp(bellman_op, self.v_min, self.v_max)
    # Compute categorical indices for distributing the probability
    m = self.m.fill_(0)
    b = (bellman_op - self.v_min) / self.delta_z
    l = b.floor().long()
    u = b.ceil().long()
    # Distribute probability
    """
    for i in range(batch_sz):
        for j in range(self.atoms_no):
            uidx = u[i][j]
            lidx = l[i][j]
            m[i][lidx] = m[i][lidx] + qa_probs[i][j] * (uidx - b[i][j])
            m[i][uidx] = m[i][uidx] + qa_probs[i][j] * (b[i][j] - lidx)
    for i in range(batch_sz):
        m[i].index_add_(0, l[i], qa_probs[i] * (u[i].float() - b[i]))
        m[i].index_add_(0, u[i], qa_probs[i] * (b[i] - l[i].float()))
    """
    # Optimized by https://github.com/tudor-berariu
    offset = torch.linspace(0, ((batch_sz - 1) * self.atoms_no), batch_sz)\
        .type(self.dtype.LT)\
        .unsqueeze(1).expand(batch_sz, self.atoms_no)
    m.view(-1).index_add_(0, (l + offset).view(-1),
                          (qa_probs * (u.float() - b)).view(-1))
    m.view(-1).index_add_(0, (u + offset).view(-1),
                          (qa_probs * (b - l.float())).view(-1))
    return Variable(m)
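# The offset trick above flattens the (batch, atoms) matrix so a single index_add_ call
# can scatter into every row at once; a toy sketch of the same idea with illustrative values:
import torch

batch_sz, atoms_no = 2, 3
m = torch.zeros(batch_sz, atoms_no)
l = torch.tensor([[0, 1, 2], [1, 1, 0]])                    # per-row target atom indices
w = torch.ones(batch_sz, atoms_no)                          # per-row probability mass to add
offset = torch.arange(batch_sz).unsqueeze(1) * atoms_no     # start of each row in the flat view
m.view(-1).index_add_(0, (l + offset).view(-1), w.view(-1))
# now m[i, l[i, j]] has accumulated w[i, j] for every (i, j)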