def th_map_coordinates(input, coords, order=1):
"""Tensorflow verion of scipy.ndimage.map_coordinates
Note that coords is transposed and only 2D is supported
Parameters
----------
input : torch.Tensor. shape = (s, s)
coords : torch.Tensor. shape = (n_points, 2)
"""
assert order == 1
input_size = input.size(0)
coords = torch.clamp(coords, 0, input_size - 1)
coords_lt = coords.floor().long()
coords_rb = coords.ceil().long()
coords_lb = torch.stack([coords_lt[:, 0], coords_rb[:, 1]], 1)
coords_rt = torch.stack([coords_rb[:, 0], coords_lt[:, 1]], 1)
vals_lt = th_gather_2d(input, coords_lt.detach())
vals_rb = th_gather_2d(input, coords_rb.detach())
vals_lb = th_gather_2d(input, coords_lb.detach())
vals_rt = th_gather_2d(input, coords_rt.detach())
coords_offset_lt = coords - coords_lt.type(coords.data.type())
vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, 0]
vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, 0]
mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, 1]
return mapped_vals
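The th_gather_2d helper used above is not defined in this snippet. A minimal sketch, assuming it only needs to look up values at integer (row, col) positions of a 2-D tensor, could be:

def th_gather_2d(input, coords):
    # Hypothetical helper (not from the original source): gather input[r, c]
    # for every integer (r, c) pair in coords, a LongTensor of shape (n_points, 2).
    inds = coords[:, 0] * input.size(1) + coords[:, 1]
    return torch.index_select(input.view(-1), 0, inds)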
Python clamp() example source code
def test_clamp(self):
m1 = torch.rand(100).mul(5).add(-2.5) # uniform in [-2.5, 2.5]
# just in case we're extremely lucky.
min_val = -1
max_val = 1
m1[1] = min_val
m1[2] = max_val
res1 = m1.clone()
res1.clamp_(min_val, max_val)
res2 = m1.clone()
for i in iter_indices(res2):
res2[i] = max(min_val, min(max_val, res2[i]))
self.assertEqual(res1, res2)
res1 = torch.clamp(m1, min=min_val)
res2 = m1.clone()
for i in iter_indices(res2):
res2[i] = max(min_val, res2[i])
self.assertEqual(res1, res2)
res1 = torch.clamp(m1, max=max_val)
res2 = m1.clone()
for i in iter_indices(res2):
res2[i] = min(max_val, res2[i])
self.assertEqual(res1, res2)
def test_masked_select(self):
num_src = 10
src = torch.randn(num_src)
mask = torch.rand(num_src).clamp(0, 1).mul(2).floor().byte()
dst = src.masked_select(mask)
dst2 = []
for i in range(num_src):
if mask[i]:
dst2 += [src[i]]
self.assertEqual(dst, torch.Tensor(dst2), 0)
def hinge_loss(positive_predictions, negative_predictions, mask=None):
"""
Hinge pairwise loss function.
Parameters
----------
positive_predictions: tensor
Tensor containing predictions for known positive items.
negative_predictions: tensor
Tensor containing predictions for sampled negative items.
mask: tensor, optional
A binary tensor used to zero the loss from some entries
of the loss tensor.
Returns
-------
loss, float
The mean value of the loss function.
"""
loss = torch.clamp(negative_predictions -
positive_predictions +
1.0, 0.0)
if mask is not None:
mask = mask.float()
loss = loss * mask
return loss.sum() / mask.sum()
return loss.mean()
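A brief usage sketch with hypothetical data (the names below are illustrative, not from the original source):

positive = torch.randn(128)   # scores for known positive items
negative = torch.randn(128)   # scores for sampled negative items
loss = hinge_loss(positive, negative)                          # mean hinge loss over all pairs
masked = hinge_loss(positive, negative, mask=torch.ones(128))  # an all-ones mask gives the same mean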
def logistic_loss(observed_ratings, predicted_ratings):
"""
Logistic loss for explicit data.
Parameters
----------
observed_ratings: tensor
Tensor containing observed ratings which
should be +1 or -1 for this loss function.
predicted_ratings: tensor
Tensor containing rating predictions.
Returns
-------
loss, float
The mean value of the loss function.
"""
assert_no_grad(observed_ratings)
# Convert target classes from (-1, 1) to (0, 1)
observed_ratings = torch.clamp(observed_ratings, 0, 1)
return F.binary_cross_entropy_with_logits(predicted_ratings,
observed_ratings,
size_average=True)
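Because the clamp maps targets from {-1, +1} to {0, 1}, this is equivalent to the familiar logistic (log-sigmoid) loss on the raw labels: for a logit x and label y in {-1, +1},

\ell(x, y) = \log\bigl(1 + e^{-y x}\bigr),

averaged over all ratings.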
def _set_hook_func(self):
def func_b(module, grad_in, grad_out):
self.all_grads[id(module)] = grad_in[0].cpu()
# Cut off negative gradients
if isinstance(module, nn.ReLU):
return (torch.clamp(grad_in[0], min=0.0),)
for module in self.model.named_modules():
module[1].register_backward_hook(func_b)
def log_Bernoulli(x, mean, average=False, dim=None):
probs = torch.clamp( mean, min=1e-7, max=1.-1e-7 )
log_bernoulli = x * torch.log( probs ) + (1. - x ) * torch.log( 1. - probs )
if average:
return torch.mean( log_bernoulli, dim )
else:
return torch.sum( log_bernoulli, dim )
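The clamp here is purely for numerical stability: it keeps the predicted probabilities inside [1e-7, 1 - 1e-7] so that neither log term can reach -inf. The quantity being summed (or averaged) over dim is the elementwise Bernoulli log-likelihood

\log p(x \mid \mu) = x \log \mu + (1 - x) \log(1 - \mu).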
def binary_cross_entropy_with_logits(input, target, weight=None, size_average=True):
r"""Function that measures Binary Cross Entropy between target and output
logits.
See :class:`~torch.nn.BCEWithLogitsLoss` for details.
Args:
input: Variable of arbitrary shape
target: Variable of the same shape as input
weight (Variable, optional): a manual rescaling weight
if provided it's repeated to match input tensor shape
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. However, if the field
size_average is set to False, the losses are instead summed
for each minibatch. Default: True
Examples::
>>> input = autograd.Variable(torch.randn(3), requires_grad=True)
>>> target = autograd.Variable(torch.FloatTensor(3).random_(2))
>>> loss = F.binary_cross_entropy_with_logits(input, target)
>>> loss.backward()
"""
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
if weight is not None:
loss = loss * weight
if size_average:
return loss.mean()
else:
return loss.sum()
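For reference, the two lines that build loss implement the numerically stable form of binary cross-entropy with logits. For a logit x and target z,

\ell(x, z) = x - x z + \log(1 + e^{-x}),

and with m = \max(-x, 0) the last term is rewritten as \log(1 + e^{-x}) = m + \log(e^{-m} + e^{-x - m}), which avoids overflow when x is a large negative number; m corresponds to max_val in the code.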
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
r"""Returns cosine similarity between x1 and x2, computed along dim.
.. math ::
\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}
Args:
x1 (Variable): First input.
x2 (Variable): Second input (of size matching x1).
dim (int, optional): Dimension of vectors. Default: 1
eps (float, optional): Small value to avoid division by zero.
Default: 1e-8
Shape:
- Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`.
- Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`.
Example::
>>> input1 = autograd.Variable(torch.randn(100, 128))
>>> input2 = autograd.Variable(torch.randn(100, 128))
>>> output = F.cosine_similarity(input1, input2)
>>> print(output)
"""
w12 = torch.sum(x1 * x2, dim)
w1 = torch.norm(x1, 2, dim)
w2 = torch.norm(x2, 2, dim)
return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
def test_erfinv(self):
def checkType(tensor):
inputValues = torch.randn(4, 4, out=tensor()).clamp(-2., 2.)
self.assertEqual(tensor(inputValues).erf().erfinv(), tensor(inputValues))
# test inf
self.assertTrue(torch.equal(tensor([-1, 1]).erfinv(), tensor([float('-inf'), float('inf')])))
# test nan
self.assertEqual(tensor([-2, 2]).erfinv(), tensor([float('nan'), float('nan')]))
checkType(torch.FloatTensor)
checkType(torch.DoubleTensor)
def forward(self, anchor, positive, negative):
d_p = self.pdist.forward(anchor, positive)
d_n = self.pdist.forward(anchor, negative)
dist_hinge = torch.clamp(self.margin + d_p - d_n, min=0.0)
loss = torch.mean(dist_hinge)
return loss
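In formula form this is the standard triplet margin loss over anchor a, positive p, and negative n,

\mathcal{L} = \frac{1}{N} \sum_i \max\bigl(0,\ \mathrm{margin} + d(a_i, p_i) - d(a_i, n_i)\bigr),

where d is the distance computed by self.pdist and the clamp implements the \max(0, \cdot) hinge.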
def clip(tensor, a_min=None, a_max=None, inplace=False):
if a_max is None:
a_max = torch.max(tensor)
if a_min is None:
a_min = torch.min(tensor)
if inplace:
return torch.clamp(tensor, a_min, a_max, out=tensor)
else:
return torch.clamp(tensor, a_min, a_max)
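A brief usage sketch (hypothetical values), mirroring numpy.clip semantics:

x = torch.randn(5)
low = clip(x, a_min=-0.5)                            # clamp only from below; upper bound defaults to x.max()
high = clip(x, a_max=0.5)                            # clamp only from above; lower bound defaults to x.min()
both = clip(x, a_min=-0.5, a_max=0.5, inplace=True)  # writes the result back into x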
def __call__(self, *inputs):
outputs = []
for idx, _input in enumerate(inputs):
_input = th.clamp(_input.float().add(self.value).type(_input.type()), 0, 1)
outputs.append(_input)
return outputs if idx >= 1 else outputs[0]
def __call__(self, *inputs):
outputs = []
for idx, _input in enumerate(inputs):
_in_gs = Grayscale(keep_channels=True)(_input)
alpha = 1.0 + self.value
_in = th.clamp(_blend(_input, _in_gs, alpha), 0, 1)
outputs.append(_in)
return outputs if idx >= 1 else outputs[0]
def __call__(self, *inputs):
outputs = []
for idx, _input in enumerate(inputs):
channel_means = _input.mean(1).mean(2)
channel_means = channel_means.expand_as(_input)
_input = th.clamp((_input - channel_means) * self.value + channel_means,0,1)
outputs.append(_input)
return outputs if idx >= 1 else outputs[0]
def F_bilinear_interp2d(input, coords):
"""
bilinear interpolation of 2d torch.autograd.Variable
"""
x = torch.clamp(coords[:,:,0], 0, input.size(1)-2)
x0 = x.floor()
x1 = x0 + 1
y = torch.clamp(coords[:,:,1], 0, input.size(2)-2)
y0 = y.floor()
y1 = y0 + 1
stride = torch.LongTensor(input.stride())
x0_ix = x0.mul(stride[1]).long()
x1_ix = x1.mul(stride[1]).long()
y0_ix = y0.mul(stride[2]).long()
y1_ix = y1.mul(stride[2]).long()
input_flat = input.view(input.size(0),-1).contiguous()
vals_00 = input_flat.gather(1, x0_ix.add(y0_ix).detach())
vals_10 = input_flat.gather(1, x1_ix.add(y0_ix).detach())
vals_01 = input_flat.gather(1, x0_ix.add(y1_ix).detach())
vals_11 = input_flat.gather(1, x1_ix.add(y1_ix).detach())
xd = x - x0
yd = y - y0
xm = 1 - xd
ym = 1 - yd
x_mapped = (vals_00.mul(xm).mul(ym) +
vals_10.mul(xd).mul(ym) +
vals_01.mul(xm).mul(yd) +
vals_11.mul(xd).mul(yd))
return x_mapped.view_as(input)
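The weighted sum at the end is the usual bilinear interpolation formula. With fractional offsets x_d = x - x_0 and y_d = y - y_0,

f(x, y) \approx f_{00} (1 - x_d)(1 - y_d) + f_{10}\, x_d (1 - y_d) + f_{01} (1 - x_d)\, y_d + f_{11}\, x_d\, y_d,

where f_{00} \ldots f_{11} are the four neighbouring values gathered above; clamping the coordinates to [0, size - 2] keeps the x_1/y_1 neighbours inside the image.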
def F_batch_bilinear_interp2d(input, coords):
"""
input : torch.Tensor
size = (N,H,W,C)
coords : torch.Tensor
size = (N,H*W*C,2)
"""
x = torch.clamp(coords[:,:,0], 0, input.size(2)-2)
x0 = x.floor()
x1 = x0 + 1
y = torch.clamp(coords[:,:,1], 0, input.size(3)-2)
y0 = y.floor()
y1 = y0 + 1
stride = torch.LongTensor(input.stride())
x0_ix = x0.mul(stride[2]).long()
x1_ix = x1.mul(stride[2]).long()
y0_ix = y0.mul(stride[3]).long()
y1_ix = y1.mul(stride[3]).long()
input_flat = input.view(input.size(0),-1).contiguous()
vals_00 = input_flat.gather(1, x0_ix.add(y0_ix).detach())
vals_10 = input_flat.gather(1, x1_ix.add(y0_ix).detach())
vals_01 = input_flat.gather(1, x0_ix.add(y1_ix).detach())
vals_11 = input_flat.gather(1, x1_ix.add(y1_ix).detach())
xd = x - x0
yd = y - y0
xm = 1 - xd
ym = 1 - yd
x_mapped = (vals_00.mul(xm).mul(ym) +
vals_10.mul(xd).mul(ym) +
vals_01.mul(xm).mul(yd) +
vals_11.mul(xd).mul(yd))
return x_mapped.view_as(input)
def th_bilinear_interp2d(input, coords):
"""
bilinear interpolation in 2d
"""
x = th.clamp(coords[:,:,0], 0, input.size(1)-2)
x0 = x.floor()
x1 = x0 + 1
y = th.clamp(coords[:,:,1], 0, input.size(2)-2)
y0 = y.floor()
y1 = y0 + 1
stride = th.LongTensor(input.stride())
x0_ix = x0.mul(stride[1]).long()
x1_ix = x1.mul(stride[1]).long()
y0_ix = y0.mul(stride[2]).long()
y1_ix = y1.mul(stride[2]).long()
input_flat = input.view(input.size(0),-1)
vals_00 = input_flat.gather(1, x0_ix.add(y0_ix))
vals_10 = input_flat.gather(1, x1_ix.add(y0_ix))
vals_01 = input_flat.gather(1, x0_ix.add(y1_ix))
vals_11 = input_flat.gather(1, x1_ix.add(y1_ix))
xd = x - x0
yd = y - y0
xm = 1 - xd
ym = 1 - yd
x_mapped = (vals_00.mul(xm).mul(ym) +
vals_10.mul(xd).mul(ym) +
vals_01.mul(xm).mul(yd) +
vals_11.mul(xd).mul(yd))
return x_mapped.view_as(input)
def forward(self, input_vb):
# NOTE: the operation order must be the following: control, access{write, read}, output
# 1. first feed {input, read_vec_{t-1}} to controller
hidden_vb = self.controller.forward(input_vb, self.read_vec_vb)
# 2. then we write to memory_{t-1} to get memory_{t}; then read from memory_{t} to get read_vec_{t}
self.read_vec_vb = self.accessor.forward(hidden_vb)
# 3. finally we concat the output from the controller and the current read_vec_{t} to get the final output
output_vb = self.hid_to_out(torch.cat((hidden_vb.view(-1, self.hidden_dim),
self.read_vec_vb.view(-1, self.read_vec_dim)), 1))
# we clip the output values here
return F.sigmoid(torch.clamp(output_vb, min=-self.clip_value, max=self.clip_value)).view(1, self.batch_size, self.output_dim)