def intersect(box_a, box_b):
    """ We resize both tensors to [A,B,2] without new malloc:
    [A,2] -> [A,1,2] -> [A,B,2]
    [B,2] -> [1,B,2] -> [A,B,2]
    Then we compute the area of intersect between box_a and box_b.
    Args:
      box_a: (tensor) bounding boxes, Shape: [A,4].
      box_b: (tensor) bounding boxes, Shape: [B,4].
    Return:
      (tensor) intersection area, Shape: [A,B].
    """
    A = box_a.size(0)
    B = box_b.size(0)
    max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
                       box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
    min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
                       box_b[:, :2].unsqueeze(0).expand(A, B, 2))
    inter = torch.clamp((max_xy - min_xy), min=0)
    return inter[:, :, 0] * inter[:, :, 1]
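For context, a minimal usage sketch built on the function above. The `jaccard` helper and the toy boxes are illustrative additions (not part of the original snippet), assuming `import torch` and corner-format boxes [xmin, ymin, xmax, ymax]:

import torch

def jaccard(box_a, box_b):
    # illustrative IoU: intersection over union of every pair of boxes
    inter = intersect(box_a, box_b)                                   # [A,B]
    area_a = ((box_a[:, 2] - box_a[:, 0]) *
              (box_a[:, 3] - box_a[:, 1])).unsqueeze(1).expand_as(inter)
    area_b = ((box_b[:, 2] - box_b[:, 0]) *
              (box_b[:, 3] - box_b[:, 1])).unsqueeze(0).expand_as(inter)
    return inter / (area_a + area_b - inter)                          # [A,B]

boxes_a = torch.Tensor([[0., 0., 2., 2.], [1., 1., 3., 3.]])
boxes_b = torch.Tensor([[1., 1., 2., 2.]])
print(jaccard(boxes_a, boxes_b))  # [[0.25], [0.25]]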
attack_carlini_wagner_l2.py (source file from the project pytorch-nips2017-attack-example, by rwightman)
def _loss(self, output, target, dist, scale_const):
    # compute the probability of the label class versus the maximum other
    real = (target * output).sum(1)
    other = ((1. - target) * output - target * 10000.).max(1)[0]
    if self.targeted:
        # if targeted, optimize for making the other class most likely
        loss1 = torch.clamp(other - real + self.confidence, min=0.)  # equiv to max(..., 0.)
    else:
        # if non-targeted, optimize for making this class least likely.
        loss1 = torch.clamp(real - other + self.confidence, min=0.)  # equiv to max(..., 0.)
    loss1 = torch.sum(scale_const * loss1)
    loss2 = dist.sum()
    loss = loss1 + loss2
    return loss
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    r"""Returns cosine similarity between x1 and x2, computed along dim.
    Args:
        x1 (Variable): First input.
        x2 (Variable): Second input (of size matching x1).
        dim (int, optional): Dimension of vectors. Default: 1
        eps (float, optional): Small value to avoid division by zero. Default: 1e-8
    Shape:
        - Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`.
        - Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`.
    """
    w12 = torch.sum(x1 * x2, dim)
    w1 = torch.norm(x1, 2, dim)
    w2 = torch.norm(x2, 2, dim)
    return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
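A quick sketch of the helper above on toy data (plain tensors; older PyTorch code would wrap these in `Variable`). The input values are illustrative:

import torch

a = torch.Tensor([[1., 0., 0.], [0., 1., 0.]])
b = torch.Tensor([[1., 0., 0.], [1., 0., 0.]])
print(cosine_similarity(a, b, dim=1))  # [1., 0.]: identical rows vs. orthogonal rows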
def normalize(input, p=2, dim=1, eps=1e-12):
    r"""Performs :math:`L_p` normalization of inputs over specified dimension.
    Does:
    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}
    for each subtensor v over dimension dim of input. Each subtensor is
    flattened into a vector, i.e. :math:`\lVert v \rVert_p` is not a matrix norm.
    With default arguments normalizes over the second dimension with Euclidean norm.
    Args:
        input: input tensor of any shape
        p (float): the exponent value in the norm formulation
        dim (int): the dimension to reduce
        eps (float): small value to avoid division by zero
    """
    return input / input.norm(p, dim, True).clamp(min=eps).expand_as(input)
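And a small sketch of `normalize` doing row-wise L2 normalization (toy values, illustrative only):

import torch

x = torch.Tensor([[3., 4.], [0., 5.]])
print(normalize(x, p=2, dim=1))  # [[0.6, 0.8], [0., 1.]], each row scaled to unit norm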
def th_nearest_interp2d(input, coords):
    """
    2d nearest neighbor interpolation for a th.Tensor
    """
    # take clamp of coords so they're in the image bounds
    x = th.clamp(coords[:, :, 0], 0, input.size(1) - 1).round()
    y = th.clamp(coords[:, :, 1], 0, input.size(2) - 1).round()
    stride = th.LongTensor(input.stride())
    x_ix = x.mul(stride[1]).long()
    y_ix = y.mul(stride[2]).long()
    input_flat = input.view(input.size(0), -1)
    mapped_vals = input_flat.gather(1, x_ix.add(y_ix))
    return mapped_vals.view_as(input)
def th_nearest_interp3d(input, coords):
    """
    3d nearest neighbor interpolation for a th.Tensor
    """
    # take clamp of coords so they're in the image bounds
    coords[:, 0] = th.clamp(coords[:, 0], 0, input.size(1) - 1).round()
    coords[:, 1] = th.clamp(coords[:, 1], 0, input.size(2) - 1).round()
    coords[:, 2] = th.clamp(coords[:, 2], 0, input.size(3) - 1).round()
    stride = th.LongTensor(input.stride())[1:].float()
    idx = coords.mv(stride).long()
    input_flat = th_flatten(input)
    mapped_vals = input_flat[idx]
    return mapped_vals.view_as(input)
def forward(self, anchor, positive, negative):
    # euclidean distance
    # dist = torch.sum((anchor - positive) ** 2 - (anchor - negative) ** 2, dim=1) \
    #        + self.margin
    if self.dist_type == 0:
        dist_p = F.pairwise_distance(anchor, positive)
        dist_n = F.pairwise_distance(anchor, negative)
    elif self.dist_type == 1:
        dist_p = cosine_similarity(anchor, positive)
        dist_n = cosine_similarity(anchor, negative)
    dist_hinge = torch.clamp(dist_p - dist_n + self.margin, min=0.0)
    if self.use_ohem:
        v, idx = torch.sort(dist_hinge, descending=True)
        loss = torch.mean(v[0:self.ohem_bs])
    else:
        loss = torch.mean(dist_hinge)
    return loss
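The clamp in the loss above is the standard triplet hinge; here is a standalone numeric sketch of just that step (toy distances and margin, independent of the class state):

import torch

dist_p = torch.Tensor([0.2, 0.9])  # anchor-positive distances
dist_n = torch.Tensor([0.8, 0.7])  # anchor-negative distances
margin = 0.3
hinge = torch.clamp(dist_p - dist_n + margin, min=0.0)
print(hinge, hinge.mean())  # tensor([0.0, 0.5]): only the second triplet violates the margin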
def clip(tensor: T.FloatTensor,
         a_min: T.Scalar = -numpy.inf,
         a_max: T.Scalar = numpy.inf) -> T.FloatTensor:
    """
    Return a tensor with its values clipped between a_min and a_max.

    Args:
        tensor: A tensor.
        a_min (optional): The desired lower bound on the elements of the tensor.
        a_max (optional): The desired upper bound on the elements of the tensor.

    Returns:
        tensor: A new tensor with its values clipped between a_min and a_max.
    """
    return tensor.clamp(a_min, a_max)
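A small sketch of `clip` on toy data (here `T.FloatTensor`/`T.Scalar` are the backend's own type aliases; the call itself only needs a torch tensor):

import torch

t = torch.Tensor([-2.0, -0.5, 0.5, 2.0])
print(clip(t, a_min=-1.0, a_max=1.0))  # [-1.0, -0.5, 0.5, 1.0]
print(t)                               # unchanged: clamp() returns a new tensor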
def clip_inplace(tensor: T.FloatTensor,
                 a_min: T.Scalar = -numpy.inf,
                 a_max: T.Scalar = numpy.inf) -> None:
    """
    Clip the values of a tensor between a_min and a_max.

    Note:
        Modifies tensor in place.

    Args:
        tensor: A tensor.
        a_min (optional): The desired lower bound on the elements of the tensor.
        a_max (optional): The desired upper bound on the elements of the tensor.

    Returns:
        None
    """
    return torch.clamp(tensor, a_min, a_max, out=tensor)
def hard_negative_mining(self, conf_loss, pos):
    '''Return negative indices that are 3x the number of positive indices.
    Args:
      conf_loss: (tensor) cross entropy loss between conf_preds and conf_targets, sized [N*8732,].
      pos: (tensor) positive (matched) box indices, sized [N,8732].
    Return:
      (tensor) negative indices, sized [N,8732].
    '''
    batch_size, num_boxes = pos.size()
    # print(pos)
    # print(conf_loss.size())
    conf_loss[pos] = 0                            # set pos boxes = 0, the rest are neg conf_loss
    conf_loss = conf_loss.view(batch_size, -1)    # [N,8732]
    _, idx = conf_loss.sort(1, descending=True)   # sort by neg conf_loss
    _, rank = idx.sort(1)                         # [N,8732]
    num_pos = pos.long().sum(1)                   # [N,1]
    num_neg = torch.clamp(3 * num_pos, max=num_boxes - 1)  # [N,1]
    neg = rank < num_neg.view(-1, 1).expand_as(rank)        # [N,8732]
    return neg
def select_action(self, state_batch):
    # state_batch: n_agents x state_dim
    actions = Variable(th.zeros(
        self.n_agents,
        self.n_actions))
    FloatTensor = th.cuda.FloatTensor if self.use_cuda else th.FloatTensor
    for i in range(self.n_agents):
        sb = state_batch[i, :].detach()
        act = self.actors[i](sb.unsqueeze(0)).squeeze()
        act += Variable(
            th.from_numpy(
                np.random.randn(2) * self.var[i]).type(FloatTensor))
        if self.episode_done > self.episodes_before_train and \
                self.var[i] > 0.05:
            self.var[i] *= 0.999998
        act = th.clamp(act, -1.0, 1.0)
        actions[i, :] = act
    self.steps_done += 1
    return actions
def get_update(self):
    actions, log_actions, rewards, critics, entropies, states, advantages = self._sample()
    # Compute auxiliary losses
    critics = self.critic(states)
    critic_loss = (rewards - critics).pow(2).mean()
    critic_loss = self.critic_weight * critic_loss
    entropy_loss = entropies.mean()
    entropy_loss = -self.entropy_weight * entropy_loss
    # Compute policy loss
    advantages = advantages.detach().view(-1, 1)
    new_actions = self.policy(states)
    log_probs = new_actions.compute_log_prob(actions)
    ratios = (log_probs - log_actions.detach()).exp()
    surr = ratios.view(-1, 1) * advantages
    clipped = th.clamp(ratios, 1.0 - self.clip, 1.0 + self.clip).view(-1, 1) * advantages
    policy_loss = -th.min(surr, clipped).mean()
    # Proceed to optimization
    loss = policy_loss + critic_loss + entropy_loss
    if self.epoch_optimized == self.num_epochs:
        loss.backward(retain_graph=False)
    else:
        loss.backward(retain_graph=True)
    if self.grad_clip > 0.0:
        th.nn.utils.clip_grad_norm(self.parameters(), self.grad_clip)
    # Store statistics
    self.stats['Num. Updates'] += 1.0
    self.stats['Critic Loss'] += critic_loss.data[0]
    self.stats['Entropy Loss'] += entropy_loss.data[0]
    self.stats['Policy Loss'] += policy_loss.data[0]
    self.stats['Total Loss'] += loss.data[0]
    return [p.grad.clone() for p in self.parameters()]
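The `th.clamp` call above is the PPO clipped surrogate; a minimal numeric sketch of just that step (toy ratios and advantages, not part of the original class):

import torch as th

ratios = th.Tensor([0.5, 1.0, 1.6])        # new / old action probabilities
advantages = th.Tensor([1.0, 1.0, -1.0])
clip = 0.2
surr = ratios * advantages
clipped = th.clamp(ratios, 1.0 - clip, 1.0 + clip) * advantages
policy_loss = -th.min(surr, clipped).mean()
print(policy_loss)  # clamp keeps the effective ratio inside [0.8, 1.2]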
def forward(self, input):
    if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor) and self.use_parallel:
        output = nn.parallel.data_parallel(self.model, input, self.gpu_ids)
    else:
        output = self.model(input)
    if self.learn_residual:
        output = input + output
        output = torch.clamp(output, min=-1, max=1)
    return output
def forward(self, anchor1, anchor2, img_sentc, sent_imgc):
    cost_sent = torch.clamp(self.margin - anchor1 + img_sentc,
                            min=0.0).sum()
    cost_img = torch.clamp(self.margin - anchor2 + sent_imgc,
                           min=0.0).sum()
    loss = cost_sent + cost_img
    return loss
def postprocess_caffe(output):
    output = output.data.cpu().clamp(0, 255).numpy()
    output = output[0].transpose(1, 2, 0).astype('uint8')
    output = output[..., ::-1]
    output = Image.fromarray(output)
    return output
def postprocess_torch(output):
    # Should we?
    def denormalize(image):
        for t in range(3):
            image[t, :, :] = (image[t, :, :] * STD[t]) + MEAN[t]
        return image

    transformer = transforms.Compose([
        transforms.ToPILImage()])
    image = output.cpu().data[0]
    image = torch.clamp(denormalize(image), min=0, max=1)
    return transformer(image)
def forward(self, x0, x1, y):
    # euclidean distance
    diff = x0 - x1
    dist_sq = torch.sum(torch.pow(diff, 2), 1)
    dist = torch.sqrt(dist_sq)
    mdist = self.margin - dist
    dist = torch.clamp(mdist, min=0.0)
    loss = y * dist_sq + (1 - y) * torch.pow(dist, 2)
    loss = torch.sum(loss) / 2.0 / x0.size()[0]
    return loss
attack_carlini_wagner_l2.py (source file from the project pytorch-nips2017-attack-example, by rwightman)
def __init__(self, targeted=True, search_steps=None, max_steps=None, cuda=True, debug=False):
    self.debug = debug
    self.targeted = targeted
    self.num_classes = 1000
    self.confidence = 20  # FIXME need to find a good value for this, 0 value used in paper not doing much...
    self.initial_const = 0.1  # bumped up from default of .01 in reference code
    self.binary_search_steps = search_steps or 5
    self.repeat = self.binary_search_steps >= 10
    self.max_steps = max_steps or 1000
    self.abort_early = True
    self.clip_min = -1.
    self.clip_max = 1.
    self.cuda = cuda
    self.clamp_fn = 'tanh'  # set to something else to perform a simple clamp instead of tanh
    self.init_rand = False  # an experiment, does a random starting point help?
attack_carlini_wagner_l2.py (source file from the project pytorch-nips2017-attack-example, by rwightman)
def _optimize(self, optimizer, model, input_var, modifier_var, target_var, scale_const_var, input_orig=None):
    # apply modifier and clamp resulting image to keep bounded from clip_min to clip_max
    if self.clamp_fn == 'tanh':
        input_adv = tanh_rescale(modifier_var + input_var, self.clip_min, self.clip_max)
    else:
        input_adv = torch.clamp(modifier_var + input_var, self.clip_min, self.clip_max)

    output = model(input_adv)

    # distance to the original input data
    if input_orig is None:
        dist = l2_dist(input_adv, input_var, keepdim=False)
    else:
        dist = l2_dist(input_adv, input_orig, keepdim=False)

    loss = self._loss(output, target_var, dist, scale_const_var)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    loss_np = loss.data[0]
    dist_np = dist.data.cpu().numpy()
    output_np = output.data.cpu().numpy()
    input_adv_np = input_adv.data.permute(0, 2, 3, 1).cpu().numpy()  # back to BHWC for numpy consumption
    return loss_np, dist_np, output_np, input_adv_np
def task_loss(Y_sched, Y_actual, params):
    return (params["gamma_under"] * torch.clamp(Y_actual - Y_sched, min=0) +
            params["gamma_over"] * torch.clamp(Y_sched - Y_actual, min=0) +
            0.5 * (Y_sched - Y_actual)**2).mean(0)
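A toy call illustrating the asymmetric penalty above (under-scheduling is weighted much more heavily than over-scheduling; the weights and values here are illustrative, not from the original project):

import torch

params = {"gamma_under": 50.0, "gamma_over": 0.5}
Y_sched = torch.Tensor([[1.0, 2.0]])
Y_actual = torch.Tensor([[1.5, 1.5]])
print(task_loss(Y_sched, Y_actual, params))  # [25.125, 0.375]: the shortfall dominates the loss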
def test_clamp(self):
    m1 = torch.rand(100).mul(5).add(-2.5)  # uniform in [-2.5, 2.5]
    # just in case we're extremely lucky.
    min_val = -1
    max_val = 1
    m1[1] = min_val
    m1[2] = max_val

    res1 = m1.clone()
    res1.clamp_(min_val, max_val)
    res2 = m1.clone()
    for i in iter_indices(res2):
        res2[i] = max(min_val, min(max_val, res2[i]))
    self.assertEqual(res1, res2)

    res1 = torch.clamp(m1, min=min_val)
    res2 = m1.clone()
    for i in iter_indices(res2):
        res2[i] = max(min_val, res2[i])
    self.assertEqual(res1, res2)

    res1 = torch.clamp(m1, max=max_val)
    res2 = m1.clone()
    for i in iter_indices(res2):
        res2[i] = min(max_val, res2[i])
    self.assertEqual(res1, res2)
def test_masked_select(self):
    num_src = 10
    src = torch.randn(num_src)
    mask = torch.rand(num_src).clamp(0, 1).mul(2).floor().byte()
    dst = src.masked_select(mask)
    dst2 = []
    for i in range(num_src):
        if mask[i]:
            dst2 += [src[i]]
    self.assertEqual(dst, torch.Tensor(dst2), 0)