def forward(self, input, target):
    # Keep the terms for positive targets: zero out entries where target == -1
    buffer = input.new()
    buffer.resize_as_(input).copy_(input)
    buffer[torch.eq(target, -1.)] = 0
    output = buffer.sum()

    # Hinge term for negative targets: max(0, margin - input),
    # zeroing out entries where target == 1
    buffer.fill_(self.margin).add_(-1, input)
    buffer.clamp_(min=0)  # the original used the legacy cmax_(0), removed in later PyTorch releases
    buffer[torch.eq(target, 1.)] = 0
    output += buffer.sum()

    if self.size_average:
        output = output / input.nelement()

    self.save_for_backward(input, target)
    return input.new((output,))
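The snippet above computes a hinge-embedding-style loss and uses torch.eq(target, ±1) only for masking. A rough sketch of the same computation with boolean masks in current PyTorch follows; the helper name and the default margin of 1.0 are assumptions for illustration, not part of the original code.

import torch

def hinge_embedding_loss_sketch(input, target, margin=1.0, size_average=True):
    # torch.eq produces an element-wise mask; positive targets contribute the
    # raw distance, negative targets contribute max(0, margin - distance).
    pos = torch.eq(target, 1)
    neg = torch.eq(target, -1)
    loss = input * pos.to(input.dtype) + (margin - input).clamp(min=0) * neg.to(input.dtype)
    total = loss.sum()
    return total / input.numel() if size_average else total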
def forward(self, input, target):
    buffer = input.new()
    buffer.resize_as_(input).copy_(input)
    buffer[torch.eq(target, -1.)] = 0
    output = buffer.sum()

    buffer.fill_(self.margin).add_(-1, input)
    buffer.clamp_(min=0)
    buffer[torch.eq(target, 1.)] = 0
    output += buffer.sum()

    if self.size_average:
        output = output / input.nelement()

    self.save_for_backward(input, target)
    return input.new((output,))
def updateOutput(self, input, y):
    if self.buffer is None:
        self.buffer = input.new()
    self.buffer.resize_as_(input).copy_(input)
    self.buffer[torch.eq(y, -1.)] = 0
    self.output = self.buffer.sum()

    self.buffer.fill_(self.margin).add_(-1, input)
    self.buffer.clamp_(min=0)
    self.buffer[torch.eq(y, 1.)] = 0
    self.output = self.output + self.buffer.sum()

    if self.sizeAverage:
        self.output = self.output / input.nelement()

    return self.output
def forward(ctx, input, target, margin, size_average):
    ctx.margin = margin
    ctx.size_average = size_average

    buffer = input.new()
    buffer.resize_as_(input).copy_(input)
    buffer[torch.eq(target, -1.)] = 0
    output = buffer.sum()

    buffer.fill_(ctx.margin).add_(-1, input)
    buffer.clamp_(min=0)
    buffer[torch.eq(target, 1.)] = 0
    output += buffer.sum()

    if ctx.size_average:
        output = output / input.nelement()

    ctx.save_for_backward(input, target)
    return input.new((output,))
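This variant uses the new-style autograd.Function form, where forward is a static method that stores state on ctx and the function is invoked through apply. A minimal sketch of that pattern is shown below; the class and attribute names are illustrative and not taken from the snippet above.

import torch
from torch.autograd import Function

class ScaledIdentity(Function):
    # Illustrative only: demonstrates the ctx.save_for_backward / saved_tensors
    # plumbing used by the loss functions in this listing.
    @staticmethod
    def forward(ctx, input, scale):
        ctx.scale = scale
        ctx.save_for_backward(input)
        return input * scale

    @staticmethod
    def backward(ctx, grad_output):
        (input,) = ctx.saved_tensors
        return grad_output * ctx.scale, None

x = torch.randn(3, requires_grad=True)
y = ScaledIdentity.apply(x, 2.0).sum()
y.backward()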
def decode(self, input_word, input_char, target=None, mask=None, length=None, hx=None, leading_symbolic=0):
    # output from rnn [batch, length, tag_space]
    output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)

    if target is None:
        return self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic), None

    if length is not None:
        max_len = length.max()
        target = target[:, :max_len]

    preds = self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic)
    if mask is None:
        return preds, torch.eq(preds, target.data).float().sum()
    else:
        return preds, (torch.eq(preds, target.data).float() * mask.data).sum()
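In the decode method above, torch.eq is used only to count correct tags under an optional padding mask. A standalone sketch of that accuracy count, with shapes and example values assumed for illustration:

import torch

def count_correct(preds, target, mask=None):
    # preds, target: [batch, length] integer tag tensors
    # mask: optional [batch, length] tensor, 1 for real tokens, 0 for padding
    correct = torch.eq(preds, target).float()
    if mask is not None:
        correct = correct * mask.float()
    return correct.sum()

preds = torch.tensor([[1, 2, 0], [3, 3, 0]])
target = torch.tensor([[1, 2, 2], [3, 1, 0]])
mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
print(count_correct(preds, target, mask))  # tensor(4.)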
def backward(self, grad_output):
    v1, v2, y = self.saved_tensors
    buffer = v1.new()
    _idx = self._new_idx(v1)

    gw1 = grad_output.new()
    gw2 = grad_output.new()
    gw1.resize_as_(v1).copy_(v2)
    gw2.resize_as_(v1).copy_(v1)

    # Destination tensors are passed via out= (the original used the legacy
    # destination-first calling convention, e.g. torch.mul(buffer, a, b)).
    torch.mul(self.w1, self.w22, out=buffer)
    gw1.addcmul_(-1, buffer.expand_as(v1), v1)
    gw1.mul_(self.w.expand_as(v1))

    torch.mul(self.w1, self.w32, out=buffer)
    gw2.addcmul_(-1, buffer.expand_as(v1), v2)
    gw2.mul_(self.w.expand_as(v1))

    # Zero gradients at positions where self._outputs <= 0
    torch.le(self._outputs, 0, out=_idx)
    _idx = _idx.view(-1, 1).expand(gw1.size())
    gw1[_idx] = 0
    gw2[_idx] = 0

    # Flip the gradient sign where y == 1
    torch.eq(y, 1, out=_idx)
    _idx = _idx.view(-1, 1).expand(gw2.size())
    gw1[_idx] = gw1[_idx].mul_(-1)
    gw2[_idx] = gw2[_idx].mul_(-1)

    if self.size_average:
        gw1.div_(y.size(0))
        gw2.div_(y.size(0))

    if grad_output[0] != 1:
        gw1.mul_(grad_output)
        gw2.mul_(grad_output)

    return gw1, gw2, None
def backward(self, grad_output):
    input, target = self.saved_tensors
    grad_input = input.new().resize_as_(input).copy_(target)
    grad_input[torch.mul(torch.eq(target, -1), torch.gt(input, self.margin))] = 0

    if self.size_average:
        grad_input.mul_(1. / input.nelement())

    if grad_output[0] != 1:
        grad_input.mul_(grad_output[0])

    return grad_input, None
def updateOutput(self, input, y):
    # Explicit None check (the original wrote `self.buffer = self.buffer or input.new()`,
    # which fails for multi-element tensors in a boolean context)
    if self.buffer is None:
        self.buffer = input.new()
    self.buffer.resize_as_(input).copy_(input)
    self.buffer[torch.eq(y, -1.)] = 0
    self.output = self.buffer.sum()

    self.buffer.fill_(self.margin).add_(-1, input)
    self.buffer.clamp_(min=0)  # the original used the legacy cmax_(0)
    self.buffer[torch.eq(y, 1.)] = 0
    self.output = self.output + self.buffer.sum()

    if self.sizeAverage:
        self.output = self.output / input.nelement()

    return self.output
def updateGradInput(self, input, y):
    self.gradInput.resize_as_(input).copy_(y)
    self.gradInput[torch.mul(torch.eq(y, -1), torch.gt(input, self.margin))] = 0

    if self.sizeAverage:
        self.gradInput.mul_(1. / input.nelement())

    return self.gradInput
def test_logical(self):
    x = torch.rand(100, 100) * 2 - 1
    xx = x.clone()

    xgt = torch.gt(x, 1)
    xlt = torch.lt(x, 1)
    xeq = torch.eq(x, 1)
    xne = torch.ne(x, 1)

    neqs = xgt + xlt
    all = neqs + xeq
    self.assertEqual(neqs.sum(), xne.sum(), 0)
    self.assertEqual(x.nelement(), all.sum())
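The test relies on torch.eq and the other comparison functions returning an element-wise mask of the same shape as the input (a ByteTensor of 0/1 in the releases this code targets, a BoolTensor in current PyTorch). A minimal illustration with values chosen so each element lands in exactly one mask:

import torch

x = torch.tensor([0.5, 1.0, 1.5])
print(torch.eq(x, 1))   # tensor([False,  True, False])
print(torch.gt(x, 1))   # tensor([False, False,  True])
print(torch.lt(x, 1))   # tensor([ True, False, False])
# Every element falls into exactly one of the three masks, which is what
# test_logical checks at scale.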
def forward(self, input1, input2, y):
    self.w1 = input1.new()
    self.w22 = input1.new()
    self.w = input1.new()
    self.w32 = input1.new()
    self._outputs = input1.new()

    _idx = input1.new().byte()

    buffer = torch.mul(input1, input2)
    torch.sum(buffer, 1, out=self.w1)

    epsilon = 1e-12
    torch.mul(input1, input1, out=buffer)
    torch.sum(buffer, 1, out=self.w22).add_(epsilon)

    self._outputs.resize_as_(self.w22).fill_(1)
    torch.div(self._outputs, self.w22, out=self.w22)
    self.w.resize_as_(self.w22).copy_(self.w22)

    torch.mul(input2, input2, out=buffer)
    torch.sum(buffer, 1, out=self.w32).add_(epsilon)
    torch.div(self._outputs, self.w32, out=self.w32)
    self.w.mul_(self.w32)
    self.w.sqrt_()

    torch.mul(self.w1, self.w, out=self._outputs)
    self._outputs = self._outputs.select(1, 0)

    torch.eq(y, -1, out=_idx)
    self._outputs[_idx] = self._outputs[_idx].add_(-self.margin).clamp_(min=0)
    torch.eq(y, 1, out=_idx)
    self._outputs[_idx] = self._outputs[_idx].mul_(-1).add_(1)

    output = self._outputs.sum()
    if self.size_average:
        output = output / y.size(0)

    self.save_for_backward(input1, input2, y)
    return input1.new((output,))
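The forward above hand-rolls the cosine similarity and then uses torch.eq(y, ±1) masks to apply the margin to dissimilar pairs. Roughly the same criterion can be sketched with current high-level ops; the function name, default margin, and mean reduction here are assumptions for illustration.

import torch
import torch.nn.functional as F

def cosine_embedding_loss_sketch(x1, x2, y, margin=0.0):
    # y is +1 for similar pairs and -1 for dissimilar pairs
    cos = F.cosine_similarity(x1, x2, dim=1)
    pos = torch.eq(y, 1).to(cos.dtype)
    neg = torch.eq(y, -1).to(cos.dtype)
    loss = pos * (1 - cos) + neg * (cos - margin).clamp(min=0)
    return loss.mean()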
def backward(self, grad_output):
    v1, v2, y = self.saved_tensors

    buffer = v1.new()
    _idx = v1.new().byte()

    gw1 = grad_output.new()
    gw2 = grad_output.new()
    gw1.resize_as_(v1).copy_(v2)
    gw2.resize_as_(v1).copy_(v1)

    torch.mul(self.w1, self.w22, out=buffer)
    gw1.addcmul_(-1, buffer.expand_as(v1), v1)
    gw1.mul_(self.w.expand_as(v1))

    torch.mul(self.w1, self.w32, out=buffer)
    gw2.addcmul_(-1, buffer.expand_as(v1), v2)
    gw2.mul_(self.w.expand_as(v1))

    torch.le(self._outputs, 0, out=_idx)
    _idx = _idx.view(-1, 1).expand(gw1.size())
    gw1[_idx] = 0
    gw2[_idx] = 0

    torch.eq(y, 1, out=_idx)
    _idx = _idx.view(-1, 1).expand(gw2.size())
    gw1[_idx] = gw1[_idx].mul_(-1)
    gw2[_idx] = gw2[_idx].mul_(-1)

    if self.size_average:
        gw1.div_(y.size(0))
        gw2.div_(y.size(0))

    grad_output_val = grad_output[0]
    if grad_output_val != 1:
        gw1.mul_(grad_output_val)
        gw2.mul_(grad_output_val)

    return gw1, gw2, None
def test_comparison_ops(self):
    x = torch.randn(5, 5)
    y = torch.randn(5, 5)

    eq = x == y
    for idx in iter_indices(x):
        self.assertIs(x[idx] == y[idx], eq[idx] == 1)

    ne = x != y
    for idx in iter_indices(x):
        self.assertIs(x[idx] != y[idx], ne[idx] == 1)

    lt = x < y
    for idx in iter_indices(x):
        self.assertIs(x[idx] < y[idx], lt[idx] == 1)

    le = x <= y
    for idx in iter_indices(x):
        self.assertIs(x[idx] <= y[idx], le[idx] == 1)

    gt = x > y
    for idx in iter_indices(x):
        self.assertIs(x[idx] > y[idx], gt[idx] == 1)

    ge = x >= y
    for idx in iter_indices(x):
        self.assertIs(x[idx] >= y[idx], ge[idx] == 1)
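As this test suggests, the Python comparison operators on tensors are just spellings of the corresponding torch functions, so x == y and torch.eq(x, y) produce the same mask. A quick check with illustrative values:

import torch

x = torch.tensor([1, 2, 3])
y = torch.tensor([1, 0, 3])
assert torch.equal(x == y, torch.eq(x, y))  # operator form and function form agree
print(torch.eq(x, y))  # tensor([ True, False,  True])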
def forward(self, input1, input2, y):
    self.w1 = input1.new()
    self.w22 = input1.new()
    self.w = input1.new()
    self.w32 = input1.new()
    self._outputs = input1.new()

    _idx = input1.new().byte()

    buffer = torch.mul(input1, input2)
    torch.sum(buffer, 1, out=self.w1, keepdim=True)

    epsilon = 1e-12
    torch.mul(input1, input1, out=buffer)
    torch.sum(buffer, 1, out=self.w22, keepdim=True).add_(epsilon)

    self._outputs.resize_as_(self.w22).fill_(1)
    torch.div(self._outputs, self.w22, out=self.w22)
    self.w.resize_as_(self.w22).copy_(self.w22)

    torch.mul(input2, input2, out=buffer)
    torch.sum(buffer, 1, out=self.w32, keepdim=True).add_(epsilon)
    torch.div(self._outputs, self.w32, out=self.w32)
    self.w.mul_(self.w32)
    self.w.sqrt_()

    torch.mul(self.w1, self.w, out=self._outputs)
    self._outputs = self._outputs.select(1, 0)

    torch.eq(y, -1, out=_idx)
    self._outputs[_idx] = self._outputs[_idx].add_(-self.margin).clamp_(min=0)
    torch.eq(y, 1, out=_idx)
    self._outputs[_idx] = self._outputs[_idx].mul_(-1).add_(1)

    output = self._outputs.sum()
    if self.size_average:
        output = output / y.size(0)

    self.save_for_backward(input1, input2, y)
    return input1.new((output,))
def backward(ctx, grad_output):
    v1, v2, y = ctx.saved_tensors

    buffer = v1.new()
    _idx = v1.new().byte()

    gw1 = grad_output.new()
    gw2 = grad_output.new()
    gw1.resize_as_(v1).copy_(v2)
    gw2.resize_as_(v1).copy_(v1)

    torch.mul(ctx.w1, ctx.w22, out=buffer)
    gw1.addcmul_(-1, buffer.expand_as(v1), v1)
    gw1.mul_(ctx.w.expand_as(v1))

    torch.mul(ctx.w1, ctx.w32, out=buffer)
    gw2.addcmul_(-1, buffer.expand_as(v1), v2)
    gw2.mul_(ctx.w.expand_as(v1))

    torch.le(ctx._outputs, 0, out=_idx)
    _idx = _idx.view(-1, 1).expand(gw1.size())
    gw1[_idx] = 0
    gw2[_idx] = 0

    torch.eq(y, 1, out=_idx)
    _idx = _idx.view(-1, 1).expand(gw2.size())
    gw1[_idx] = gw1[_idx].mul_(-1)
    gw2[_idx] = gw2[_idx].mul_(-1)

    if ctx.size_average:
        gw1.div_(y.size(0))
        gw2.div_(y.size(0))

    grad_output_val = grad_output[0]
    if grad_output_val != 1:
        gw1.mul_(grad_output_val)
        gw2.mul_(grad_output_val)

    return gw1, gw2, None, None, None
def forward(ctx, input, target, grad_output, margin, size_average):
    ctx.margin = margin
    ctx.size_average = size_average
    ctx.save_for_backward(input, target, grad_output)

    grad_input = input.new().resize_as_(input).copy_(target)
    grad_input[torch.mul(torch.eq(target, -1), torch.gt(input, ctx.margin))] = 0

    if ctx.size_average:
        grad_input.mul_(1. / input.nelement())

    if grad_output[0] != 1:
        grad_input.mul_(grad_output[0])

    return grad_input