def updateGradInput(self, input, gradOutput):
    if self.gradInput is None:
        return
    # allocate scratch buffers lazily; `x or input.new()` would take the truth
    # value of a tensor, which is ambiguous for multi-element tensors
    if self._div is None:
        self._div = input.new()
    if self._output is None:
        self._output = self.output.new()
    if self._expand4 is None:
        self._expand4 = input.new()
    if self._gradOutput is None:
        self._gradOutput = input.new()
    if not self.fastBackward:
        self.updateOutput(input)

    inputSize, outputSize = self.weight.size(0), self.weight.size(1)
"""
dy_j -2 * c_j * c_j * (w_j - x) c_j * c_j * (x - w_j)
---- = -------------------------- = ---------------------
dx 2 || c_j * (w_j - x) || y_j
"""
    # add a small epsilon to prevent division-by-zero (NaN) bugs
    self._output.resize_as_(self.output).copy_(self.output).add_(1e-7)
    self._view(self._gradOutput, gradOutput, gradOutput.size())
    # _div[j] = gradOutput[j] / y_j
    torch.div(gradOutput, self._output, out=self._div)
    if input.dim() == 1:
        self._div.resize_(1, outputSize)
        self._expand4 = self._div.expand_as(self.weight)

        if input.type() == 'torch.cuda.FloatTensor':
            # CUDA path: materialize the expanded view before the in-place mul
            self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
            self._repeat2.mul_(self._repeat)
        else:
            torch.mul(self._repeat, self._expand4, out=self._repeat2)

        self._repeat2.mul_(self.diagCov)
        # sum over the template dimension to accumulate every dy_j/dx
        torch.sum(self._repeat2, 1, keepdim=True, out=self.gradInput)
        self.gradInput.resize_as_(input)
    elif input.dim() == 2:
        batchSize = input.size(0)
        self._div.resize_(batchSize, 1, outputSize)
        self._expand4 = self._div.expand(batchSize, inputSize, outputSize)

        if input.type() == 'torch.cuda.FloatTensor':
            # CUDA path: materialize the expanded views before the in-place muls
            self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
            self._repeat2.mul_(self._repeat)
            self._repeat2.mul_(self._repeat3)
        else:
            torch.mul(self._repeat, self._expand4, out=self._repeat2)
            self._repeat2.mul_(self._expand3)

        # sum over the template dimension to accumulate every dy_j/dx
        torch.sum(self._repeat2, 2, keepdim=True, out=self.gradInput)
        self.gradInput.resize_as_(input)
    else:
        raise RuntimeError("1D or 2D input expected")

    return self.gradInput
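

# --- autograd sanity check (illustrative sketch, not part of the module) ---
# Verifies the docstring formula dy_j/dx = c_j^2 * (x - w_j) / y_j on a tiny
# example; x, w, c and y below are local stand-ins, not the module's buffers.
import torch

torch.manual_seed(0)
inputSize, outputSize = 5, 3
x = torch.randn(inputSize, requires_grad=True)
w = torch.randn(inputSize, outputSize)   # one template w_j per column
c = torch.rand(inputSize, outputSize)    # diagonal scales c_j per column

# forward: y_j = || c_j * (x - w_j) ||
y = (c * (x.unsqueeze(1) - w)).pow(2).sum(0).sqrt()

gradOutput = torch.randn(outputSize)
y.backward(gradOutput)

# manual gradient from the formula, summed over the j templates
manual = (c * c * (x.unsqueeze(1) - w) / (y + 1e-7) * gradOutput).sum(1)
print(torch.allclose(x.grad, manual, atol=1e-5))  # expected: True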