def backward_gpu(self, inputs, grad_outputs):
    # `cuda` is chainer.cuda, imported at module level in Chainer's
    # softmax_cross_entropy.py.
    cupy = cuda.cupy
    x, t = inputs
    if hasattr(self, 'y'):
        # Forward pass cached the softmax probabilities.
        y = self.y
    else:
        # Recompute them: softmax_log returns log-probabilities,
        # so exponentiate in place to recover the softmax output.
        y = softmax_log(x, self.use_cudnn)
        cupy.exp(y, out=y)
    gloss = grad_outputs[0]
    # Elements per sample beyond the batch and channel axes
    # (e.g. H * W for spatial inputs).
    n_unit = t.size // len(t)
    coeff = gloss * self._coeff
    gx = cuda.elementwise(
        'T y, S t, raw T coeff, S n_channel, S n_unit',
        'T gx',
        '''
            // Channel index of the current flat element i.
            const int c = (i / n_unit % n_channel);
            // Label -1 means "ignore"; otherwise the standard
            // softmax cross-entropy gradient coeff * (y - onehot(t)).
            gx = (t == -1) ? 0 : (coeff[0] * (y - (c == t)));
        ''',
        'softmax_crossent_bwd')(
            y, cupy.expand_dims(t, 1), coeff, x.shape[1], n_unit)
    return gx, None
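For intuition, the kernel is just the textbook softmax cross-entropy gradient evaluated elementwise. Below is a minimal NumPy sketch of the same computation for the simple 2-D case of shape (batch, n_channel); the function name reference_backward is hypothetical and not part of Chainer.

import numpy as np

def reference_backward(y, t, coeff):
    """y: softmax probabilities, shape (batch, n_channel);
    t: integer labels, with -1 marking entries to ignore;
    coeff: scalar corresponding to gloss * self._coeff."""
    gx = coeff * y.copy()
    valid = t != -1
    # Subtract the one-hot target: gx = coeff * (y - onehot(t)).
    gx[valid, t[valid]] -= coeff
    # Ignored labels contribute no gradient, matching the kernel's
    # (t == -1) ? 0 branch.
    gx[~valid] = 0
    return gx

The elementwise kernel does the same thing without materializing the one-hot matrix: each output element compares its own channel index c against the label t and subtracts 1 only on the target channel.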