def forward(self, xs):
    x = xs[0]
    xp = cuda.get_array_module(x)  # numpy for CPU arrays, cupy for GPU arrays
    if (xp != numpy and cuda.cudnn_enabled and self.use_cudnn and
            _cudnn_version >= 3000):
        # cuDNN path: only taken for GPU arrays with cuDNN v3 or later.
        # one/zero are the alpha/beta blend scalars cuDNN expects, in x's
        # precision (double if x is float64, else float32).
        oz_dtype = 'd' if x.dtype == 'd' else 'f'
        one = numpy.array(1, dtype=oz_dtype).ctypes
        zero = numpy.array(0, dtype=oz_dtype).ctypes
        handle = cudnn.get_handle()
        # cuDNN wants a 4-d tensor, so fold all trailing axes into one:
        # (N, C, d1, d2, ...) -> (N, C, d1*d2*..., 1).
        x_cube = x.reshape(x.shape[:2] + (-1, 1))
        desc = cudnn.create_tensor_descriptor(x_cube)
        self.y = xp.empty_like(x)
        # _algorithm and _mode are module-level cuDNN softmax constants.
        libcudnn.softmaxForward(
            handle, _algorithm, _mode, one.data, desc.value,
            x_cube.data.ptr, zero.data, desc.value,
            self.y.data.ptr)
        return self.y,
    else:
        # Pure numpy/cupy fallback: log_softmax(x) = x - logsumexp(x).
        log_z = logsumexp(x)
        self.y = x - log_z
        return self.y,
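
The fallback branch is easy to reproduce on its own. Below is a minimal NumPy sketch (my re-implementation for illustration, not Chainer's actual helper), assuming `logsumexp` reduces over axis 1 with keepdims=True as the broadcast in `x - log_z` requires; the max-subtraction trick keeps exp() from overflowing on large inputs.

# Minimal, self-contained sketch of the else-branch above.
import numpy


def logsumexp(x):
    # Subtract the per-row maximum before exponentiating so exp() cannot
    # overflow, then add it back after the log.
    m = x.max(axis=1, keepdims=True)
    y = numpy.exp(x - m)
    return numpy.log(y.sum(axis=1, keepdims=True)) + m


def log_softmax(x):
    # log_softmax(x) = x - log(sum_j exp(x[:, j])), the fallback formula.
    return x - logsumexp(x)


x = numpy.array([[1000.0, 1001.0, 1002.0]])  # naive exp(x) would overflow
print(log_softmax(x))                    # approx [[-2.4076 -1.4076 -0.4076]]
print(numpy.exp(log_softmax(x)).sum())   # exp of each row sums to 1.0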