def addgrad(self, var):
    """Accumulates the gradient array from the given source variable.

    This method just runs ``self.grad += var.grad``, except that the
    accumulation works even across the host and different devices.

    Args:
        var (Variable): Source variable.

    """
    src = var._grad
    dst = self._grad
    if src is None:
        raise ValueError('Source gradient is not set.')
    if dst is None:
        raise ValueError('Target gradient is not set.')
    # Dispatch on where the destination gradient lives.
    xp = cuda.get_array_module(dst)
    if xp is numpy:
        # Destination is on the host: bring the source to CPU first.
        dst += cuda.to_cpu(src)
    elif isinstance(src, numpy.ndarray):
        # Source is on the host, destination on a GPU: transfer the
        # source to the destination's device before accumulating.
        dst += cuda.to_gpu(src, device=dst)
    else:
        # Both gradients are on GPUs.
        dst_dev = dst.device
        if dst_dev == src.device:
            dst += src
        else:
            # Different GPUs: copy the source onto the destination's
            # device, then accumulate there.
            with dst_dev:
                dst += xp.copy(src)
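
For context, here is a minimal usage sketch with both gradients resident on the host. This is a sketch only: it assumes the method above is chainer.Variable.addgrad and that Chainer and NumPy are installed; the variable names are illustrative, not from the original source.

# Hypothetical usage of Variable.addgrad with two host-resident gradients.
import numpy as np
import chainer

a = chainer.Variable(np.zeros(2, dtype=np.float32))
b = chainer.Variable(np.zeros(2, dtype=np.float32))
a.grad = np.array([0.5, 0.5], dtype=np.float32)
b.grad = np.array([1.0, 1.0], dtype=np.float32)

a.addgrad(b)     # in effect: a.grad += b.grad
print(a.grad)    # -> [1.5 1.5]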