# Common setup assumed by every snippet below: import numpy; from chainer import cuda
def __call__(self, opt):
    if cuda.available:
        kernel = cuda.elementwise(
            'T low, T high',
            'T p',
            'p = (p < low) ? low : (p > high) ? high : p',
            'weight_clip')
    for link in opt.target.links():
        # only apply to binary layers (links that expose a `cname` attribute)
        if getattr(link, 'cname', False):
            for param in link.params():
                p = param.data
                with cuda.get_device(p) as dev:
                    if int(dev) == -1:
                        # CPU path: numpy.clip is not in-place unless
                        # out= is given, so write the result back into p
                        numpy.clip(p, self.low, self.high, out=p)
                    else:
                        kernel(self.low, self.high, p)
def __call__(self, opt):
    if cuda.available:
        kernel = cuda.elementwise(
            'T low, T high',
            'T p',
            'p = (p < low) ? low : (p > high) ? high : p',
            'weight_clip')
    for param in opt.target.params():
        p = param.data
        with cuda.get_device(p) as dev:
            if int(dev) == -1:
                # clip in place on the CPU (out= keeps the result in p)
                numpy.clip(p, self.low, self.high, out=p)
            else:
                kernel(self.low, self.high, p)
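# Usage sketch (assumption: the two __call__ bodies above belong to an
# optimizer-hook class, here called WeightClip, whose constructor stores
# `low` and `high`; only __call__ appears in the source):
#
#     opt = chainer.optimizers.SGD()
#     opt.setup(model)
#     opt.add_hook(WeightClip(-1.0, 1.0))  # clip params after each update
#
# optimizer.add_hook() invokes the hook with the optimizer after every
# parameter update, which is when the clipping above runs.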
def __call__(self, opt):
    if cuda.available:
        kernel = cuda.elementwise(
            'T p, T decay', 'T g', 'g += decay * p', 'weight_decay')
    rate = self.rate
    for name, param in opt.target.namedparams():
        # skip bias parameters (named 'b' or ending in '/b')
        if name == 'b' or name.endswith('/b'):
            continue
        p, g = param.data, param.grad
        with cuda.get_device(p) as dev:
            if int(dev) == -1:
                g += rate * p
            else:
                kernel(p, rate, g)
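# Numeric sanity check (not from the source): `g += rate * p` adds the
# gradient of the L2 penalty E(p) = (rate / 2) * ||p||^2, which is what
# weight decay implements; biases are excluded above by their '/b' name.
def _check_weight_decay_grad(rate=0.01, eps=1e-6):
    p = numpy.array([1.0, -2.0, 3.0])
    penalty = lambda q: 0.5 * rate * numpy.sum(q * q)
    num_grad = numpy.array([
        (penalty(p + eps * e) - penalty(p - eps * e)) / (2 * eps)
        for e in numpy.eye(len(p))])
    numpy.testing.assert_allclose(num_grad, rate * p, rtol=1e-4)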
def sample(self, trainer):
    x = trainer.updater.sample()
    x = x.data
    # move the sample to the host if it lives on a GPU
    if cuda.available and isinstance(x, cuda.ndarray):
        x = cuda.to_cpu(x)
    return x
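# Side note (fact about chainer.cuda, not from the source): cuda.to_cpu()
# also accepts a numpy.ndarray and returns it unchanged, so the
# isinstance() guard above merely avoids an extra call:
#
#     x = numpy.zeros((2, 3), dtype=numpy.float32)
#     assert cuda.to_cpu(x) is x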
def __call__(self, opt):
    if cuda.available:
        kernel = cuda.elementwise(
            'T p, T decay', 'T g', 'g += decay * p', 'weight_decay')
    rate = self.rate
    # unlike the variant above, this one also decays bias parameters
    for param in opt.target.params():
        p, g = param.data, param.grad
        with cuda.get_device(p) as dev:
            if int(dev) == -1:
                g += rate * p
            else:
                kernel(p, rate, g)
def __call__(self, opt):
    if cuda.available:
        kernel = cuda.elementwise(
            'T s, T decay', 'T g', 'g += decay * s', 'lasso')
    rate = self.rate
    for param in opt.target.params():
        p, g = param.data, param.grad
        xp = cuda.get_array_module(p)
        sign = xp.sign(p)
        with cuda.get_device(p) as dev:
            if int(dev) == -1:
                g += rate * sign
            else:
                kernel(sign, rate, g)
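# Numeric sanity check (not from the source): `g += rate * sign(p)` adds
# the subgradient of the L1 penalty E(p) = rate * ||p||_1 (away from 0),
# which is what this Lasso hook implements.
def _check_lasso_grad(rate=0.01, eps=1e-6):
    p = numpy.array([1.0, -2.0, 3.0])
    penalty = lambda q: rate * numpy.sum(numpy.abs(q))
    num_grad = numpy.array([
        (penalty(p + eps * e) - penalty(p - eps * e)) / (2 * eps)
        for e in numpy.eye(len(p))])
    numpy.testing.assert_allclose(
        num_grad, rate * numpy.sign(p), rtol=1e-4)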
def empty_like(x):
    if cuda.available and isinstance(x, cuda.ndarray):
        return cuda.cupy.empty_like(x)
    else:
        return numpy.empty_like(x)
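# Usage sketch for the helper above: the result mirrors the input's
# backend, so numpy stays on the host and cupy stays on its GPU.
#
#     x = numpy.ones((2, 2), dtype=numpy.float32)
#     y = empty_like(x)                  # host array, contents arbitrary
#     assert y.shape == x.shape and y.dtype == x.dtype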
def test_get_dummy_device(self):
    if not cuda.available:
        self.assertIs(cuda.get_device(), cuda.DummyDevice)
def test_to_gpu_unavailable(self):
    x = numpy.array([1])
    if not cuda.available:
        with self.assertRaises(RuntimeError):
            cuda.to_gpu(x)
def test_empty_like_unavailable(self):
    x = numpy.array([1])
    if not cuda.available:
        with self.assertRaises(RuntimeError):
            cuda.empty_like(x)
def __init__(self, blocking_method='non_block'):
    if not cuda.available:
        raise RuntimeError('CUDA must be available to use GPUTimer.')
    if blocking_method not in (
            'non_block', 'block_first_time', 'block_every_time'):
        raise ValueError(
            'Invalid blocking method: {}'.format(blocking_method))
    self.blocking_method = blocking_method
    self.reset()
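# Usage sketch (only __init__ is shown, so timing methods other than
# reset() are not assumed here; construction requires a CUDA device):
#
#     timer = GPUTimer()                    # default: 'non_block'
#     timer = GPUTimer('block_every_time')  # sync on every measurement
#     GPUTimer('bogus')                     # raises ValueError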
def gpu_enabled(self):
    if not cuda.available:
        return False
    return self._gpu
def gpu(self):
    if not cuda.available:
        return False
    return self.xp is cuda.cupy