def init_hx(self, xs):
    """Return an all-zero initial hidden state for the recurrent link.

    The state has shape ``(n_layers * direction, batch, out_size)`` and
    uses the dtype of the first input sequence. It is allocated under
    this link's device context so it lives alongside the parameters.
    """
    batch_size = len(xs)
    state_shape = (self.n_layers * self.direction, batch_size, self.out_size)
    with cuda.get_device_from_id(self._device_id):
        hx = variable.Variable(
            self.xp.zeros(state_shape, dtype=xs[0].dtype))
    return hx
# Python example source code for get_device_from_id()
def __call__(self, x, gamma_=None, beta_=None):
    """Apply instance normalization to ``x``.

    Scale (gamma) and shift (beta) are resolved in order of precedence:
    this link's own ``gamma``/``beta`` parameters, then the optional
    ``gamma_``/``beta_`` arguments, then freshly created constant arrays
    (ones for gamma, zeros for beta) shaped like ``avg_mean``.

    Args:
        x: Input variable to normalize.
        gamma_: Optional external scale; used only when the link has no
            ``gamma`` parameter.
        beta_: Optional external shift; used only when the link has no
            ``beta`` parameter.

    Returns:
        The normalized output variable.
    """
    # Resolve gamma: own parameter > caller-supplied > constant ones.
    if hasattr(self, 'gamma'):
        gamma = self.gamma
    elif gamma_ is not None:
        gamma = gamma_
    else:
        with cuda.get_device_from_id(self._device_id):
            gamma = variable.Variable(self.xp.ones(
                self.avg_mean.shape, dtype=x.dtype))
    # Resolve beta the same way, falling back to constant zeros.
    if hasattr(self, 'beta'):
        beta = self.beta
    elif beta_ is not None:
        beta = beta_
    else:
        with cuda.get_device_from_id(self._device_id):
            beta = variable.Variable(self.xp.zeros(
                self.avg_mean.shape, dtype=x.dtype))
    decay = self.decay
    if (not configuration.config.train) and self.valid_test:
        # Evaluation path: normalize with the stored running statistics.
        mean = variable.Variable(self.avg_mean)
        var = variable.Variable(self.avg_var)
        ret = fixed_instance_normalization(
            x, gamma, beta, mean, var, self.eps)
    else:
        # Training path: normalize with batch statistics; the function
        # object also produces updated running averages, which we adopt.
        func = InstanceNormalizationFunction(
            self.eps, self.avg_mean, self.avg_var, decay)
        ret = func(x, gamma, beta)
        self.avg_mean = func.running_mean
        self.avg_var = func.running_var
    return ret
def fit(self, content_image, style_image, epoch_num, callback=None):
    """Run the style-transfer optimization under the configured device.

    When ``self.device_id`` is non-negative the private ``__fit`` runs
    inside that GPU's device context; otherwise ``None`` is passed, which
    leaves the default device in effect.
    """
    target_device = self.device_id if self.device_id >= 0 else None
    with cuda.get_device_from_id(target_device):
        return self.__fit(content_image, style_image, epoch_num, callback)
def fit(self, content_image, style_image, epoch_num, callback=None):
    """Delegate to the private ``__fit`` on the selected device.

    A non-negative ``self.device_id`` selects that GPU; any other value
    maps to ``None`` so ``get_device_from_id`` yields the default device.
    """
    chosen = None
    if self.device_id >= 0:
        chosen = self.device_id
    with cuda.get_device_from_id(chosen):
        return self.__fit(content_image, style_image, epoch_num, callback)
def __call__(self, x, finetune=False):
    """Apply multi-node (communicator-synchronized) batch normalization.

    Scale and shift default to constant ones/zeros shaped like
    ``avg_mean`` when the link was created without ``gamma``/``beta``
    parameters.

    Args:
        x: Input variable to normalize.
        finetune (bool): In training mode, accumulate an exact running
            average over calls (decay ``1 - 1/N``) instead of using the
            fixed exponential ``self.decay``.

    Returns:
        The normalized output variable.
    """
    # Fall back to a constant scale of ones when no gamma parameter exists.
    if hasattr(self, 'gamma'):
        gamma = self.gamma
    else:
        with cuda.get_device_from_id(self._device_id):
            gamma = variable.Variable(self.xp.ones(
                self.avg_mean.shape, dtype=x.dtype))
    # Likewise, fall back to a constant shift of zeros.
    if hasattr(self, 'beta'):
        beta = self.beta
    else:
        with cuda.get_device_from_id(self._device_id):
            beta = variable.Variable(self.xp.zeros(
                self.avg_mean.shape, dtype=x.dtype))
    if chainer.configuration.config.train:
        if finetune:
            # Fine-tuning: N counts calls so the decay yields an exact
            # cumulative mean rather than an exponential moving average.
            self.N += 1
            decay = 1. - 1. / self.N
        else:
            decay = self.decay
        func = MultiNodeBatchNormalizationFunction(
            self.comm, self.eps, self.avg_mean, self.avg_var, decay)
        ret = func(x, gamma, beta)
        # In-place copy keeps the persistent arrays' identity (and device).
        self.avg_mean[:] = func.running_mean
        self.avg_var[:] = func.running_var
    else:
        # Use running average statistics or fine-tuned statistics.
        mean = variable.Variable(self.avg_mean)
        var = variable.Variable(self.avg_var)
        ret = batch_normalization.fixed_batch_normalization(
            x, gamma, beta, mean, var, self.eps)
    return ret
def init_hx(self, xs):
    """Create a zero initial hidden state compatible with ``xs``.

    The state shape is ``(n_layers * direction, batch, out_size)``;
    on pre-2.x Chainer the legacy ``volatile='auto'`` flag is passed.
    """
    layer_count = self.n_layers * self.direction
    state_shape = (layer_count, xs.data.shape[1], self.out_size)
    with cuda.get_device_from_id(self._device_id):
        zeros = self.xp.zeros(state_shape, dtype=xs.dtype)
        # NOTE(review): relies on a module-level ``args`` object —
        # presumably parsed options holding the Chainer version; confirm
        # it is always defined before this method runs.
        if args.chainer_version_check[0] == 2:
            hx = chainer.Variable(zeros)
        else:
            hx = chainer.Variable(zeros, volatile='auto')
    return hx
def __init__(self, model, optimizer,
             gpu=None,
             gamma=0.99,
             lambd=0.95,
             phi=lambda x: x,
             value_func_coef=1.0,
             entropy_coef=0.01,
             update_interval=2048,
             minibatch_size=64,
             epochs=10,
             clip_eps=0.2,
             clip_eps_vf=None,
             standardize_advantages=True,
             average_v_decay=0.999, average_loss_decay=0.99,
             ):
    """Initialize the agent, optionally moving the model to a GPU.

    Args:
        model: Model providing the policy/value computation; its ``xp``
            (numpy/cupy module) is cached on the agent.
        optimizer: Optimizer used for updates.
        gpu (int or None): GPU id; a non-negative value selects that
            device and moves ``model`` onto it.
        gamma (float): Discount factor.
        lambd (float): GAE-style lambda parameter.
        phi (callable): Feature/preprocessing function applied to states.
        value_func_coef (float): Weight of the value-function loss term.
        entropy_coef (float): Weight of the entropy bonus.
        update_interval (int): Number of steps gathered per update.
        minibatch_size (int): Minibatch size for each epoch of updates.
        epochs (int): Optimization epochs per collected batch.
        clip_eps (float): Policy-ratio clipping threshold.
        clip_eps_vf (float or None): Value-function clipping threshold;
            ``None`` disables value clipping.
        standardize_advantages (bool): Whether to normalize advantages.
        average_v_decay (float): Decay for the reported average value.
        average_loss_decay (float): Decay for the reported average losses.
    """
    self.model = model
    # Select the GPU and move the model before any arrays are created.
    if gpu is not None and gpu >= 0:
        cuda.get_device_from_id(gpu).use()
        self.model.to_gpu(device=gpu)
    self.optimizer = optimizer
    # Hyper-parameters (stored verbatim).
    self.gamma = gamma
    self.lambd = lambd
    self.phi = phi
    self.value_func_coef = value_func_coef
    self.entropy_coef = entropy_coef
    self.update_interval = update_interval
    self.minibatch_size = minibatch_size
    self.epochs = epochs
    self.clip_eps = clip_eps
    self.clip_eps_vf = clip_eps_vf
    self.standardize_advantages = standardize_advantages
    # Exponential-moving-average statistics exposed for monitoring.
    self.average_v = 0
    self.average_v_decay = average_v_decay
    self.average_loss_policy = 0
    self.average_loss_value_func = 0
    self.average_loss_entropy = 0
    self.average_loss_decay = average_loss_decay
    # Runtime state: array module, last observation, and rollout buffers.
    self.xp = self.model.xp
    self.last_state = None
    self.memory = []
    self.last_episode = []