def concat(x, y, train='on'):
    # Flatten x and y to 1-D and join them into a single float32 vector.
    # Relies on numpy (np), chainer.cuda, and the Deel framework's Deel
    # class being imported at module level.
    xp = Deel.xp
    if Deel.gpu >= 0:
        # Pull both arrays back to host memory before using numpy routines.
        x = cuda.to_cpu(x)
        y = cuda.to_cpu(y)
        x = x.copy()
        y = y.copy()
    xdim = 1
    for n in x.shape:
        xdim *= n
    if len(x.shape) > 1:
        x = x.reshape((xdim,))
    ydim = 1
    for n in y.shape:
        ydim *= n
    if len(y.shape) > 1:
        y = y.reshape((ydim,))
    z = np.r_[x, y]
    # Move the result onto the current backend (numpy or cupy).
    z = xp.asarray(z, dtype=xp.float32)
    return z
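A quick way to check the flattening behavior, assuming Deel is configured for CPU execution (Deel.gpu < 0 and Deel.xp is numpy); the input values are made up for illustration:

import numpy as np

a = np.arange(6, dtype=np.float32).reshape(2, 3)
b = np.arange(4, dtype=np.float32)
z = concat(a, b)        # both inputs flattened, then joined
assert z.shape == (10,) and z.dtype == np.float32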
Python to_cpu() usage examples
def evaluate(dataset, model, args, n_query_data=None):
    pool, modelL = make_pool(model, args.n_pool)
    correct_per, sub_correct_per, n_choice_per = 0., 0., 0.
    sum_loss_data = xp.zeros(())
    idsL = model.make_efficient_chunk(
        list(six.moves.range(len(dataset))), dataset)
    all_datasL = [[dataset[idx] for idx in ids] for ids in idsL]
    # Split the dataset into six chunks and evaluate them with the worker pool.
    n_ch = len(all_datasL[0]) // 6 + 1  # integer division (Python 3 safe)
    for j in six.moves.range(6):
        datasL = [each_datas[j * n_ch:(j + 1) * n_ch]
                  for each_datas in all_datasL]
        for result in pool.imap_unordered(
                wrapper_solve, zip(modelL, datasL, [False] * args.n_pool)):
            sum_loss_one, n_T, n_choice, n_s = result
            sum_loss_data += sum_loss_one
            correct_per += n_T
            sub_correct_per += n_s
            n_choice_per += n_choice
    if n_query_data is None:
        n_query_data = sum(len(v_["queries"]) for v_ in dataset)
    pool.close()
    return (cuda.to_cpu(sum_loss_data) / n_query_data,
            correct_per, n_choice_per, sub_correct_per)
test_graph_convolution.py (project: chainer-graph-cnn, author: pfnet-research)
def test_forward_consistency(self, nobias=False):
    # Forward pass on the CPU.
    x_cpu = chainer.Variable(self.x)
    W_cpu = chainer.Variable(self.W)
    b_cpu = None if nobias else chainer.Variable(self.b)
    func_cpu = graph_convolution.GraphConvolutionFunction(self.L, self.K)
    func_cpu.to_cpu()
    args_cpu = (x_cpu, W_cpu)
    if b_cpu is not None:
        args_cpu += (b_cpu,)
    y_cpu = func_cpu(*args_cpu)

    # Forward pass on the GPU with the same inputs.
    x_gpu = chainer.Variable(cuda.to_gpu(self.x))
    W_gpu = chainer.Variable(cuda.to_gpu(self.W))
    b_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
    func_gpu = graph_convolution.GraphConvolutionFunction(self.L, self.K)
    func_gpu.to_gpu()
    args_gpu = (x_gpu, W_gpu)
    if b_gpu is not None:
        args_gpu += (b_gpu,)
    y_gpu = func_gpu(*args_gpu)

    # Both backends must produce the same output.
    testing.assert_allclose(
        y_cpu.data, y_gpu.data.get(), **self.check_forward_options)
def visualize(gen, epoch, savedir, batch_size=36, image_type='sigmoid'):
    # Sample latent vectors and generate a batch of fake images.
    # volatile=True is Chainer v1 API; on v2+ use chainer.no_backprop_mode().
    z = chainer.Variable(gen.xp.asarray(gen.make_hidden(batch_size)),
                         volatile=True)
    x_fake = gen(z, train=False)
    # Map generator output to uint8 pixels depending on the output
    # activation: sigmoid outputs lie in [0, 1], tanh outputs in [-1, 1].
    if image_type == 'sigmoid':
        img_gen = (cuda.to_cpu(x_fake.data) * 255).clip(0, 255).astype(np.uint8)
    else:
        img_gen = ((cuda.to_cpu(x_fake.data) + 1) * 127.5).clip(0, 255).astype(np.uint8)
    fig = plt.figure(figsize=(9, 9))
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1,
                        hspace=0.05, wspace=0.05)
    # Tile the first 36 samples in a 6x6 grid.
    for i in range(36):
        ax = fig.add_subplot(6, 6, i + 1, xticks=[], yticks=[])
        ax.imshow(img_gen[i].transpose(1, 2, 0))  # CHW -> HWC
    fig.savefig('{}/generate_{:03d}'.format(savedir, epoch))
    # plt.show()
    plt.close()
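A hypothetical call, assuming gen is a DCGAN-style generator that exposes make_hidden() and accepts train=False (both names taken from the snippet above):

# visualize(gen, epoch=10, savedir='result', image_type='tanh')
# -> writes result/generate_010, a 6x6 grid of generated samples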
def _update_d_and_f(self, state):
    d, f = state['d'], state['f']
    if self.t > 1:
        old_f = float(cuda.to_cpu(state['f']))
        # Clip the objective's relative change to the tracking thresholds.
        if self.loss > old_f:
            delta = self.lower_threshold + 1.
            Delta = self.upper_threshold + 1.
        else:
            delta = 1. / (self.upper_threshold + 1.)
            Delta = 1. / (self.lower_threshold + 1.)
        c = min(max(delta, self.loss / (old_f + 1e-12)), Delta)
        new_f = c * old_f
        # Smoothed relative change of the tracked objective value.
        r = abs(new_f - old_f) / (min(new_f, old_f) + 1e-12)
        d += (1 - self.beta3) * (r - d)
        f[:] = new_f
    else:
        f[:] = self.loss
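The last two lines of the t > 1 branch match the smoothed relative-change term of the Eve optimizer (Koushik and Hayashi, 2016), if that is the algorithm this snippet follows:

d_t = \beta_3 d_{t-1} + (1 - \beta_3)\, r_t,
\qquad
r_t = \frac{\lvert \hat{f}_t - \hat{f}_{t-1} \rvert}{\min(\hat{f}_t, \hat{f}_{t-1})}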
def copy_to_cpu(imgs):
    # Unwrap a chainer Variable to its underlying array.
    if isinstance(imgs, chainer.variable.Variable):
        imgs = imgs.data
    try:
        # cupy may not be importable on CPU-only installations.
        if isinstance(imgs, cupy.core.core.ndarray):
            imgs = cuda.to_cpu(imgs)
    except (NameError, AttributeError):
        pass
    return imgs
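A minimal check of the CPU path (no GPU required):

import numpy as np
import chainer

v = chainer.Variable(np.ones((2, 3), dtype=np.float32))
out = copy_to_cpu(v)        # unwraps the Variable; already a host array
assert isinstance(out, np.ndarray)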
def save_hdf5(filename, obj):
    # Serialization needs host-side arrays, so move a GPU model to the
    # CPU first and restore it afterwards.
    gpu = (hasattr(obj, "xp") and obj.xp == cuda.cupy)
    if gpu:
        obj.to_cpu()
    serializers.save_hdf5(filename, obj)
    if gpu:
        obj.to_gpu()
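A minimal sketch of saving a model with the helper above, assuming h5py is installed (chainer.serializers.save_hdf5 depends on it); the filename is arbitrary:

import chainer.links as L

model = L.Linear(3, 2)
save_hdf5('model.h5', model)    # moves a GPU model to CPU, saves, moves back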
def evaluate(dataset, model, args):
    sum_correct = 0.
    sum_loss_data = xp.zeros(())
    for i in six.moves.range(0, len(dataset), args.batchsize):
        x_batch_seq = make_batch([dataset[i + j:i + j + 1]
                                  for j in range(args.batchsize)], train=False)
        x_batch_seq, pos, neg = x_batch_seq[:4], x_batch_seq[4], x_batch_seq[5]
        loss, correct = model.solve(
            x_batch_seq, pos, neg, train=False, variablize=True)
        sum_loss_data += loss.data
        sum_correct += correct
    return cuda.to_cpu(sum_loss_data) / len(dataset), sum_correct
def to_cpu(self):
    self.model.to_cpu()
yolov2_predict_caltech.py (project: chainer-object-detection, author: dsanno)
def to_cpu(self):
    self.model.to_cpu()
def act(self, state):
    with chainer.using_config('train', False):
        s = self.batch_states([state], self.xp, self.phi)
        action = self.policy(s).sample()
        # Q is not needed here, but log it just for information
        q = self.q_function(s, action)
    # Update stats
    self.average_q *= self.average_q_decay
    self.average_q += (1 - self.average_q_decay) * float(q.data)
    self.logger.debug('t:%s a:%s q:%s',
                      self.t, action.data[0], q.data)
    return cuda.to_cpu(action.data[0])
def _act(self, state):
    xp = self.xp
    with chainer.using_config('train', False):
        b_state = batch_states([state], xp, self.phi)
        with chainer.no_backprop_mode():
            action_distrib, v = self.model(b_state)
            action = action_distrib.sample()
        return cuda.to_cpu(action.data)[0], cuda.to_cpu(v.data)[0]
def _lossfun(self,
             distribs, vs_pred, log_probs,
             vs_pred_old, target_log_probs,
             advs, vs_teacher):
    prob_ratio = F.exp(log_probs - target_log_probs)
    ent = distribs.entropy
    prob_ratio = F.expand_dims(prob_ratio, axis=-1)
    # Clipped surrogate policy objective.
    loss_policy = -F.mean(F.minimum(
        prob_ratio * advs,
        F.clip(prob_ratio, 1 - self.clip_eps, 1 + self.clip_eps) * advs))
    if self.clip_eps_vf is None:
        loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
    else:
        # Clipped value loss: take the worse of the clipped and unclipped errors.
        loss_value_func = F.mean(F.maximum(
            F.square(vs_pred - vs_teacher),
            F.square(_elementwise_clip(vs_pred,
                                       vs_pred_old - self.clip_eps_vf,
                                       vs_pred_old + self.clip_eps_vf)
                     - vs_teacher)))
    loss_entropy = -F.mean(ent)
    # Update running averages of each loss term for logging.
    self.average_loss_policy += (
        (1 - self.average_loss_decay) *
        (cuda.to_cpu(loss_policy.data) - self.average_loss_policy))
    self.average_loss_value_func += (
        (1 - self.average_loss_decay) *
        (cuda.to_cpu(loss_value_func.data) - self.average_loss_value_func))
    self.average_loss_entropy += (
        (1 - self.average_loss_decay) *
        (cuda.to_cpu(loss_entropy.data) - self.average_loss_entropy))
    return (
        loss_policy
        + self.value_func_coef * loss_value_func
        + self.entropy_coef * loss_entropy
    )
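For reference, loss_policy above is the clipped surrogate objective from PPO (Schulman et al., 2017), written with probability ratio r_t and advantage A_t:

L^{\mathrm{CLIP}}(\theta) = -\mathbb{E}_t\left[\min\bigl(r_t(\theta)\,A_t,\ \operatorname{clip}(r_t(\theta),\,1-\epsilon,\,1+\epsilon)\,A_t\bigr)\right],
\qquad
r_t(\theta) = \frac{\pi_\theta(a_t \mid s_t)}{\pi_{\theta_{\mathrm{old}}}(a_t \mid s_t)}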
def _compute_loss(self, exp_batch, gamma, errors_out=None):
    """Compute the Q-learning loss for a batch of experiences.

    Args:
        exp_batch (dict): batch of experiences; see update()'s docstring
        gamma (float): discount factor
    Returns:
        loss
    """
    y, t = self._compute_y_and_t(exp_batch, gamma)
    if errors_out is not None:
        del errors_out[:]
        delta = F.sum(abs(y - t), axis=1)
        delta = cuda.to_cpu(delta.data)
        for e in delta:
            errors_out.append(e)
    if 'weights' in exp_batch:
        # Prioritized replay: weight each sample's loss.
        return compute_weighted_value_loss(
            y, t, exp_batch['weights'],
            clip_delta=self.clip_delta,
            batch_accumulator=self.batch_accumulator)
    else:
        return compute_value_loss(y, t, clip_delta=self.clip_delta,
                                  batch_accumulator=self.batch_accumulator)
def act(self, state):
    with chainer.using_config('train', False):
        with chainer.no_backprop_mode():
            action_value = self.model(
                self.batch_states([state], self.xp, self.phi))
            q = float(action_value.max.data)
            action = cuda.to_cpu(action_value.greedy_actions.data)[0]
    # Update stats
    self.average_q *= self.average_q_decay
    self.average_q += (1 - self.average_q_decay) * q
    self.logger.debug('t:%s q:%s action_value:%s', self.t, q, action_value)
    return action
def check_forward(self, xs):
    y = chainerrl.functions.weighted_sum_arrays(xs, weights=self.weights)
    correct_y = sum(x * w for x, w in zip(self.xs, self.weights))
    gradient_check.assert_allclose(correct_y, cuda.to_cpu(y.data))
def check_forward(self, diag_data, non_diag_data):
    diag = chainer.Variable(diag_data)
    non_diag = chainer.Variable(non_diag_data)
    y = lower_triangular_matrix(diag, non_diag)
    # Build the expected matrix on the CPU: strictly-lower entries come
    # from non_diag, diagonal entries from diag.
    correct_y = numpy.zeros(
        (self.batch_size, self.n, self.n), dtype=numpy.float32)
    tril_rows, tril_cols = numpy.tril_indices(self.n, -1)
    correct_y[:, tril_rows, tril_cols] = cuda.to_cpu(non_diag_data)
    diag_rows, diag_cols = numpy.diag_indices(self.n)
    correct_y[:, diag_rows, diag_cols] = cuda.to_cpu(diag_data)
    gradient_check.assert_allclose(correct_y, cuda.to_cpu(y.data))
def check_forward(self, xs):
    y = chainerrl.functions.sum_arrays(xs)
    correct_y = sum(self.xs)
    gradient_check.assert_allclose(correct_y, cuda.to_cpu(y.data))
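The common thread through all of these snippets: cuda.to_cpu copies a CuPy array back to host memory and returns NumPy input unchanged, so it is safe to call regardless of where the data lives. A minimal demonstration:

import numpy as np
from chainer import cuda

x = np.zeros((2, 2), dtype=np.float32)
assert cuda.to_cpu(x) is x       # NumPy arrays pass through untouched
# With a CUDA device available:
# x_gpu = cuda.to_gpu(x)         # cupy.ndarray on the device
# x_cpu = cuda.to_cpu(x_gpu)     # back to numpy.ndarray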