import os

import numpy as np
from PIL import Image

import chainer
from chainer import Variable


def out_generated_image(gen, dis, rows, cols, seed, dst):
    @chainer.training.make_extension()
    def make_image(trainer):
        np.random.seed(seed)
        n_images = rows * cols
        xp = gen.xp
        z = Variable(xp.asarray(gen.make_hidden(n_images)))
        # z = Variable(xp.zeros((n_images, 100, 1), dtype=xp.float32))
        label = [i for i in range(rows) for j in range(cols)]
        with chainer.using_config('train', False):
            x = gen(z, label)
        x = chainer.cuda.to_cpu(x.data)
        np.random.seed()

        # gen output_activation_func is sigmoid
        x = np.asarray(np.clip(x * 255, 0.0, 255.0), dtype=np.uint8)
        # gen output_activation_func is tanh
        # x = np.asarray(np.clip((x + 1) * 0.5 * 255, 0.0, 255.0), dtype=np.uint8)

        _, _, H, W = x.shape
        x = x.reshape((rows, cols, 1, H, W))
        x = x.transpose(0, 3, 1, 4, 2)
        x = x.reshape((rows * H, cols * W))

        preview_dir = '{}/preview'.format(dst)
        preview_path = preview_dir + \
            '/image{:0>6}.png'.format(trainer.updater.iteration)
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        Image.fromarray(x).save(preview_path)
    return make_image
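For context, a minimal sketch of how an extension like this is usually attached to a Chainer trainer; the updater, trigger interval, and output directory below are assumptions, not part of the original snippet:

# Hypothetical wiring (assumes `updater` was built from the gen/dis optimizers).
trainer = chainer.training.Trainer(updater, (100, 'epoch'), out='result')
trainer.extend(
    out_generated_image(gen, dis, rows=10, cols=10, seed=0, dst='result'),
    trigger=(1000, 'iteration'))
trainer.run()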
def out_generated_image(gen, dis, rows, cols, seed, dst):
    @chainer.training.make_extension()
    def make_image(trainer):
        np.random.seed(seed)
        n_images = rows * cols
        xp = gen.xp
        z = Variable(xp.asarray(gen.make_hidden(n_images)))
        with chainer.using_config('train', False):
            x = gen(z)
        x = chainer.cuda.to_cpu(x.data)
        np.random.seed()

        # gen output_activation_func is sigmoid
        x = np.asarray(np.clip(x * 255, 0.0, 255.0), dtype=np.uint8)
        # gen output_activation_func is tanh
        # x = np.asarray(np.clip((x + 1) * 0.5 * 255, 0.0, 255.0), dtype=np.uint8)

        _, _, H, W = x.shape
        x = x.reshape((rows, cols, 1, H, W))
        x = x.transpose(0, 3, 1, 4, 2)
        x = x.reshape((rows * H, cols * W))

        preview_dir = '{}/preview_LSGAN_pixel_shuffler'.format(dst)
        preview_path = preview_dir + \
            '/image{:0>8}.png'.format(trainer.updater.iteration)
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        Image.fromarray(x).save(preview_path)
    return make_image
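The reshape/transpose sequence above is what tiles the rows * cols generated samples into a single preview grid. A self-contained NumPy check of the same trick, with toy shapes:

import numpy as np

rows, cols, H, W = 2, 3, 4, 5
x = np.arange(rows * cols * H * W, dtype=np.uint8).reshape(rows * cols, 1, H, W)
x = x.reshape(rows, cols, 1, H, W)
x = x.transpose(0, 3, 1, 4, 2)  # (rows, H, cols, W, 1): interleave grid and pixel axes
grid = x.reshape(rows * H, cols * W)
assert grid.shape == (8, 15)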
def convert_debug(self, content_img, init_img, output_directory,
                  max_iteration=1000, debug_span=100, random_init=False,
                  xsplit=3, ysplit=3, overwrap=50, average_pooling=False):
    init_array = self.xp.array(neural_art.utility.img2array(init_img))
    content_array = neural_art.utility.img2array(content_img)
    if random_init:
        init_array = self.xp.random.uniform(-20, 20, init_array.shape,
                                            dtype=init_array.dtype)
    subrects = []
    # (step - overwrap) * (split - 1) = w - step
    xstep = (init_array.shape[2] + (xsplit - 1) * overwrap - 1) // xsplit
    ystep = (init_array.shape[3] + (ysplit - 1) * overwrap - 1) // ysplit
    for x in range(0, init_array.shape[2] - xstep, xstep - overwrap):
        for y in range(0, init_array.shape[3] - ystep, ystep - overwrap):
            subrects.append((x, y, x + xstep, y + ystep))
    # Precompute the target texture feature for every sub-rectangle.
    target_texture_ratios = []
    for x1, y1, x2, y2 in subrects:
        subimg = self.xp.asarray(content_array[:, :, x1:x2, y1:y2])
        layers = self.model.forward_layers(chainer.Variable(subimg, volatile=True))
        texture_feature = self.converter._to_texture_feature(layers)
        target_texture_ratio = self.converter.optimize_texture_feature(texture_feature)
        target_texture_ratios.append(target_texture_ratio)
    parameter_now = chainer.links.Parameter(init_array)
    self.optimizer.setup(parameter_now)
    for i in range(max_iteration + 1):
        neural_art.utility.print_ltsv({"iteration": i})
        if i % debug_span == 0 and i > 0:
            print("save")
            neural_art.utility.array2img(chainer.cuda.to_cpu(parameter_now.W.data)).save(
                os.path.join(output_directory, "{}.png".format(i)))
        parameter_now.zerograds()
        for (x1, y1, x2, y2), target_texture_ratio in zip(subrects, target_texture_ratios):
            subimg = self.xp.asarray(content_array[:, :, x1:x2, y1:y2])
            contents_layers = self.model.forward_layers(chainer.Variable(subimg, volatile=True))
            contents_layers = [
                chainer.Variable(layer.data) for layer in contents_layers
            ]
            x = chainer.Variable(self.xp.ascontiguousarray(
                parameter_now.W.data[:, :, x1:x2, y1:y2]))
            layers = self.model.forward_layers(x, average_pooling=average_pooling)
            texture_feature = self.converter._to_texture_feature(layers)
            target_texture_feature = self.converter._constructed_feature(target_texture_ratio)
            loss_texture = self.converter.squared_error(
                texture_feature,
                target_texture_feature)
            loss_content = self.converter._contents_loss(layers, contents_layers)
            loss = self.texture_weight * loss_texture + self.content_weight * loss_content
            loss.backward()
            # Accumulate each (possibly overlapping) tile's gradient back
            # into the full-image parameter before the optimizer step.
            parameter_now.W.grad[:, :, x1:x2, y1:y2] += x.grad
        self.optimizer.update()
    return neural_art.utility.array2img(chainer.cuda.to_cpu(parameter_now.W.data))
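The tile geometry (xstep, ystep, overwrap) is the subtle part of convert_debug. The following standalone helper repeats the same arithmetic so it can be sanity-checked in isolation; the function name and toy sizes are mine, the formulas are copied from the method:

def make_subrects(width, height, xsplit, ysplit, overwrap):
    # Tiles of size (xstep, ystep), placed every step - overwrap pixels,
    # so adjacent tiles share `overwrap` rows/columns.
    xstep = (width + (xsplit - 1) * overwrap - 1) // xsplit
    ystep = (height + (ysplit - 1) * overwrap - 1) // ysplit
    return [(x, y, x + xstep, y + ystep)
            for x in range(0, width - xstep, xstep - overwrap)
            for y in range(0, height - ystep, ystep - overwrap)]

# 300x300 image, 3x3 split, 50 px overlap -> 133 px tiles every 83 px.
print(make_subrects(300, 300, xsplit=3, ysplit=3, overwrap=50))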
def __call__(self, inputs, outputs, disable=(), train=True, tuning_layer='fc1000'):
    """Executes a sub-network of the network.

    This function acts as an interpreter of the network definition for
    Caffe. On execution, it interprets each layer one by one, and if the
    bottom blobs are already computed, then it emulates the layer and
    stores the output blobs as :class:`~chainer.Variable` objects.

    Args:
        inputs (dict): A dictionary whose key-value pairs indicate initial
            correspondences between blob names and
            :class:`~chainer.Variable` objects.
        outputs (Iterable): A list of blob names whose corresponding
            :class:`~chainer.Variable` objects are returned.
        disable (Iterable): A list of layer names that will be ignored
            during the forward computation.
        train (bool): If ``True``, this function emulates the TRAIN phase
            of the Caffe layers. Otherwise, it emulates the TEST phase.
        tuning_layer (str): Name of the layer at which fine-tuning starts;
            layers below it run in TEST mode and their outputs are
            detached from the computational graph.

    Returns:
        tuple: A tuple of output :class:`~chainer.Variable` objects
        corresponding to elements of the ``outputs`` argument.
    """
    self.train = False
    variables = dict(inputs)
    self.cleargrads()
    for func_name, bottom, top in self.layers:
        if (func_name in disable or
                func_name not in self.forwards or
                any(blob not in variables for blob in bottom)):
            continue
        func = self.forwards[func_name]
        input_vars = tuple(variables[blob] for blob in bottom)
        if func_name == tuning_layer:
            # Re-wrap the inputs so that backprop stops here: everything
            # below the tuning layer acts as a fixed feature extractor.
            volatile = 'off' if train else 'on'
            input_vars = tuple(
                chainer.Variable(blob.data, volatile=volatile)
                for blob in input_vars)
            self.train = True
        output_vars = func(*input_vars)
        if not isinstance(output_vars, collections.Iterable):
            output_vars = output_vars,
        for var, name in zip(output_vars, top):
            variables[name] = var
    self.variables = variables
    return tuple(variables[blob] for blob in outputs)
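A hedged sketch of driving this interpreter-style __call__, mirroring how chainer.links.caffe.CaffeFunction is normally used; the loader class, model file, and blob names below are assumptions:

import numpy as np
import chainer

# Hypothetical: some CaffeFunction subclass exposing the __call__ above.
model = FineTunableCaffeNet('ResNet-50-model.caffemodel')
x = chainer.Variable(np.zeros((1, 3, 224, 224), dtype=np.float32))
# Run the net up to 'fc1000' in TEST mode, then fine-tune from 'fc1000' on.
y, = model(inputs={'data': x}, outputs=['fc1000'],
           train=True, tuning_layer='fc1000')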
def __call__(self, x, im_info):
    h, n = self.trunk(x), x.data.shape[0]
    rpn_cls_score = self.rpn_cls_score(h)
    c, hh, ww = rpn_cls_score.data.shape[1:]
    rpn_bbox_pred = self.rpn_bbox_pred(h)
    rpn_cls_score = F.reshape(rpn_cls_score, (n, 2, -1))

    # RoI Proposal
    rpn_cls_prob = F.softmax(rpn_cls_score)
    rpn_cls_prob_reshape = F.reshape(rpn_cls_prob, (n, c, hh, ww))
    rois = self.proposal_layer(
        rpn_cls_prob_reshape, rpn_bbox_pred, im_info, self.train)
    if self.gpu >= 0:
        rois = to_gpu(rois, device=self.gpu)
        im_info = to_gpu(im_info, device=self.gpu)
        with chainer.cuda.Device(self.gpu):
            boxes = rois[:, 1:5] / im_info[0][2]
    else:
        boxes = rois[:, 1:5] / im_info[0][2]
    self.rois = rois
    rois = chainer.Variable(rois, volatile=not self.train)

    # RCNN
    pool5 = roi_pooling_2d(self.trunk.feature, rois, 7, 7, 0.0625)
    fc6 = F.relu(self.fc6(pool5))
    fc7 = F.relu(self.fc7(fc6))
    self.score_fc7 = self.cls_score(fc7)
    self.scores = F.softmax(self.score_fc7)
    box_deltas = self.bbox_pred(fc7).data
    self.deltas = box_deltas
    pred_boxes = bbox_transform_inv(boxes, box_deltas, self.gpu)
    self.pred_boxes = clip_boxes(pred_boxes, im_info[0][:2], self.gpu)

    if self.train:
        # loss_cls = F.softmax_cross_entropy(cls_score, labels)
        # huber loss with delta=1 means SmoothL1Loss
        return None
    else:
        return self.scores, self.pred_boxes
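And a minimal test-time driver for this Faster R-CNN forward pass; the preprocessing step and any names not used above are assumptions:

import numpy as np
import chainer

model.train = False          # take the inference branch above
img = preprocess(raw_image)  # hypothetical: (3, H, W) float32, mean-subtracted
x = chainer.Variable(model.xp.asarray(img[np.newaxis]), volatile=True)
# Each im_info row is (height, width, rescale factor) for one image.
im_info = np.array([[img.shape[1], img.shape[2], 1.0]], dtype=np.float32)
scores, pred_boxes = model(x, im_info)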