Example source code for the Python method to_cpu()

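All of the snippets on this page revolve around the same primitive: chainer.cuda.to_cpu() copies an array back to host memory and returns a numpy.ndarray, while cuda.to_gpu() is its device-side counterpart. A minimal round-trip sketch (my own illustration, assuming Chainer is installed; the GPU branch only runs when CuPy is available):

import numpy as np
from chainer import cuda

x = np.arange(6, dtype=np.float32).reshape(2, 3)

# For arrays already on the host, to_cpu simply passes them through.
x_cpu = cuda.to_cpu(x)
assert isinstance(x_cpu, np.ndarray)

# With CuPy and a GPU available, arrays can be moved to the device and back.
if cuda.available:
    x_gpu = cuda.to_gpu(x, device=0)
    x_back = cuda.to_cpu(x_gpu)
    assert isinstance(x_back, np.ndarray)
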
dqn_trainer.py (project: chainer_pong, author: icoxfog417)
def calc_loss(self, states, actions, rewards, next_states, episode_ends):
        qv = self.agent.q(states)
        q_t = self.target(next_states)  # Q(s', *)
        max_q_prime = np.array(list(map(np.max, q_t.data)), dtype=np.float32)  # max_a Q(s', a)

        target = cuda.to_cpu(qv.data.copy())
        for i in range(self.replay_size):
            if episode_ends[i][0]:  # terminal transition: no bootstrap from the next state
                _r = np.sign(rewards[i])
            else:
                _r = np.sign(rewards[i]) + self.gamma * max_q_prime[i]

            target[i, actions[i]] = _r

        td = Variable(self.target.arr_to_gpu(target)) - qv
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # avoid division by zero below
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zeros = Variable(self.target.arr_to_gpu(np.zeros((self.replay_size, self.target.n_action), dtype=np.float32)))
        loss = F.mean_squared_error(td_clip, zeros)
        self._loss = loss.data
        self._qv = np.max(qv.data)
        return loss
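
The td_clip expression above is the error-clipping trick commonly used in DQN implementations: TD errors inside [-1, 1] are kept as they are, larger ones are replaced by their sign, so the squared loss grows only linearly for large errors. A standalone NumPy sketch of the same expression, using made-up error values:

import numpy as np

td = np.array([-3.0, -0.5, 0.2, 4.0], dtype=np.float32)  # made-up TD errors

# Keep small errors, replace large ones by their sign (+/-1).
td_tmp = td + 1000.0 * (np.abs(td) <= 1)   # keeps the divisor away from zero
td_clip = td * (np.abs(td) <= 1) + td / np.abs(td_tmp) * (np.abs(td) > 1)

print(td_clip)  # [-1.  -0.5  0.2  1. ]
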
train_model.py (project: ROCStory_skipthought_baseline, author: soskek)
def save(model, optimizer, save_name, args):
    serializers.save_npz(save_name + "model", copy.deepcopy(model).to_cpu())
    serializers.save_npz(save_name + "optimizer", optimizer)
    print('save', save_name)
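
The copy.deepcopy(model).to_cpu() idiom writes a host-side copy of the parameters, so a model that currently lives on the GPU is not moved in place. A small, self-contained sketch of saving and restoring with save_npz / load_npz (TinyModel and the file path are made up for illustration):

import copy
import chainer
import chainer.links as L
from chainer import serializers

class TinyModel(chainer.Chain):
    def __init__(self):
        super(TinyModel, self).__init__()
        with self.init_scope():
            self.fc = L.Linear(4, 2)

model = TinyModel()

# Save a CPU-side copy; the original model object is left untouched.
serializers.save_npz('/tmp/tiny_model.npz', copy.deepcopy(model).to_cpu())

# Restore into a freshly built model with the same architecture.
restored = TinyModel()
serializers.load_npz('/tmp/tiny_model.npz', restored)
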
pgt.py (project: chainerrl, author: chainer)
def act(self, state):

        with chainer.using_config('train', False):
            s = self.batch_states([state], self.xp, self.phi)
            if self.act_deterministically:
                action = self.policy(s).most_probable
            else:
                action = self.policy(s).sample()
            # Q is not needed here, but log it just for information
            q = self.q_function(s, action)

        # Update stats
        self.average_q *= self.average_q_decay
        self.average_q += (1 - self.average_q_decay) * float(q.data)

        self.logger.debug('t:%s a:%s q:%s',
                          self.t, action.data[0], q.data)
        return cuda.to_cpu(action.data[0])
updater.py (project: chainer-cyclegan, author: Aixile)
def getAndUpdateBufferY(self, data):

        if self._iter < self._max_buffer_size:
            self._buffer_y[self._iter, :] = data[0]
            return data

        self._buffer_y[0:self._max_buffer_size-2, :] = self._buffer_y[1:self._max_buffer_size-1, :]
        self._buffer_y[self._max_buffer_size-1, :] = data[0]

        if np.random.rand() < 0.5:
            return data
        idx = np.random.randint(0, self._max_buffer_size)
        return self._buffer_y[idx, :].reshape((1, 3, self._image_size, self._image_size))
        """
    def save_images(self,img, w=2, h=3):
        img = cuda.to_cpu(img)
        img = img.reshape((w, h, 3, self._image_size, self._image_size))
        img = img.transpose(0,1,3,4,2)
        img = (img + 1) *127.5
        img = np.clip(img, 0, 255)
        img = img.astype(np.uint8)
        img = img.reshape((w, h, self._image_size, self._image_size, 3)).transpose(0,2,1,3,4).reshape((w*self._image_size, h*self._image_size, 3))[:,:,::-1]
        Image.fromarray(img).save(self._eval_foler+"/iter_"+str(self._iter)+".jpg")
        """
save_images.py (project: chainer-cyclegan, author: Aixile)
def copy_to_cpu(imgs):
    # Unwrap a chainer Variable to its underlying array.
    if isinstance(imgs, chainer.Variable):
        imgs = imgs.data
    try:
        # Move device arrays back to the host; skipped when CuPy is unavailable.
        if isinstance(imgs, cupy.ndarray):
            imgs = cuda.to_cpu(imgs)
    except Exception:
        pass
    return imgs
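
A short usage sketch of the helper above on a CPU-only machine (my own example): a chainer.Variable is unwrapped to its underlying array, and the CuPy branch is simply skipped when CuPy is not installed.

import numpy as np
import chainer

var = chainer.Variable(np.ones((1, 3, 4, 4), dtype=np.float32))
out = copy_to_cpu(var)
assert isinstance(out, np.ndarray)
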
train.py (project: gconv_experiments, author: tscohen)
def validate(test_data, test_labels, model, batchsize, silent, gpu):
    N_test = test_data.shape[0]
    pbar = ProgressBar(0, N_test)
    sum_accuracy = 0
    sum_loss = 0

    for i in range(0, N_test, batchsize):
        x_batch = test_data[i:i + batchsize]
        y_batch = test_labels[i:i + batchsize]

        if gpu >= 0:
            x_batch = cuda.to_gpu(x_batch.astype(np.float32))
            y_batch = cuda.to_gpu(y_batch.astype(np.int32))

        x = Variable(x_batch)
        t = Variable(y_batch)
        loss, acc = model(x, t, train=False)

        sum_loss += float(cuda.to_cpu(loss.data)) * y_batch.size
        sum_accuracy += float(cuda.to_cpu(acc.data)) * y_batch.size
        if not silent:
            pbar.update(i + y_batch.size)

    return sum_loss, sum_accuracy
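
The float(cuda.to_cpu(...)) step is the part relevant to this page: the per-batch loss and accuracy are 0-d arrays that may live on the GPU, and converting them to Python floats requires pulling them back to the host first. A tiny self-contained illustration with made-up data:

import numpy as np
import chainer.functions as F
from chainer import Variable, cuda

x = Variable(np.random.randn(4, 3).astype(np.float32))
t = Variable(np.random.randn(4, 3).astype(np.float32))
loss = F.mean_squared_error(x, t)

# Pull the 0-d loss array back to the host and convert it to a float,
# just like the accumulation step above.
loss_value = float(cuda.to_cpu(loss.data))
print(loss_value)
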
test_integer_indexing_cuda_kernel.py (project: GrouPy, author: tscohen)
def test_index_group_func():
    import numpy as np
    import cupy as cp
    from chainer import cuda
    input = np.random.randn(2, 3, 4, 5, 6)
    I = np.random.randint(0, 4, (7, 8, 9, 10))
    J = np.random.randint(0, 5, (7, 8, 9, 10))
    K = np.random.randint(0, 6, (7, 8, 9, 10))

    output = input[..., I, J, K].swapaxes(1, 2)

    cpoutput = cp.zeros(output.shape)
    cpinput = cuda.to_gpu(input)
    cpI = cuda.to_gpu(I)
    cpJ = cuda.to_gpu(J)
    cpK = cuda.to_gpu(K)

    index_group_func_kernel(cpinput, cpI, cpJ, cpK, cpoutput)

    cpoutput = cuda.to_cpu(cpoutput)

    error = np.abs(cpoutput - output).sum()
    print(error)
    assert np.isclose(error, 0.)
test_transform_filter.py (project: GrouPy, author: tscohen)
def check_transform_grad(inds, w, transformer, dtype, toll):
    from chainer import gradient_check

    inds = cuda.to_gpu(inds)

    W = Variable(w.astype(dtype))
    R = transformer(inds)

    RW = R(W)

    RW.grad = cp.random.randn(*RW.data.shape).astype(dtype)
    RW.backward(retain_grad=True)

    func = RW.creator
    fn = lambda: func.forward((W.data,))
    gW, = gradient_check.numerical_grad(fn, (W.data,), (RW.grad,))

    gan = cuda.to_cpu(gW)
    gat = cuda.to_cpu(W.grad)

    relerr = np.max(np.abs(gan - gat) / np.maximum(np.abs(gan), np.abs(gat)))

    print(dtype, toll, relerr)
    assert relerr < toll
test_gconv.py (project: GrouPy, author: tscohen)
def check_equivariance(im, layers, input_array, output_array, point_group):

    # Transform the image
    f = input_array(im)
    g = point_group.rand()
    gf = g * f
    im1 = gf.v

    # Apply layers to both images
    im = Variable(cuda.to_gpu(im))
    im1 = Variable(cuda.to_gpu(im1))

    fmap = im
    fmap1 = im1
    for layer in layers:
        layer.to_gpu()
        fmap = layer(fmap)
        fmap1 = layer(fmap1)

    # Transform the computed feature maps
    fmap1_garray = output_array(cuda.to_cpu(fmap1.data))
    r_fmap1_data = (g.inv() * fmap1_garray).v

    fmap_data = cuda.to_cpu(fmap.data)
    assert np.allclose(fmap_data, r_fmap1_data, rtol=1e-5, atol=1e-3)
lstm_parser_bi_fast.py (project: depccg, author: masashi-y)
def concat_examples(batch, device=None):
    if len(batch) == 0:
        raise ValueError('batch is empty')

    if device is None:
        def to_device(x):
            return x
    elif device < 0:
        to_device = cuda.to_cpu
    else:
        def to_device(x):
            return cuda.to_gpu(x, device, cuda.Stream.null)

    result = [to_device(_concat_arrays([s[0] for s in batch], -1)), # ws
              to_device(_concat_arrays([s[1] for s in batch], -1)), # ps
              to_device(_concat_arrays([s[2] for s in batch], -1)), # ss
              [s[3] for s in batch]]                                # ls

    if len(batch[0]) == 7:
        result.append([to_device(s[4]) for s in batch])            # cat_ts
        result.append([to_device(s[5]) for s in batch])            # dep_ts
        result.append(to_device(_concat_arrays([s[6] for s in batch], None))) # weights

    return tuple(result)
train.py (project: chainer-dfi, author: dsanno)
def mean_feature(net, paths, image_size, base_feature, top_num, batch_size, clip_rect=None):
    xp = net.xp
    image_num = len(paths)
    features = []
    for i in six.moves.range(0, image_num, batch_size):
        x = [preprocess_image(Image.open(path).convert('RGB'), image_size, clip_rect) for path in paths[i:i + batch_size]]
        x = xp.asarray(np.concatenate(x, axis=0))
        y = feature(net, x)
        features.append([cuda.to_cpu(layer.data) for layer in y])
    if image_num > top_num:
        last_features = np.concatenate([f[-1] for f in features], axis=0)
        last_features = last_features.reshape((last_features.shape[0], -1))
        base_feature = cuda.to_cpu(base_feature).reshape((1, -1,))
        diff = np.sum((last_features - base_feature) ** 2, axis=1)

        nearest_indices = np.argsort(diff)[:top_num]
        nearests = [np.concatenate(xs, axis=0)[nearest_indices] for xs in zip(*features)]
    else:
        nearests = [np.concatenate(xs, axis=0) for xs in zip(*features)]

    return [xp.asarray(np.mean(f, axis=0, keepdims=True)) for f in nearests]
model.py (project: self-driving-cars, author: musyoku)
def eps_greedy(self, state_batch, exploration_rate):
        if state_batch.ndim == 1:
            state_batch = state_batch.reshape(1, -1)
        elif state_batch.ndim == 3:
            state_batch = state_batch.reshape(-1, 34 * config.rl_history_length)
        prop = np.random.uniform()
        if prop < exploration_rate:
            action_batch = np.random.randint(0, len(config.actions), (state_batch.shape[0],))
            q = None
        else:
            state_batch = Variable(state_batch)
            if config.use_gpu:
                state_batch.to_gpu()
            q = self.compute_q_variable(state_batch, test=True)
            if config.use_gpu:
                q.to_cpu()
            q = q.data
            action_batch = np.argmax(q, axis=1)
        for i in range(action_batch.shape[0]):
            action_batch[i] = self.get_action_for_index(action_batch[i])
        return action_batch, q
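
The method above is a standard epsilon-greedy policy: with probability exploration_rate a uniformly random action is chosen, otherwise the argmax over the Q values. A stripped-down, framework-free sketch of the same decision rule (the Q table is made up):

import numpy as np

def eps_greedy_action(q_values, exploration_rate):
    # q_values: 1-D array of action values for a single state.
    if np.random.uniform() < exploration_rate:
        return np.random.randint(len(q_values))
    return int(np.argmax(q_values))

q_values = np.array([0.1, 0.7, 0.3], dtype=np.float32)
print(eps_greedy_action(q_values, exploration_rate=0.1))
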
test_negative_sampling.py (project: chainer-deconv, author: germanRos)
def check_backward(self, x_data, t_data, y_grad):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        W = self.link.W

        y = self.link(x, t)
        y.grad = y_grad
        y.backward()

        # fix samples
        negative_sampling.NegativeSamplingFunction.samples = y.creator.samples

        def f():
            return self.link(x, t).data,
        gx, gW = gradient_check.numerical_grad(
            f, (x.data, W.data), (y.grad,), eps=1e-2)
        del negative_sampling.NegativeSamplingFunction.samples  # clean up

        gradient_check.assert_allclose(
            cuda.to_cpu(gx), cuda.to_cpu(x.grad), atol=1.e-4)
        gradient_check.assert_allclose(
            cuda.to_cpu(gW), cuda.to_cpu(W.grad), atol=1.e-4)
test_contrastive.py (project: chainer-deconv, author: germanRos)
def check_forward(self, x0_data, x1_data, t_data):
        x0_val = chainer.Variable(x0_data)
        x1_val = chainer.Variable(x1_data)
        t_val = chainer.Variable(t_data)
        loss = functions.contrastive(x0_val, x1_val, t_val, self.margin)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))

        # Compute expected value
        loss_expect = 0
        for i in six.moves.range(self.x0.shape[0]):
            x0d, x1d, td = self.x0[i], self.x1[i], self.t[i]
            d = numpy.sum((x0d - x1d) ** 2)
            if td == 1:  # similar pair
                loss_expect += d
            elif td == 0:  # dissimilar pair
                loss_expect += max(self.margin - math.sqrt(d), 0) ** 2
        loss_expect /= 2.0 * self.t.shape[0]
        self.assertAlmostEqual(loss_expect, loss_value, places=5)
test_max_pooling_2d.py (project: chainer-deconv, author: germanRos)
def check_forward(self, x_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.max_pooling_2d(x, 3, stride=2, pad=1,
                                     cover_all=self.cover_all,
                                     use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual(self.gy.shape, y_data.shape)
        for k in six.moves.range(2):
            for c in six.moves.range(3):
                x = self.x[k, c]
                if self.cover_all:
                    expect = numpy.array([
                        [x[0:2, 0:2].max(), x[0:2, 1:3].max()],
                        [x[1:4, 0:2].max(), x[1:4, 1:3].max()],
                        [x[3:4, 0:2].max(), x[3:4, 1:3].max()]])
                else:
                    expect = numpy.array([
                        [x[0:2, 0:2].max(), x[0:2, 1:3].max()],
                        [x[1:4, 0:2].max(), x[1:4, 1:3].max()]])
                gradient_check.assert_allclose(expect, y_data[k, c])
test_roi_pooling_2d.py (project: chainer-deconv, author: germanRos)
def check_backward(self, x_data, roi_data, y_grad):
        x = chainer.Variable(x_data)
        rois = chainer.Variable(roi_data)
        y = functions.roi_pooling_2d(x, rois, outh=self.outh, outw=self.outw,
                                     spatial_scale=self.spatial_scale)
        y.grad = y_grad
        y.backward()

        xs = (x.data, rois.data)

        def f():
            func = y.creator
            return func.forward(xs)

        gx, _ = gradient_check.numerical_grad(f, xs, (y.grad,))
        gradient_check.assert_allclose(cuda.to_cpu(gx), cuda.to_cpu(x.grad))
test_average_pooling_2d.py (project: chainer-deconv, author: germanRos)
def check_forward(self, x_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.average_pooling_2d(x, 3, stride=2,
                                         pad=1, use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual(self.gy.shape, y_data.shape)
        for k in six.moves.range(2):
            for c in six.moves.range(3):
                x = self.x[k, c]
                expect = numpy.array([
                    [x[0:2, 0:2].sum(), x[0:2, 1:3].sum()],
                    [x[1:4, 0:2].sum(), x[1:4, 1:3].sum()]]) / 9
                gradient_check.assert_allclose(
                    expect, y_data[k, c], **self.check_forward_options)
test_local_response_normalization.py (project: chainer-deconv, author: germanRos)
def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.local_response_normalization(x)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        # Naive implementation
        y_expect = numpy.zeros_like(self.x)
        for n, c, h, w in numpy.ndindex(self.x.shape):
            s = 0
            for i in six.moves.range(max(0, c - 2), min(7, c + 2)):
                s += self.x[n, i, h, w] ** 2
            denom = (2 + 1e-4 * s) ** .75
            y_expect[n, c, h, w] = self.x[n, c, h, w] / denom

        gradient_check.assert_allclose(
            y_expect, y_data, **self.check_forward_optionss)
test_binary_accuracy.py (project: chainer-deconv, author: germanRos)
def check_forward(self, x_data, t_data):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        y = chainer.functions.binary_accuracy(x, t)
        self.assertEqual(y.data.dtype, self.dtype)
        self.assertEqual((), y.data.shape)

        count = 0
        correct = 0
        x_flatten = self.x.ravel()
        t_flatten = self.t.ravel()
        for i in six.moves.range(t_flatten.size):
            if t_flatten[i] == -1:
                continue
            pred = int(x_flatten[i] >= 0)
            if pred == t_flatten[i]:
                correct += 1
            count += 1
        expected = float(correct) / count
        gradient_check.assert_allclose(
            expected, cuda.to_cpu(y.data), **self.check_forward_options)
train_eval.py (project: deep_metric_learning, author: ronekko)
def iterate_forward(model, epoch_iterator, normalize=False):
    xp = model.xp
    y_batches = []
    c_batches = []
    for batch in tqdm(copy.copy(epoch_iterator)):
        x_batch_data, c_batch_data = batch
        x_batch = Variable(xp.asarray(x_batch_data))
        y_batch = model(x_batch)
        if normalize:
            y_batch_data = y_batch.data / xp.linalg.norm(
                y_batch.data, axis=1, keepdims=True)
        else:
            y_batch_data = y_batch.data
        y_batches.append(y_batch_data)
        y_batch = None
        c_batches.append(c_batch_data)
    y_data = cuda.to_cpu(xp.concatenate(y_batches))
    c_data = np.concatenate(c_batches)
    return y_data, c_data


# memory friendly average accuracy for test data
test_googlenet.py (project: deep_metric_learning, author: ronekko)
def check_extract(self):
        x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
        x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)

        result = self.link.extract([x1, x2], layers=['pool5', 'loss3_fc'])
        self.assertEqual(len(result), 2)
        y1 = cuda.to_cpu(result['pool5'].data)
        self.assertEqual(y1.shape, (2, 1024, 1, 1))
        self.assertEqual(y1.dtype, numpy.float32)
        y2 = cuda.to_cpu(result['loss3_fc'].data)
        self.assertEqual(y2.shape, (2, 1000))
        self.assertEqual(y2.dtype, numpy.float32)

        x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
        result = self.link.extract([x3], layers=['pool1'], size=None)
        self.assertEqual(len(result), 1)
        y3 = cuda.to_cpu(result['pool1'].data)
        self.assertEqual(y3.shape, (1, 64, 20, 15))
        self.assertEqual(y3.dtype, numpy.float32)
test_clustering_loss.py (project: deep_metric_learning, author: ronekko)
def check_forward(self, x_data, c_data, gamma, T, y_star, y_pam):
        num_examples = len(x_data)
        x = chainer.Variable(x_data)
        c = chainer.Variable(c_data)

        loss = clustering_loss(x, c, gamma, T)

        sq_distances_ij = []
        for i, j in zip(range(num_examples), y_pam):
            sqd_ij = np.sum((x_data[i] - x_data[j]) ** 2)
            sq_distances_ij.append(sqd_ij)
        f = -sum(sq_distances_ij)

        sq_distances_ij = []
        for i, j in zip(range(num_examples), y_star):
            sqd_ij = np.sum((x_data[i] - x_data[j]) ** 2)
            sq_distances_ij.append(sqd_ij)
        f_tilde = -sum(sq_distances_ij)

        delta = 1.0 - normalized_mutual_info_score(cuda.to_cpu(c_data), y_pam)
        loss_expected = f + gamma * delta - f_tilde

        testing.assert_allclose(loss.data, loss_expected)
mean_squared_error_test.py (project: DeepPoseComparison, author: ynaka81)
def check_forward(self, x_data, t_data, v_data, use_visibility):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        v = chainer.Variable(v_data)
        loss = mean_squared_error(x, t, v, use_visibility)
        loss_value = cuda.to_cpu(loss.data)
        eq_(loss_value.dtype, np.float32)
        eq_(loss_value.shape, ())
        # compute expected value.
        loss_expect = 0.
        for i in np.ndindex(self.x.shape):
            diff = self.x[i] - self.t[i]
            if use_visibility:
                diff *= self.v[i[:-1]]
            loss_expect += diff**2
        if use_visibility:
            N = self.v.sum()/2
        else:
            N = self.x.size/2
        loss_expect /= N
        self.assertAlmostEqual(loss_expect, loss_value, places=5)
util.py (project: vsmlib, author: undertherain)
def concat_examples(batch, device=None, padding=0):
    if len(batch) == 0:
        raise ValueError('batch is empty')

    if device is None:
        def to_device(x):
            return x
    elif device < 0:
        to_device = cuda.to_cpu
    else:
        def to_device(x):
            return cuda.to_gpu(x, device, cuda.Stream.null)

    first_elem = batch[0]

    if isinstance(first_elem, tuple):
        result = []
        if not isinstance(padding, tuple):
            padding = [padding] * len(first_elem)

        for i in six.moves.range(len(first_elem)):
            result.append(to_device(_concat_arrays(
                [example[i] for example in batch], padding[i])))

        return tuple(result)
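
Both concat_examples helpers on this page dispatch on the device argument in the same way: None leaves arrays untouched, a negative id copies them to the host with cuda.to_cpu, and a non-negative id copies them to that GPU with cuda.to_gpu. A minimal sketch of just that dispatch (my own wrapper, not part of either project):

import numpy as np
from chainer import cuda

def make_to_device(device=None):
    if device is None:
        return lambda x: x            # leave arrays where they are
    if device < 0:
        return cuda.to_cpu            # copy to host memory
    return lambda x: cuda.to_gpu(x, device)

to_device = make_to_device(-1)
batch = [np.zeros(3, dtype=np.float32), np.ones(3, dtype=np.float32)]
moved = [to_device(x) for x in batch]
print(moved)
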
seq2seq.py (project: TOHO_AI, author: re53min)
def test_decode(self, start, eos, limit):
        output = []
        y = chainer.Variable(np.array([[start]], dtype=np.int32))

        for i in range(limit):
            decode0 = self.output_embed(y)
            decode1 = self.decode1(decode0)
            decode2 = self.decode2(decode1)
            z = self.output(decode2)
            prob = F.softmax(z)

            index = np.argmax(cuda.to_cpu(prob.data))

            if index == eos:
                break
            output.append(index)
            y = chainer.Variable(np.array([index], dtype=np.int32))
        return output
simplifier2.py (project: SketchSimplification, author: La4La)
def save_as_img(array, name, origin, transposed=False):
    if transposed:
        origin = origin.transpose(2, 1, 0)
        array = array.transpose(2, 1, 0)
    else:
        origin = origin.transpose(1, 2, 0)
        array = array.transpose(1, 2, 0)

    array = array * 255
    array = array.clip(0, 255).astype(np.uint8)
    img = cuda.to_cpu(array)
    origin = origin.clip(0, 255).astype(np.uint8)

    if args.concat:
        img_concat = cv2.hconcat([origin, img])
        cv2.imwrite(name, img_concat)
    else:
        cv2.imwrite(name, img)
simplifier1.py (project: SketchSimplification, author: La4La)
def save_as_img(array, name, origin, transposed=False):
    if transposed:
        origin = origin.transpose(2, 1, 0)
        array = array.transpose(2, 1, 0)
    else:
        origin = origin.transpose(1, 2, 0)
        array = array.transpose(1, 2, 0)

    array = array * 255
    array = array.clip(0, 255).astype(np.uint8)
    img = cuda.to_cpu(array)
    origin = origin.clip(0, 255).astype(np.uint8)

    if args.concat:
        img_concat = cv2.hconcat([origin, img])
        cv2.imwrite(name, img_concat)
    else:
        cv2.imwrite(name, img)
compare.py (project: SketchSimplification, author: La4La)
def save_as_img(model1, model2, name, origin, transposed=False):
    if transposed:
        origin = origin.transpose(2, 1, 0)
        model1 = model1.transpose(2, 1, 0)
        model2 = model2.transpose(2, 1, 0)
    else:
        origin = origin.transpose(1, 2, 0)
        model1 = model1.transpose(1, 2, 0)
        model2 = model2.transpose(1, 2, 0)

    model1 = model1 * 255
    model1 = model1.clip(0, 255).astype(np.uint8)
    img1 = cuda.to_cpu(model1)

    model2 = model2 * 255
    model2 = model2.clip(0, 255).astype(np.uint8)
    img2 = cuda.to_cpu(model2)

    origin = origin.clip(0, 255).astype(np.uint8)

    img_concat = cv2.hconcat([origin, img1])
    img_concat = cv2.hconcat([img_concat, img2])
    cv2.imwrite(name, img_concat)
test_pickable_sequential_chain.py (project: chainercv, author: chainer)
def check_call(self, x, expects):
        outs = self.link(x)

        if isinstance(self.pick, tuple):
            pick = self.pick
        else:
            if self.pick is None:
                pick = ('l2',)
            else:
                pick = (self.pick,)
            outs = (outs,)

        self.assertEqual(len(outs), len(pick))

        for out, layer_name in zip(outs, pick):
            self.assertIsInstance(out, chainer.Variable)
            self.assertIsInstance(out.array, self.link.xp.ndarray)

            out = to_cpu(out.array)
            np.testing.assert_equal(out, to_cpu(expects[layer_name].array))
test_non_maximum_suppression.py (project: chainercv, author: chainer)
def check_non_maximum_suppression_options(
            self, bbox, threshold, score, limit):
        # Pass all options to the tested function
        scored_selec = non_maximum_suppression(bbox, threshold, score, limit)
        self.assertIsInstance(scored_selec, type(bbox))

        # Reorder inputs before passing them to the function.
        # Reorder the outputs according to scores.
        order = score.argsort()[::-1]
        reordered_selec = non_maximum_suppression(
            bbox[order], threshold, score=None, limit=None)
        reordered_selec = reordered_selec[:limit]
        reordered_selec = order[reordered_selec]

        np.testing.assert_equal(
            cuda.to_cpu(scored_selec), cuda.to_cpu(reordered_selec))

