Example source code for Python ndarray()

function.py (project: chainer-deconv, author: germanRos)
def forward_preprocess(self, function, in_data):
        """Callback function invoked before forward propagation.

        Args:
            function(~chainer.Function): Function object to which
                the function hook is registered.
            in_data(tuple of numpy.ndarray or tuple of cupy.ndarray):
                Input data of forward propagation.
        """
        pass
function.py (project: chainer-deconv, author: germanRos)
def forward_postprocess(self, function, in_data):
        """Callback function invoked after forward propagation.

        Args:
            function(~chainer.Function): Function object to which
                the function hook is registered.
            in_data(tuple of numpy.ndarray or tuple of cupy.ndarray):
                Input data of forward propagation.
        """
        pass

    # backward
function.py (project: chainer-deconv, author: germanRos)
def backward_postprocess(self, function, in_data, out_grad):
        """Callback function invoked after backward propagation.

        Args:
            function(~chainer.Function): Function object to which
                the function hook is registered.
            in_data(tuple of numpy.ndarray or tuple of cupy.ndarray):
                Input of forward propagation.
            out_grad(tuple of numpy.ndarray or tuple of cupy.ndarray):
                Gradient data of backward propagation.
        """
        pass
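
These three callbacks share one pattern: subclass the function hook base class, override only the callbacks you need, and enable the hook with a with block. A minimal sketch, assuming a Chainer-style API where the base class is exposed as chainer.FunctionHook (the hook class below is made up for illustration):

import numpy
import chainer
import chainer.functions as F

class ShapeReportHook(chainer.FunctionHook):
    """Hypothetical hook that prints input shapes around forward propagation."""
    name = 'ShapeReportHook'

    def forward_preprocess(self, function, in_data):
        print('pre ', function.label, [d.shape for d in in_data])

    def forward_postprocess(self, function, in_data):
        print('post', function.label, [d.shape for d in in_data])

x = chainer.Variable(numpy.ones((2, 3), dtype=numpy.float32))
with ShapeReportHook():
    y = F.relu(x)  # both callbacks fire for the ReLU function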
variable.py (project: chainer-deconv, author: germanRos)
def __init__(self, data, volatile=flag.OFF, name=None):
        if not isinstance(data, (numpy.ndarray, cuda.ndarray)):
            msg = '''numpy.ndarray or cuda.ndarray are expected.
Actual: {0}'''.format(type(data))
            raise TypeError(msg)

        self.data = data
        self.rank = 0
        self._volatile = flag.Flag(volatile)

        self._grad = None
        self.creator = None

        self.name = name
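
A minimal construction sketch (assuming a Chainer 1.x-style API): the constructor accepts numpy.ndarray or cuda.ndarray data and rejects anything else with a TypeError.

import numpy
from chainer import Variable

v = Variable(numpy.zeros((2, 3), dtype=numpy.float32), name='x')
print(v.name, v.data.shape)

try:
    Variable([1.0, 2.0, 3.0])  # a plain list is not an ndarray
except TypeError as e:
    print(e)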
variable.py (project: chainer-deconv, author: germanRos)
def addgrad(self, var):
        """Accumulates the gradient array from given source variable.

        This method just runs ``self.grad += var.grad``, except that the
        accumulation is even done across the host and different devices.

        Args:
            var (Variable): Source variable.

        """
        src = var._grad
        dst = self._grad
        if src is None:
            raise ValueError('Source gradient is not set.')
        if dst is None:
            raise ValueError('Target gradient is not set.')

        xp = cuda.get_array_module(dst)
        if xp is numpy:
            dst += cuda.to_cpu(src)
        elif isinstance(src, numpy.ndarray):
            dst += cuda.to_gpu(src, device=dst)
        else:
            dst_dev = dst.device
            if dst_dev == src.device:
                dst += src
            else:
                with dst_dev:
                    dst += xp.copy(src)
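
A minimal sketch of the accumulation described above, on the CPU; both gradients must already be set, otherwise the ValueError branches fire:

import numpy
from chainer import Variable

a = Variable(numpy.zeros((2, 3), dtype=numpy.float32))
b = Variable(numpy.zeros((2, 3), dtype=numpy.float32))
a.grad = numpy.ones((2, 3), dtype=numpy.float32)
b.grad = numpy.full((2, 3), 2.0, dtype=numpy.float32)

a.addgrad(b)            # equivalent to a.grad += b.grad
print(a.grad[0, 0])     # 3.0; with cupy arrays the copy crosses devices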
test_basic_math.py (project: chainer-deconv, author: germanRos)
def test_array_gpu(self):
        self._check_array(cuda.ndarray([1, 2]), 'constant array')
model.py (project: teras, author: chantera)
def __init__(self, *args, dropout=0.0):
        embeds = []
        self.size = 0
        for i, _args in enumerate(args):
            if isinstance(_args, dict):
                vocab_size = _args.get('in_size', None)
                embed_size = _args.get('out_size', None)
                embeddings = _args.get('initialW', None)
                if vocab_size is None or embed_size is None:
                    if embeddings is None:
                        raise ValueError('embeddings or in_size/out_size '
                                         'must be specified')
                    vocab_size, embed_size = embeddings.shape
                    _args['in_size'] = vocab_size
                    _args['out_size'] = embed_size
            else:
                if isinstance(_args, np.ndarray):
                    vocab_size, embed_size = _args.shape
                    embeddings = _args
                elif isinstance(_args, tuple) and len(_args) == 2:
                    vocab_size, embed_size = _args
                    embeddings = None
                else:
                    raise ValueError('embeddings must be '
                                     'np.ndarray or tuple(len=2)')
                _args = {'in_size': vocab_size, 'out_size': embed_size,
                         'initialW': embeddings}
            embeds.append(EmbedID(**_args))
            self.size += embed_size
        super(Embed, self).__init__(*embeds)

        assert dropout == 0 or type(dropout) == float
        self._dropout_ratio = dropout
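
A minimal construction sketch, assuming the Embed class above is importable and that EmbedID refers to chainer.links.EmbedID; the sizes and the pretrained matrix below are made-up values for illustration:

import numpy as np

pretrained = np.random.uniform(-0.1, 0.1, (10000, 100)).astype(np.float32)

embed = Embed(
    pretrained,                        # np.ndarray: sizes taken from its shape
    (500, 50),                         # tuple: (in_size, out_size), random init
    {'in_size': 30, 'out_size': 20},   # dict: forwarded to EmbedID as-is
    dropout=0.5,
)
print(embed.size)  # 100 + 50 + 20 = 170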
gan.py (project: unrolled-gan, author: musyoku)
def to_numpy(self, x):
        if isinstance(x, Variable):
            x = x.data
        if isinstance(x, cuda.ndarray):
            x = cuda.to_cpu(x)
        return x
wavenet.py (project: wavenet, author: musyoku)
def to_numpy(self, x):
        if isinstance(x, Variable):
            x = x.data
        if isinstance(x, cuda.ndarray):
            x = cuda.to_cpu(x)
        return x
gan.py (project: LSGAN, author: musyoku)
def to_numpy(self, x):
        if isinstance(x, Variable):
            x = x.data
        if isinstance(x, cuda.ndarray):
            x = cuda.to_cpu(x)
        return x
adgm.py (project: adgm, author: musyoku)
def to_numpy(self, x):
        if isinstance(x, Variable):
            x.to_cpu()
            x = x.data
        if isinstance(x, cuda.ndarray):
            x = cuda.to_cpu(x)
        return x
extensions.py (project: chainer-spatial-transformer-networks, author: hvy)
def __call__(self, trainer):
        x = self.x
        dpi = self.dpi
        updater = trainer.updater

        filename = os.path.join(trainer.out, '{0:08d}.png'.format(
                                updater.iteration))

        # Inference to update model internal grid
        x = updater.converter(x, updater.device)
        model = updater.get_optimizer('main').target.predictor
        model(x)

        # Get grids from previous inference
        grid = model.st.grid.data
        if isinstance(grid, cuda.ndarray):
            grid = cuda.to_cpu(grid)
        if isinstance(x, cuda.ndarray):
            x = cuda.to_cpu(x)

        n, c, w, h = x.shape
        x_plots = math.ceil(math.sqrt(n))
        y_plots = x_plots if n % x_plots == 0 else x_plots - 1
        plt.figure(figsize=(w*x_plots/dpi, h*y_plots/dpi), dpi=dpi)

        for i, im in enumerate(x):
            plt.subplot(y_plots, x_plots, i+1)

            if c == 1:
                plt.imshow(im[0])
            else:
                plt.imshow(im.transpose((1, 2, 0)))

            plt.axis('off')
            plt.gca().set_xticks([])
            plt.gca().set_yticks([])
            plt.gray()

            # Get the 4 corners of the transformation grid to draw a box
            g = grid[i]
            vs = np.empty((4, 2), dtype=np.float32)
            vs[0] = g[:, 0, 0]
            vs[1] = g[:, 0, w-1]
            vs[2] = g[:, h-1, w-1]
            vs[3] = g[:, h-1, 0]
            vs += 1  # [-1, 1] -> [0, 2]
            vs /= 2
            vs[:, 0] *= h
            vs[:, 1] *= w

            bbox = plt.Polygon(vs, True, color='r', fill=False, linewidth=0.8,
                               alpha=0.8)
            plt.gca().add_patch(bbox)
            bbox.set_clip_on(False)  # Allow drawing outside axes

            plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
                                wspace=0.2, hspace=0.2)

        plt.savefig(filename, dpi=dpi*2, facecolor='black')
        plt.clf()
        plt.close()
gradient_check.py (project: chainer-deconv, author: germanRos)
def numerical_grad(f, inputs, grad_outputs, eps=1e-3):
    """Computes numerical gradient by finite differences.

    This function is used to implement gradient checking. For usage examples,
    see the unit tests of :mod:`chainer.functions`.

    Args:
        f (function): Python function with no arguments that runs forward
            computation and returns the result.
        inputs (tuple of arrays): Tuple of arrays that should be treated as
            inputs. Each of their elements is slightly perturbed to compute
            the numerical gradient by finite differences.
        grad_outputs (tuple of arrays): Tuple of arrays that are treated as
            output gradients.
        eps (float): Epsilon value of finite differences.

    Returns:
        tuple: Numerical gradient arrays corresponding to ``inputs``.

    """
    assert eps > 0
    inputs = tuple(inputs)
    grad_outputs = tuple(grad_outputs)
    gpu = any(isinstance(x, cuda.ndarray) for x in inputs + grad_outputs)
    cpu = any(isinstance(x, numpy.ndarray) for x in inputs + grad_outputs)

    if gpu and cpu:
        raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')

    if gpu:
        xp = cuda.cupy
    else:
        xp = numpy
    grads = tuple(xp.zeros_like(x) for x in inputs)
    for x, gx in zip(inputs, grads):
        for i in numpy.ndindex(x.shape):
            orig = x[i].copy()  # hold original value
            x[i] = orig + eps
            ys1 = _copy_arrays(f())
            x[i] = orig - eps
            ys2 = _copy_arrays(f())
            x[i] = orig
            for y1, y2, gy in zip(ys1, ys2, grad_outputs):
                if gy is not None:
                    dot = ((y1 - y2) * gy).sum()
                    gx[i] += dot / (2 * eps)
    return grads
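
A minimal gradient-check sketch using this function (numerical_grad is also available as chainer.gradient_check.numerical_grad): the numerical gradient of y = x**2 should match the analytic gradient 2*x*gy.

import numpy
from chainer.gradient_check import numerical_grad

x = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
gy = numpy.ones_like(x)

def f():
    # Forward computation with no arguments, as required by numerical_grad.
    return (x ** 2,)

gx, = numerical_grad(f, (x,), (gy,), eps=1e-3)
numpy.testing.assert_allclose(gx, 2 * x * gy, rtol=1e-3, atol=1e-3)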

