Example source code for Python get_device()

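Every snippet below goes through chainer.cuda.get_device(), which takes either a device ID or an array and returns the corresponding CUDA device object. The device can be made current with .use() or entered as a context manager; NumPy arrays map to a dummy device whose int() value is -1, which is how several of the examples below distinguish the CPU path from the GPU path. (Later Chainer releases split this helper into cuda.get_device_from_id and cuda.get_device_from_array, but the projects listed here use the original single entry point.) A minimal sketch of both call patterns, assuming a Chainer installation where cuda.get_device is still available; the device ID and toy arrays are illustrative:

    import numpy as np
    from chainer import cuda

    x = np.zeros(3, dtype=np.float32)

    # Pattern 1: pass a device ID and make that GPU current.
    gpu_id = 0  # hypothetical device ID
    if cuda.available and gpu_id >= 0:
        cuda.get_device(gpu_id).use()
        x = cuda.to_gpu(x, gpu_id)

    # Pattern 2: pass an array and use the result as a context manager.
    # A NumPy array yields a dummy device reporting int(dev) == -1,
    # so the same code also runs on CPU-only machines.
    with cuda.get_device(x) as dev:
        xp = np if int(dev) == -1 else cuda.cupy
        y = xp.ones_like(x)
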
nutszebra_chainer.py (project: trainer, author: nutszebra)
def check_gpu(self, gpu):
        """Check cuda.cupy

        Example:

        ::

            gpu = 0
            self.check_gpu(gpu)

        Args:
            gpu (int): gpu id
        """

        if gpu >= 0:
            cuda.get_device(gpu).use()
            self.to_gpu(gpu)
            return True
        return False
updater.py (project: chainer-began, author: hvy)
def __init__(self, *, iterator, noise_iterator, optimizer_generator,
                 optimizer_discriminator, generator_lr_decay_interval,
                 discriminator_lr_decay_interval, gamma, k_0, lambda_k,
                 loss_norm, device=-1):

        iterators = {'main': iterator, 'z': noise_iterator}
        optimizers = {'gen': optimizer_generator,
                      'dis': optimizer_discriminator}

        super().__init__(iterators, optimizers, device=device)

        self.gen_lr_decay_interval = generator_lr_decay_interval
        self.dis_lr_decay_interval = discriminator_lr_decay_interval

        self.k = k_0
        self.lambda_k = lambda_k
        self.gamma = gamma
        self.loss_norm = loss_norm

        if device >= 0:
            cuda.get_device(device).use()
            for optimizer in optimizers.values():
                optimizer.target.to_gpu()
optimizer.py (project: chainer-deconv, author: germanRos)
def init_state(self, param, state):
        """Initializes the optimizer state corresponding to the parameter.

        This method should add needed items to the ``state`` dictionary. Each
        optimizer implementation that uses its own states should override this
        method or CPU/GPU dedicated versions (:meth:`init_state_cpu` and
        :meth:`init_state_gpu`).

        Args:
            param (~chainer.Variable): Parameter variable.
            state (dict): State dictionary.

        .. seealso:: :meth:`init_state_cpu`, :meth:`init_state_gpu`

        """
        with cuda.get_device(param.data) as dev:
            if int(dev) == -1:
                self.init_state_cpu(param, state)
            else:
                self.init_state_gpu(param, state)
test_variable.py (project: chainer-deconv, author: germanRos)
def test_copydata_gpu_to_another_gpu(self):
        cp = cuda.cupy
        with cuda.get_device(0):
            data1 = cp.zeros(3, dtype=np.float32)
            expect = cp.ones(3, dtype=np.float32)
        with cuda.get_device(1):
            data2 = cp.ones(3, dtype=np.float32)
        self.check_copydata(data1, data2, expect)
updater.py (project: chainer-wasserstein-gan, author: hvy)
def __init__(self, *, iterator, noise_iterator, optimizer_generator,
                 optimizer_critic, device=-1):

        if optimizer_generator.target.name is None:
            optimizer_generator.target.name = 'generator'

        if optimizer_critic.target.name is None:
            optimizer_critic.target.name = 'critic'

        iterators = {'main': iterator, 'z': noise_iterator}
        optimizers = {'generator': optimizer_generator,
                      'critic': optimizer_critic}

        super().__init__(iterators, optimizers, device=device)

        if device >= 0:
            cuda.get_device(device).use()
            [optimizer.target.to_gpu() for optimizer in optimizers.values()]

        self.xp = cuda.cupy if device >= 0 else np
captioning.py (project: ImageCaptioning, author: rkuga)
def __init__(self,gpu,batchsize,data_dir,dataset,net,mode,epochs,save_every,size,**kwargs):
        super(Network, self).__init__(epochs,save_every)
        print "building ..."
        self.input_height=size
        self.input_width=size
        self.net = net
        self.mode=mode
        self.dataset=dataset
        self.train_data, self.test_data=self.get_dataset(data_dir,dataset)
        print 'input_channel ==> %d using %s dataset'%(self.in_channel, self.dataset)

        self.enc = GoogLeNet()
        self.dec = Decoder(self.in_size)

        self.xp = cuda.cupy
        cuda.get_device(gpu).use()

        self.enc.to_gpu()
        self.dec.to_gpu()

        self.o_dec = optimizers.RMSpropGraves()
        self.o_dec.setup(self.dec)

        self.batchsize=batchsize
weight_clip.py (project: ddnn, author: kunglab)
def __call__(self, opt):
        if cuda.available:
            kernel = cuda.elementwise(
                'T low, T high', 
                'T p', 
                'p = (p < low) ? low : (p > high) ? high : p',
                'weight_clip')

        for link in opt.target.links():
            # only apply to binary layers
            if getattr(link,'cname',False):
                for param in link.params():
                    p = param.data
                    with cuda.get_device(p) as dev:
                        if int(dev) == -1:
                            numpy.clip(p, self.low, self.high)
                        else:
                            kernel(self.low, self.high, p)
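Hooks like the one above (and the nonbias_weight_decay examples further down) plug into a Chainer optimizer via add_hook, and the `with cuda.get_device(p)` block is what lets a single hook handle both NumPy and CuPy parameters. A minimal self-contained sketch of that pattern; the class name, the clipping bounds, and the toy Linear model are illustrative, not taken from the original projects:

    import numpy
    from chainer import cuda, optimizers
    import chainer.links as L

    class SimpleWeightClip(object):
        """Optimizer hook that clamps every parameter into [low, high]."""
        name = 'SimpleWeightClip'

        def __init__(self, low=-1.0, high=1.0):
            self.low = low
            self.high = high

        def __call__(self, opt):
            for param in opt.target.params():
                p = param.data
                with cuda.get_device(p) as dev:
                    xp = numpy if int(dev) == -1 else cuda.cupy
                    xp.clip(p, self.low, self.high, out=p)

    model = L.Linear(3, 2)   # toy model purely for illustration
    opt = optimizers.SGD()
    opt.setup(model)
    opt.add_hook(SimpleWeightClip(-1.0, 1.0))  # invoked on each opt.update() call
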
sequential.py (project: ddnn, author: kunglab)
def entropy_filter(self, x, b, ent_T):
        xp = cuda.get_array_module(b)
        eb = entropy(F.softmax(b))/np.log(b.shape[1])
        eb.to_cpu()
        if hasattr(eb.data,'get'):
            with cuda.get_device(eb.data):
                exited = eb.data < ent_T
            exited = exited.get()
        else:
            exited = eb.data < ent_T

        y_exit = []
        y_cont = []
        for i,idx in enumerate(exited):
            if idx:
                y_exit.append(b[i:i+1])
            else:
                y_cont.append(x[i:i+1])

        if len(y_exit) > 0:
            y_exit = F.vstack(y_exit)
        if len(y_cont) > 0:
            y_cont = F.vstack(y_cont)
        return y_exit,y_cont,exited
LSTMEncDecAttn.py (project: mlpnlp-nmt, author: mlpnlp)
def printAllParameters(self, optimizer, init_type="***", init_scale=1.0):
        total_norm = 0
        total_param = 0
        named_params = sorted(
            optimizer.target.namedparams(),
            key=lambda x: x[0])
        for n, p in named_params:
            t_norm = chainer.optimizer._sum_sqnorm(p.data)
            sys.stdout.write(
                '### {} {} {} {} {}\n'.format(
                    p.name, p.data.ndim, p.data.shape, p.data.size, t_norm))
            total_norm += t_norm
            total_param += p.data.size
        with cuda.get_device(total_norm):
            sys.stdout.write(
                '# param size= [{}] norm = [{}] scale=[{}, {}]\n'.format(
                    total_param, self.model.xp.sqrt(total_norm),
                    init_type, init_scale))

    ###############################################
graph_convolution.py (project: chainer-graph-cnn, author: pfnet-research)
def __call__(self, x):
        """Applies the graph convolutional layer.

        Args:
            x: (~chainer.Variable): Input graph signal.

        Returns:
            ~chainer.Variable: Output of the graph convolution.
        """
        if self.has_uninitialized_params:
            with cuda.get_device(self._device_id):
                self._initialize_params(x.shape[1])
        if self.b is None:
            return self.func(x, self.W)
        else:
            return self.func(x, self.W, self.b)
graph_max_pooling.py (project: chainer-graph-cnn, author: pfnet-research)
def forward_gpu(self, inputs):
        x = inputs[0]
        xp = cuda.get_array_module(x)
        n_batch, c, N = x.shape
        N_coarse = len(self.pooling_inds)

        with cuda.get_device(x.data):
            x = x.transpose((2, 1, 0))
            p_dim = self.pooling_inds.shape[1]
            y = xp.empty((N_coarse, c, n_batch), dtype=x.dtype)
            self.max_inds = xp.empty((N_coarse, c, n_batch), dtype=np.int32)
            pooling_inds = cuda.to_gpu(self.pooling_inds)
            gpu_graphpool_fwd(N_coarse, p_dim, pooling_inds,
                              x, y, self.max_inds)
            y = y.transpose((2, 1, 0))

        return y,
linear.py (project: static-define-by-run, author: bkvogel)
def __call__(self, x):
        """Applies the linear layer.

        Args:
            x (~chainer.Variable): Batch of input vectors.

        Returns:
            ~chainer.Variable: Output of the linear layer.

        """
        if self.has_uninitialized_params:
            with cuda.get_device(self._device_id):
                self._initialize_params(x.size // x.shape[0])
        return linear.linear(x, self.W, self.b)
train_model.py (project: ROCStory_skipthought_baseline, author: soskek)
def setup_model(args):
    if args.model_type.lower() == "lstm":
        model = LSTM(args)
    else:
        print('set valid model type name')
        exit()

    optimizer = model.setup_optimizer()
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()
    return model, optimizer
utils.py (project: chainer-object-detection, author: dsanno)
def multi_box_iou(a, b):
    with cuda.get_device(a.x):
        return multi_box_intersection(a, b) / multi_box_union(a, b)
yolov2.py (project: chainer-object-detection, author: dsanno)
def predict(self, input_x):
        if isinstance(input_x, chainer.Variable):
            device = cuda.get_device(input_x.data)
        else:
            device = cuda.get_device(input_x)
        xp = self.predictor.xp
        with device:
            output = self.predictor(input_x)
            batch_size, input_channel, input_h, input_w = input_x.shape
            batch_size, _, grid_h, grid_w = output.shape
            x, y, w, h, conf, prob = F.split_axis(F.reshape(output, (batch_size, self.predictor.n_boxes, self.predictor.n_classes+5, grid_h, grid_w)), (1, 2, 3, 4, 5), axis=2)
            x = F.sigmoid(x)
            y = F.sigmoid(y)
            conf = F.sigmoid(conf)
            prob = F.transpose(prob, (0, 2, 1, 3, 4))
            prob = F.softmax(prob)
            prob = F.transpose(prob, (0, 2, 1, 3, 4))


            # convert coordinates to those on the image
            x_shift = xp.asarray(np.broadcast_to(np.arange(grid_w, dtype=np.float32), x.shape))
            y_shift = xp.asarray(np.broadcast_to(np.arange(grid_h, dtype=np.float32).reshape(grid_h, 1), y.shape))
            w_anchor = xp.asarray(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 0], (self.predictor.n_boxes, 1, 1, 1)), w.shape))
            h_anchor = xp.asarray(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 1], (self.predictor.n_boxes, 1, 1, 1)), h.shape))
            box_x = (x + x_shift) / grid_w
            box_y = (y + y_shift) / grid_h
            box_w = F.exp(w) * w_anchor / grid_w
            box_h = F.exp(h) * h_anchor / grid_h

            return box_x, box_y, box_w, box_h, conf, prob
yolov2_caltech.py (project: chainer-object-detection, author: dsanno)
def predict(self, input_x):
        if isinstance(input_x, chainer.Variable):
            device = cuda.get_device(input_x.data)
        else:
            device = cuda.get_device(input_x)
        xp = self.predictor.xp
        with device:
            output = self.predictor(input_x)
            batch_size, input_channel, input_h, input_w = input_x.shape
            batch_size, _, grid_h, grid_w = output.shape
            x, y, w, h, conf, prob = F.split_axis(F.reshape(output, (batch_size, self.predictor.n_boxes, self.predictor.n_classes+5, grid_h, grid_w)), (1, 2, 3, 4, 5), axis=2)
            x = F.sigmoid(x)
            y = F.sigmoid(y)
            conf = F.sigmoid(conf)
            prob = F.transpose(prob, (0, 2, 1, 3, 4))
            prob = F.softmax(prob)
            prob = F.transpose(prob, (0, 2, 1, 3, 4))


            # convert coordinates to those on the image
            x_shift = xp.asarray(np.broadcast_to(np.arange(grid_w, dtype=np.float32), x.shape))
            y_shift = xp.asarray(np.broadcast_to(np.arange(grid_h, dtype=np.float32).reshape(grid_h, 1), y.shape))
            w_anchor = xp.asarray(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 0], (self.predictor.n_boxes, 1, 1, 1)), w.shape))
            h_anchor = xp.asarray(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 1], (self.predictor.n_boxes, 1, 1, 1)), h.shape))
            box_x = (x + x_shift) / grid_w
            box_y = (y + y_shift) / grid_h
            box_w = F.exp(w) * w_anchor / grid_w
            box_h = F.exp(h) * h_anchor / grid_h

            return box_x, box_y, box_w, box_h, conf, prob
weight_clip.py (project: binary_net, author: hillbig)
def __call__(self, opt):
        if cuda.available:
            kernel = cuda.elementwise(
                'T low, T high', 
                'T p', 
                'p = (p < low) ? low : (p > high) ? high : p',
                'weight_clip')

        for param in opt.target.params():
            p = param.data
            with cuda.get_device(p) as dev:
                if int(dev) == -1:
                    numpy.clip(p, self.low, self.high)
                else:
                    kernel(self.low, self.high, p)
nonbias_weight_decay.py (project: chainerrl, author: chainer)
def __call__(self, opt):
        if cuda.available:
            kernel = cuda.elementwise(
                'T p, T decay', 'T g', 'g += decay * p', 'weight_decay')

        rate = self.rate
        for name, param in opt.target.namedparams():
            if name == 'b' or name.endswith('/b'):
                continue
            p, g = param.data, param.grad
            with cuda.get_device(p) as dev:
                if int(dev) == -1:
                    g += rate * p
                else:
                    kernel(p, rate, g)
qlearning.py (project: malmo-challenge, author: Kaixhin)
def __init__(self, model, target, device_id=-1,
                 learning_rate=0.00025, momentum=.9,
                 minibatch_size=32, update_interval=10000):

        assert isinstance(model, ChainerModel), \
            'model should inherit from ChainerModel'

        super(QNeuralNetwork, self).__init__(model.input_shape,
                                             model.output_shape)

        self._gpu_device = None
        self._loss_val = 0

        # Target model update method
        self._steps = 0
        self._target_update_interval = update_interval

        # Setup model and target network
        self._minibatch_size = minibatch_size
        self._model = model
        self._target = target
        self._target.copyparams(self._model)

        # If GPU move to GPU memory
        if device_id >= 0:
            with cuda.get_device(device_id) as device:
                self._gpu_device = device
                self._model.to_gpu(device)
                self._target.to_gpu(device)

        # Setup optimizer
        self._optimizer = Adam(learning_rate, momentum, 0.999)
        self._optimizer.setup(self._model)
qlearning.py (project: malmo-challenge, author: Microsoft)
def __init__(self, model, target, device_id=-1,
                 learning_rate=0.00025, momentum=.9,
                 minibatch_size=32, update_interval=10000):

        assert isinstance(model, ChainerModel), \
            'model should inherit from ChainerModel'

        super(QNeuralNetwork, self).__init__(model.input_shape,
                                             model.output_shape)

        self._gpu_device = None
        self._loss_val = 0

        # Target model update method
        self._steps = 0
        self._target_update_interval = update_interval

        # Setup model and target network
        self._minibatch_size = minibatch_size
        self._model = model
        self._target = target
        self._target.copyparams(self._model)

        # If GPU move to GPU memory
        if device_id >= 0:
            with cuda.get_device(device_id) as device:
                self._gpu_device = device
                self._model.to_gpu(device)
                self._target.to_gpu(device)

        # Setup optimizer
        self._optimizer = Adam(learning_rate, momentum, 0.999)
        self._optimizer.setup(self._model)
chainer_utility.py (project: Comicolorization, author: DwangoMediaVillage)
def _concat_arrays(arrays):
    xp = cuda.get_array_module(arrays[0])
    with cuda.get_device(arrays[0]):
        return xp.concatenate([array[None] for array in arrays])
nonbias_weight_decay.py (project: async-rl, author: muupan)
def __call__(self, opt):
        if cuda.available:
            kernel = cuda.elementwise(
                'T p, T decay', 'T g', 'g += decay * p', 'weight_decay')

        rate = self.rate
        for name, param in opt.target.namedparams():
            if name == 'b' or name.endswith('/b'):
                continue
            p, g = param.data, param.grad
            with cuda.get_device(p) as dev:
                if int(dev) == -1:
                    g += rate * p
                else:
                    kernel(p, rate, g)
generate.py (project: chainer-stack-gan, author: dsanno)
def main():
    args = parse_args()
    gen1 = net.Generator1()
    chainer.serializers.load_npz(args.model_path, gen1)
    device_id = None
    if args.gpu >= 0:
        device_id = args.gpu
        cuda.get_device(device_id).use()
        gen1.to_gpu(device_id)

    out_vector_path = None
    np.random.seed(1)
    if args.vector_file1 and args.vector_index1 >= 0 and args.vector_file2 and args.vector_index2 >= 0:
        with open(args.vector_file1, 'rb') as f:
            z = np.load(f)
            z1 = z[args.vector_index1]
        with open(args.vector_file2, 'rb') as f:
            z = np.load(f)
            z2 = z[args.vector_index2]
        w = np.arange(10).astype(np.float32).reshape((-1, 1)) / 9
        z = (1 - w) * z1 + w * z2
        z = z / (np.linalg.norm(z, axis=1, keepdims=True) + 1e-12)
    else:
        z = np.random.normal(0, 1, (100, latent_size)).astype(np.float32)
        out_vector_path = '{}.npy'.format(args.output)
        z = z / (np.linalg.norm(z, axis=1, keepdims=True) + 1e-12)

    with chainer.no_backprop_mode():
        if device_id is None:
            x = gen1(z, train=False)
        else:
            x = gen1(cuda.to_gpu(z, device_id), train=False)
    x = cuda.to_cpu(x.data)
    batch, ch, h, w = x.shape
    x = x.reshape((-1, 10, ch, h, w)).transpose((0, 3, 1, 4, 2)).reshape((-1, 10 * w, ch))
    x = ((x + 1) * 127.5).clip(0, 255).astype(np.uint8)
    Image.fromarray(x).save('{}.jpg'.format(args.output))
    if out_vector_path:
        with open(out_vector_path, 'wb') as f:
            np.save(f, z)
lbfgs.py (project: chainer-dfi, author: dsanno)
def init_state(self, param):
        with cuda.get_device(param.data):
            self.state['s'] = []
link.py (project: chainer-deconv, author: germanRos)
def to_gpu(self, device=None):
        """Copies parameter variables and persistent values to GPU.

        This method does not handle non-registered attributes. If some of such
        attributes must be copied to GPU, the link implementation must
        override this method to do so.

        Args:
            device: Target device specifier. If omitted, the current device is
                used.

        Returns: self

        """
        cuda.check_cuda_available()
        if not self._cpu:
            return self
        d = self.__dict__
        with cuda.get_device(device):
            for name in self._params:
                d[name].to_gpu()
            for name in self._persistent:
                value = d[name]
                if isinstance(value, numpy.ndarray):
                    d[name] = cuda.to_gpu(value)
        self._cpu = False
        return self
link.py (project: chainer-deconv, author: germanRos)
def to_gpu(self, device=None):
        with cuda.get_device(device):
            super(Chain, self).to_gpu()
            d = self.__dict__
            for name in self._children:
                d[name].to_gpu()
        return self
link.py (project: chainer-deconv, author: germanRos)
def to_gpu(self, device=None):
        with cuda.get_device(device):
            super(ChainList, self).to_gpu()
            for link in self._children:
                link.to_gpu()
        return self
hierarchical_softmax.py (project: chainer-deconv, author: germanRos)
def to_gpu(self, device=None):
        with cuda.get_device(device):
            self.paths = cuda.to_gpu(self.paths)
            self.codes = cuda.to_gpu(self.codes)
            self.begins = cuda.to_gpu(self.begins)
negative_sampling.py (project: chainer-deconv, author: germanRos)
def to_gpu(self, device=None):
        with cuda.get_device(device):
            super(NegativeSampling, self).to_gpu()
            self.sampler.to_gpu()
rmsprop.py (project: chainer-deconv, author: germanRos)
def init_state(self, param, state):
        xp = cuda.get_array_module(param.data)
        with cuda.get_device(param.data):
            state['ms'] = xp.zeros_like(param.data)

