Example source code for Python cupy()

vaelm.py (project: vaelm, author: TatsuyaShirakawa)
def set_by_sample(self, train=True):
        xp = self.xp
        use_gpu = (xp == cuda.cupy)
        for i in range(self.num_layers):
            # h
            mu, sigma = self.hmus[i], self.hsigmas[i]
            e = np.random.normal(0., 1., self.z_size).astype(np.float32)
            if use_gpu:
                e = cuda.to_gpu(e)
            self.decoder.set_h(i, self.get_zh(i)(mu + e * sigma))

            # c
            mu, sigma = self.cmus[i], self.csigmas[i]
            e = np.random.normal(0., 1., self.z_size).astype(np.float32)
            if use_gpu:
                e = cuda.to_gpu(e)
            self.decoder.set_c(i, self.get_zc(i)(mu + e * sigma))
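
The sampler above draws Gaussian noise on the host with NumPy and only copies it to the GPU (cuda.to_gpu) when the model's array module is cuda.cupy. A minimal standalone sketch of that reparameterization step, with an illustrative function name (not from the project):

import numpy as np
from chainer import cuda

def sample_z(mu, sigma, xp=np):
    # Reparameterization trick: z = mu + e * sigma with e ~ N(0, 1).
    # Noise is drawn on the CPU and copied to the GPU only when the
    # caller's array module is cuda.cupy, mirroring set_by_sample above.
    e = np.random.normal(0., 1., mu.shape).astype(np.float32)
    if xp is not np:  # assumed to be cuda.cupy
        e = cuda.to_gpu(e)
    return mu + e * sigma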
neural_style.py (project: chainer-neural-style, author: dsanno)
def __init__(self, model, optimizer, content_weight, style_weight, tv_weight, content_layers, style_layers, resolution_num=1, device_id=-1, initial_image='random', keep_color=False):
        self.model = model
        self.optimizer = optimizer
        self.content_weight = content_weight
        self.style_weight = style_weight
        self.tv_weight = tv_weight
        self.device_id = device_id
        self.content_layer_names = content_layers
        self.style_layer_names = style_layers
        self.resolution_num = resolution_num
        self.initial_image = initial_image
        self.keep_color = keep_color
        if device_id >= 0:
            self.xp = cuda.cupy
            self.model.to_gpu(device_id)
        else:
            self.xp = np
neural_style.py (project: chainer-neural-style, author: dsanno)
def __init__(self, model, optimizer, content_weight, style_weight, tv_weight, content_layers, style_layers, resolution_num=1, device_id=-1, initial_image='content', keep_color=False):
        self.model = model
        self.optimizer = optimizer
        self.content_weight = content_weight
        self.style_weight = style_weight
        self.tv_weight = tv_weight
        self.device_id = device_id
        self.content_layer_names = content_layers
        self.style_layer_names = style_layers
        self.resolution_num = resolution_num
        self.initial_image = initial_image
        self.keep_color = keep_color
        if device_id >= 0:
            self.xp = cuda.cupy
            self.model.to_gpu(device_id)
        else:
            self.xp = np
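
Both constructors use the same backend-selection idiom: when device_id is non-negative, xp is bound to cuda.cupy and the model's parameters are moved to that GPU; otherwise xp stays as NumPy. A compact sketch of the idiom on its own (function name is illustrative):

import numpy as np
from chainer import cuda

def select_backend(model, device_id=-1):
    # Return the array module to use and move the model's parameters
    # accordingly, as both __init__ methods above do.
    if device_id >= 0:
        model.to_gpu(device_id)
        return cuda.cupy
    return np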
grad_check.py (project: double-dqn, author: musyoku)
def backprop_check():
    xp = cuda.cupy if config.use_gpu else np
    duel = DDQN()

    state = xp.random.uniform(-1.0, 1.0, (2, config.rl_agent_history_length * config.ale_screen_channels, config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])).astype(xp.float32)
    reward = [1, 0]
    action = [3, 4]
    episode_ends = [0, 0]
    next_state = xp.random.uniform(-1.0, 1.0, (2, config.rl_agent_history_length * config.ale_screen_channels, config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])).astype(xp.float32)

    optimizer_conv = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
    optimizer_conv.setup(duel.conv)
    optimizer_fc = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
    optimizer_fc.setup(duel.fc)

    for i in xrange(10000):
        optimizer_conv.zero_grads()
        optimizer_fc.zero_grads()
        loss, _ = duel.forward_one_step(state, action, reward, next_state, episode_ends)
        loss.backward()
        optimizer_conv.update()
        optimizer_fc.update()
        print loss.data,
        print duel.conv.layer_2.W.data[0, 0, 0, 0],
        print duel.fc.layer_2.W.data[0, 0],
nutszebra_chainer.py (project: trainer, author: nutszebra)
def check_gpu(self, gpu):
        """Check cuda.cupy

        Example:

        ::

            gpu = 0
            self.check_gpu(gpu)

        Args:
            gpu (int): gpu id
        """

        if gpu >= 0:
            cuda.get_device(gpu).use()
            self.to_gpu(gpu)
            return True
        return False
nutszebra_chainer.py (project: trainer, author: nutszebra)
def _check_cupy():
        """Set xp

        Note:
            cuda.cupy if gpu, numpy otherwise

        Example:

        ::

            self.xp = self._check_cupy()

        Returns:
            cuda.cupy if gpu, numpy otherwise
        """

        try:
            cuda.check_cuda_available()
            return cuda.cupy
        # if gpu is not available, RuntimeError arises
        except RuntimeError:
            return np
error.py (project: chainer-qrnn, author: musyoku)
def __init__(self, use_cudnn=True, normalize=True, cache_score=True,
                 class_weight=None, ignore_label=-1, reduce='mean'):
        self.use_cudnn = use_cudnn
        self.normalize = normalize
        self.cache_score = cache_score
        self.class_weight = class_weight
        if class_weight is not None:
            if self.class_weight.ndim != 1:
                raise ValueError('class_weight.ndim should be 1')
            if self.class_weight.dtype.kind != 'f':
                raise ValueError('The dtype of class_weight should be \'f\'')
            if isinstance(self.class_weight, chainer.Variable):
                raise ValueError('class_weight should be a numpy.ndarray or '
                                 'cupy.ndarray, not a chainer.Variable')
        self.ignore_label = ignore_label
        if reduce not in ('mean', 'no'):
            raise ValueError(
                "only 'mean' and 'no' are valid for 'reduce', but '%s' is "
                'given' % reduce)
        self.reduce = reduce
cross_entropy.py (project: chainer-deconv, author: germanRos)
def backward_gpu(self, inputs, grad_outputs):
        cupy = cuda.cupy
        x, t = inputs
        gloss = grad_outputs[0]
        n_unit = t.size // len(t)
        coeff = gloss * self._coeff
        gx = cuda.elementwise(
            'T y, S t, raw T coeff, S n_channel, S n_unit',
            'T gx',
            '''
               const int c = (i / n_unit % n_channel);
               gx = ((t == -1) || (c != t)) ? 0 : (coeff[0] / max(y, 1e-5));
            ''',
            'softmax_crossent_bwd')(
                self.y, cupy.expand_dims(t, 1), -coeff, x.shape[1], n_unit)
        return gx, None
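
The backward pass builds its gradient with cuda.elementwise, which compiles (and caches) a per-element CUDA kernel from typed parameter declarations and a C-like body. A toy kernel in the same style, purely for illustration (requires a CUDA-capable GPU):

import numpy as np
from chainer import cuda

# 'T' is a type placeholder resolved from the input arrays; the body
# runs once per element, just like 'softmax_crossent_bwd' above.
axpb = cuda.elementwise(
    'T x, T a, T b',    # inputs
    'T y',              # output
    'y = a * x + b',    # per-element operation (CUDA C)
    'axpb_kernel')      # kernel name, used for caching

x = cuda.cupy.arange(5, dtype=np.float32)
a = cuda.cupy.full_like(x, 2.0)
b = cuda.cupy.full_like(x, 1.0)
print(axpb(x, a, b))    # [1. 3. 5. 7. 9.]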
softmax_cross_entropy.py (project: chainer-deconv, author: germanRos)
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = softmax_log(x, self.use_cudnn)
        if self.cache_score:
            self.y = cupy.exp(log_y)
        if getattr(self, 'normalize', True):
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        ret = cuda.reduce(
            'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
            't == -1 ? T(0) : log_y[_j * n_channel + t]',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
        return ret,
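
cuda.reduce follows a map-reduce pattern: the third argument maps each element, 'a + b' folds the mapped values, the post-expression writes the result, and '0' is the identity of the fold. A minimal illustrative kernel in the same style (not part of the project):

import numpy as np
from chainer import cuda

# Sum the values x whose label t is not the ignore value (-1),
# structurally the same as 'crossent_fwd' above.
masked_sum = cuda.reduce(
    'S t, T x',              # per-element inputs
    'T out',                 # scalar output
    't == -1 ? T(0) : x',    # map expression
    'a + b',                 # reduction
    'out = a',               # post-reduction assignment
    '0',                     # identity element
    'masked_sum_kernel')

t = cuda.cupy.array([0, -1, 2, -1, 1], dtype=np.int32)
x = cuda.cupy.array([1., 2., 3., 4., 5.], dtype=np.float32)
print(masked_sum(t, x))      # 9.0 (= 1 + 3 + 5)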
softmax_cross_entropy.py (project: chainer-deconv, author: germanRos)
def backward_gpu(self, inputs, grad_outputs):
        cupy = cuda.cupy
        x, t = inputs
        if hasattr(self, 'y'):
            y = self.y
        else:
            y = softmax_log(x, self.use_cudnn)
            cupy.exp(y, out=y)
        gloss = grad_outputs[0]
        n_unit = t.size // len(t)
        coeff = gloss * self._coeff
        gx = cuda.elementwise(
            'T y, S t, raw T coeff, S n_channel, S n_unit',
            'T gx',
            '''
               const int c = (i / n_unit % n_channel);
               gx = (t == -1) ? 0 : (coeff[0] * (y - (c == t)));
            ''',
            'softmax_crossent_bwd')(
                y, cupy.expand_dims(t, 1), coeff, x.shape[1], n_unit)
        return gx, None
weighted_cross_entropy.py (project: chainer-deconv, author: germanRos)
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        log_y = cupy.log(x + 1e-5)
        self.y = x

        if self.debug:
            ipdb.set_trace()

        if getattr(self, 'normalize', True):
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        ret = cuda.reduce(
            'S t, raw T log_y, int32 n_channel, raw T coeff, raw T weights', 'T out',
            't == -1 ? 0 : log_y[_j * n_channel + t] * weights[t]',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff, self.weights.reduced_view())
        return ret,
test_variable.py (project: chainer-deconv, author: germanRos)
def test_copydata_gpu_to_another_gpu(self):
        cp = cuda.cupy
        with cuda.get_device(0):
            data1 = cp.zeros(3, dtype=np.float32)
            expect = cp.ones(3, dtype=np.float32)
        with cuda.get_device(1):
            data2 = cp.ones(3, dtype=np.float32)
        self.check_copydata(data1, data2, expect)
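
The test relies on cuda.get_device as a context manager: arrays created inside the with block are allocated on that device. A brief sketch of the same pattern, assuming a machine with at least two CUDA devices:

import numpy as np
from chainer import cuda

with cuda.get_device(0):
    a = cuda.cupy.zeros(3, dtype=np.float32)   # allocated on GPU 0
with cuda.get_device(1):
    b = cuda.cupy.ones(3, dtype=np.float32)    # allocated on GPU 1

a_on_1 = cuda.to_gpu(a, device=1)              # copy a from GPU 0 to GPU 1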
supervised_trainer.py (project: masalachai, author: DaikiShimada)
def supervised_update(self):
        # array backend
        xp = cuda.cupy if self.gpu >= 0 else numpy

        # read data
        data = self.train_data_queues[0].get()
        vx = tuple([chainer.Variable(xp.asarray(data[k]))
                    for k in data.keys() if 'data' in k])
        vt = tuple([chainer.Variable(xp.asarray(data[k]))
                    for k in data.keys() if 'target' in k])

        # forward and update
        self.optimizer.update(self.optimizer.target, vx, vt)

        # get result
        res = {'loss': float(self.optimizer.target.loss.data)}
        if self.optimizer.target.accuracy is not None:
            res['accuracy'] = float(self.optimizer.target.accuracy.data)
        return res
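
Before the forward pass, the raw batch from the data queue is converted into backend arrays with xp.asarray and wrapped in chainer.Variable. A hedged sketch of that conversion step; the 'data'/'target' key convention follows the snippet, while the function name is illustrative:

import numpy as np
import chainer
from chainer import cuda

def batch_to_variables(batch, gpu=-1):
    # Turn a dict batch into (inputs, targets) tuples of Variables,
    # placed on the GPU when gpu >= 0, as supervised_update does above.
    xp = cuda.cupy if gpu >= 0 else np
    vx = tuple(chainer.Variable(xp.asarray(v))
               for k, v in batch.items() if 'data' in k)
    vt = tuple(chainer.Variable(xp.asarray(v))
               for k, v in batch.items() if 'target' in k)
    return vx, vt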
at.py (project: masalachai, author: DaikiShimada)
def supervised_update(self):
        # array backend
        xp = cuda.cupy if self.gpu >= 0 else numpy

        self.accuracy = None

        # read data
        data = self.train_data_queues[0].get()
        vx = tuple([chainer.Variable(xp.asarray(data[k]))
                    for k in data.keys() if 'data' in k])
        vt = tuple([chainer.Variable(xp.asarray(data[k]))
                    for k in data.keys() if 'target' in k])

        self.optimizer.update(self.adversarial_loss, vx, vt)

        # get result
        res = {'loss': float(self.loss.data),
               'adversarial_loss': float(self.adv_loss.data)}
        if self.accuracy is not None:
            res['accuracy'] = self.accuracy
        return res
updater.py (project: chainer-wasserstein-gan, author: hvy)
def __init__(self, *, iterator, noise_iterator, optimizer_generator,
                 optimizer_critic, device=-1):

        if optimizer_generator.target.name is None:
            optimizer_generator.target.name = 'generator'

        if optimizer_critic.target.name is None:
            optimizer_critic.target.name = 'critic'

        iterators = {'main': iterator, 'z': noise_iterator}
        optimizers = {'generator': optimizer_generator,
                      'critic': optimizer_critic}

        super().__init__(iterators, optimizers, device=device)

        if device >= 0:
            cuda.get_device(device).use()
            [optimizer.target.to_gpu() for optimizer in optimizers.values()]

        self.xp = cuda.cupy if device >= 0 else np
captioning.py (project: ImageCaptioning, author: rkuga)
def __init__(self,gpu,batchsize,data_dir,dataset,net,mode,epochs,save_every,size,**kwargs):
        super(Network, self).__init__(epochs,save_every)
        print "building ..."
        self.input_height=size
        self.input_width=size
        self.net = net
        self.mode=mode
        self.dataset=dataset
        self.train_data, self.test_data=self.get_dataset(data_dir,dataset)
        print 'input_channel ==> %d using %s dataset'%(self.in_channel, self.dataset)

        self.enc = GoogLeNet()
        self.dec = Decoder(self.in_size)

        self.xp = cuda.cupy
        cuda.get_device(gpu).use()

        self.enc.to_gpu()
        self.dec.to_gpu()

        self.o_dec = optimizers.RMSpropGraves()
        self.o_dec.setup(self.dec)

        self.batchsize=batchsize
trainer.py (project: chainer-cifar, author: dsanno)
def __init__(self, net, optimizer, epoch_num=100, batch_size=100, device_id=-1, lr_shape='multistep', lr_decay=[0]):
        self.net = net
        self.optimizer = optimizer
        self.epoch_num = epoch_num
        self.batch_size = batch_size
        self.device_id = device_id
        if hasattr(optimizer, 'alpha'):
            self.initial_lr = optimizer.alpha
        else:
            self.initial_lr = optimizer.lr
        self.lr_shape = lr_shape
        self.lr_decay = lr_decay
        if device_id >= 0:
            self.xp = cuda.cupy
            self.net.to_gpu(device_id)
        else:
            self.xp = np
cross_entropy.py (project: nmtrain, author: philip30)
def forward_gpu(self, inputs):
    cupy = cuda.cupy
    x, t = inputs
    if chainer.is_debug():
      self._check_input_values(x, t)

    log_y = cupy.log(x)
    if self.cache_score:
      self.y = x
    if getattr(self, 'normalize', True):
      coeff = cupy.maximum(1, (t != self.ignore_label).sum())
    else:
      coeff = max(1, len(t))
    self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

    log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
    ret = cuda.reduce(
      'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
      't == -1 ? 0 : log_y[_j * n_channel + t]',
      'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
    )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
    return ret,
cross_entropy.py (project: nmtrain, author: philip30)
def backward_gpu(self, inputs, grad_outputs):
    cupy = cuda.cupy
    x, t = inputs
    if hasattr(self, 'y'):
      y = self.y
    else:
      y = x
    gloss = grad_outputs[0]
    n_unit = t.size // len(t)
    coeff = gloss * self._coeff
    gx = cuda.elementwise(
      'T y, S t, raw T coeff, S n_channel, S n_unit',
      'T gx',
      '''
         const int c = (i / n_unit % n_channel);
         gx = (t == -1 || c != t) ? 0 : (coeff[0] * -1.0 / y);
      ''',
      'crossent_bwd')(
          y, cupy.expand_dims(t, 1), coeff, x.shape[1], n_unit)
    return gx, None
error.py (project: chainer-glu, author: musyoku)
def __init__(self, use_cudnn=True, normalize=True, cache_score=True,
                 class_weight=None, ignore_label=-1, reduce='mean'):
        self.use_cudnn = use_cudnn
        self.normalize = normalize
        self.cache_score = cache_score
        self.class_weight = class_weight
        if class_weight is not None:
            if self.class_weight.ndim != 1:
                raise ValueError('class_weight.ndim should be 1')
            if self.class_weight.dtype.kind != 'f':
                raise ValueError('The dtype of class_weight should be \'f\'')
            if isinstance(self.class_weight, chainer.Variable):
                raise ValueError('class_weight should be a numpy.ndarray or '
                                 'cupy.ndarray, not a chainer.Variable')
        self.ignore_label = ignore_label
        if reduce not in ('mean', 'no'):
            raise ValueError(
                "only 'mean' and 'no' are valid for 'reduce', but '%s' is "
                'given' % reduce)
        self.reduce = reduce
model.py (project: deep-learning-for-human-part-discovery-in-images, author: shiba24)
def calculate_accuracy(self, predictions, truths):
        if cuda.get_array_module(predictions.data) == cuda.cupy:
            with predictions.data.device:
                predictions =  predictions.data.get()
            with truths.data.device:
                truths = truths.data.get()
        else:
            predictions = predictions.data
            truths = truths.data

        # we want to exclude labels with -1
        mask = truths != -1
        # reduce values along the class axis
        reduced_predictions = np.argmax(predictions, axis=1)
        # mask out ignored labels
        masked_reduced_predictions = reduced_predictions[mask]
        masked_truths = truths[mask]
        s = (masked_reduced_predictions == masked_truths).mean()
        return s
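
The accuracy computation first pulls GPU arrays back to host memory with .get(), staying inside the owning device's context, and then works purely in NumPy. A small helper in the same spirit (name and scope are illustrative):

import numpy as np
from chainer import cuda

def to_host(array):
    # Return a numpy copy of a cupy array; numpy arrays pass through.
    if cuda.get_array_module(array) is np:
        return array
    with array.device:   # perform the copy on the array's own GPU
        return array.get()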
softmax_cross_entropy.py (project: chainer-cf-nade, author: dsanno)
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t, w = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = softmax_log(x, self.use_cudnn)
        if self.cache_score:
            self.y = cupy.exp(log_y)
        if getattr(self, 'normalize', True):
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        ret = cuda.reduce(
            'S t, T w, raw T log_y, int32 n_channel, raw T coeff', 'T out',
            't == -1 ? T(0) : log_y[_j * n_channel + t] * w',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, w, log_y.reduced_view(), log_y.shape[-1], self._coeff)
        return ret,
softmax_cross_entropy.py (project: chainer-cf-nade, author: dsanno)
def backward_gpu(self, inputs, grad_outputs):
        cupy = cuda.cupy
        x, t, w = inputs
        if hasattr(self, 'y'):
            y = self.y
        else:
            y = softmax_log(x, self.use_cudnn)
            cupy.exp(y, out=y)
        gloss = grad_outputs[0]
        n_unit = t.size // len(t)
        coeff = gloss * self._coeff * w
        gx = cuda.elementwise(
            'T y, S t, raw T coeff, S n_channel, S n_unit',
            'T gx',
            '''
               const int c = (i / n_unit % n_channel);
               gx = (t == -1) ? 0 : (coeff[0] * (y - (c == t)));
            ''',
            'softmax_crossent_bwd')(
                y, cupy.expand_dims(t, 1), coeff, x.shape[1], n_unit)
        return gx, None, None
convolution_rbm.py (project: SeRanet, author: corochann)
def __init__(self, in_channels, out_channels, ksize, stride=1, real=0, wscale=1.0):
        super(ConvolutionRBM, self).__init__(
            conv=L.Convolution2D(in_channels, out_channels, ksize, stride=stride, wscale=wscale),
        )

#        if gpu >= 0:
#            cuda.check_cuda_available()
#            xp = cuda.cupy # if gpu >= 0 else np
        self.conv.add_param("a", in_channels)  # dtype=xp.float32
        self.conv.a.data.fill(0.)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.ksize = ksize
        self.real = real

        self.rbm_train = False  # default value is false
convolution_rbm.py (project: SeRanet, author: corochann)
def sample_h_given_v(self, v0_sample):
        """ get a sample of the hiddens by gibbs sampling
        :param v0_sample: Variable, see vis above
        :return:
        h1_mean:   Variable Matrix(batch_size, out_channels, image_height_out, image_width_out)
        h1_sample: Variable Matrix(batch_size, out_channels, image_height_out, image_width_out)
                   - actual sample for hidden units, populated by 0 or 1.
        """
        h1_mean = self.propup(v0_sample)
        xp = cuda.get_array_module(h1_mean.data)
        if xp == cuda.cupy:
            h1_sample = cuda.cupy.random.random_sample(size=h1_mean.data.shape)
            h1_sample[:] = h1_sample[:] < h1_mean.data[:]
        else:  # xp == np
            h1_sample = np.random.binomial(size=h1_mean.data.shape, n=1, p=h1_mean.data)
        return h1_mean, Variable(h1_sample.astype(xp.float32))
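
On the GPU, the hidden units are sampled by thresholding uniform noise against the activation probabilities, which is equivalent to drawing Bernoulli samples; the CPU branch calls np.random.binomial directly. A standalone sketch of that equivalence (function name is illustrative):

import numpy as np
from chainer import cuda

def bernoulli_like(p, xp=np):
    # Draw 0/1 samples with success probabilities p.
    # On the GPU, u < p with u ~ Uniform(0, 1) is a Bernoulli(p) draw,
    # the same trick used by sample_h_given_v above.
    if xp is np:
        return np.random.binomial(size=p.shape, n=1, p=p).astype(np.float32)
    u = cuda.cupy.random.random_sample(size=p.shape)
    return (u < p).astype(cuda.cupy.float32)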
grad_check.py (project: reinforcement-learning, author: musyoku)
def forward_check():
    xp = cuda.cupy if config.use_gpu else np
    out_head = 2
    in_head = 3
    n_x = 100
    state = xp.ones((2, n_x)).astype(xp.float32)
    state = Variable(state)
    initial_weight = np.ones((config.q_k_heads * in_head, n_x))
    shared = L.Linear(n_x, config.q_k_heads * in_head, initialW=initial_weight)
    initial_weight = np.ones((out_head * config.q_k_heads, in_head * config.q_k_heads))
    link1 = model.LinearHead(in_head, out_head, config.q_k_heads, initialW=initial_weight)
    initial_weight = np.ones((in_head * config.q_k_heads, out_head * config.q_k_heads))
    link2 = model.LinearHead(out_head, in_head, config.q_k_heads, initialW=initial_weight)
    if config.use_gpu:
        link1.to_gpu()
        link2.to_gpu()
        shared.to_gpu()
    output = link2(link1(shared(state)))
    print output.data
lstm_decoder.py (project: DSTC6-End-to-End-Conversation-Modeling, author: dialogtekgeek)
def update(self, s, i):
        """Update decoder state

        Args:
            s (any): Current (hidden, cell) states.  If ``None`` is specified 
                     zero-vector is used.
            i (int): input label.
        Return:
            (~chainer.Variable) updated decoder state
        """
        if s is not None and cuda.get_device_from_array(s[0].data).id >= 0:
            xp = cuda.cupy
        else:
            xp = np

        v = chainer.Variable(xp.array([i],dtype=np.int32))
        x = self.embed(v)
        if s is not None:
            hy, cy, dy = self.lstm(s[0], s[1], [x])
        else:
            hy, cy, dy = self.lstm(None, None, [x])

        return hy, cy, dy
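
The array module is inferred from where the state tensor lives: cuda.get_device_from_array returns a device whose id is non-negative for GPU arrays and -1 for NumPy arrays. A compact sketch of that dispatch (function name is illustrative):

import numpy as np
from chainer import cuda

def xp_of(array):
    # Pick the array module matching where `array` is stored:
    # GPU arrays report a device id >= 0, CPU arrays report -1.
    if cuda.get_device_from_array(array).id >= 0:
        return cuda.cupy
    return np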

