Example source code for the Python class IntTensor()
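The snippets below are collected real-world usages of torch.IntTensor, the CPU 32-bit integer tensor type. As a quick orientation, here is a minimal sketch of the constructor forms that appear throughout (the values are arbitrary):

import torch

x = torch.IntTensor(2, 3).zero_()     # uninitialized 2x3 int32 buffer, then zeroed in place
y = torch.IntTensor([4, 5, 6])        # built from a Python list
print(x.type(), y.type())             # torch.IntTensor torch.IntTensor
print((y + 1).tolist())               # [5, 6, 7]  (arithmetic stays int32)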

pytorch_misc.py (project: pytorch-seq2seq, author: rowanz)
def transpose_packed_sequence(ps):
    """
    Goes from a TxB packed sequence to a BxT one, or vice versa. Assumes that nothing is a Variable.
    :param ps: PackedSequence
    :return:
    """
    data, batch_sizes = ps
    seq_lens = transpose_batch_sizes(batch_sizes)

    # Put things in the permutation matrix one way, take out another way
    perm_mat = torch.IntTensor(batch_sizes[0], len(batch_sizes)).long().zero_()
    cur = 0
    for i, sl in enumerate(seq_lens):
        for col_ind in range(sl):
            perm_mat[i, col_ind] = cur + col_ind
        cur += sl
    perm = pack_padded_sequence(perm_mat, seq_lens, batch_first=True).data
    return PackedSequence(data[perm], seq_lens)
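A self-contained sketch of the permutation-matrix idea used above, with made-up sequence lengths and a stand-in for ps.data (the real function additionally relies on transpose_batch_sizes and pack_padded_sequence):

import torch

# Hypothetical per-sequence lengths, already sorted in decreasing order.
seq_lens = [3, 2, 1]
perm_mat = torch.IntTensor(len(seq_lens), max(seq_lens)).long().zero_()
cur = 0
for i, sl in enumerate(seq_lens):
    for col_ind in range(sl):
        perm_mat[i, col_ind] = cur + col_ind
    cur += sl
# Flat batch-major buffer: 3 tokens of seq 0, then 2 of seq 1, then 1 of seq 2.
data = torch.arange(0, sum(seq_lens))
print(data[perm_mat[0, :seq_lens[0]]])   # positions belonging to the first sequence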
crnn_main.py (project: crnn, author: wulivicte)
def trainBatch(net, criterion, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)

    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
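For context, preds_size and length above are the per-sample input and target lengths handed to the CTC criterion. A minimal standalone sketch of that bookkeeping, using the built-in nn.CTCLoss as a stand-in for whichever CTC criterion the repo wires up (typically warp-ctc in this generation of CRNN code); T, n_classes and the target length are made-up values:

import torch
import torch.nn as nn

T, batch_size, n_classes, tgt_len = 26, 4, 37, 10      # assumed shapes
log_probs = torch.randn(T, batch_size, n_classes).log_softmax(2)
targets = torch.randint(1, n_classes, (batch_size, tgt_len), dtype=torch.long)
preds_size = torch.IntTensor([T] * batch_size)          # input length per sample
length = torch.IntTensor([tgt_len] * batch_size)        # target length per sample
criterion = nn.CTCLoss(blank=0, reduction='sum')
cost = criterion(log_probs, targets, preds_size, length) / batch_size
print(cost.item())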
utils.py (project: crnn, author: wulivicte)
def encode(self, text):
        """Support batch or single str.

        Args:
            text (str or list of str): texts to convert.

        Returns:
            torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
            torch.IntTensor [n]: length of each text.
        """
        length = []
        result = []
        for item in text:
            if self.is_chinese(item):
                item = unicode(item, 'utf-8')
            length.append(len(item))
            for char in item:
                index = self.dict[char]
                result.append(index)
        text = result
        return (torch.IntTensor(text), torch.IntTensor(length))
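A small illustration of what this encode returns, using a made-up character-to-index map in place of self.dict:

import torch

alphabet = {'a': 1, 'b': 2, 'c': 3}            # hypothetical char -> index map
texts = ['ab', 'cab']
indices = [alphabet[ch] for item in texts for ch in item]
lengths = [len(item) for item in texts]
flat = torch.IntTensor(indices)                 # IntTensor [length_0 + length_1]
lens = torch.IntTensor(lengths)                 # IntTensor [n]
print(flat.tolist(), lens.tolist())             # [1, 2, 3, 1, 2] [2, 3]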
LookupTable.py (project: pytorch-dist, author: apaszke)
def __init__(self, nIndex, nOutput, paddingValue=-1, maxNorm=None, normType=None):
        super(LookupTable, self).__init__()
        self.weight = torch.Tensor(nIndex, nOutput)
        self.gradWeight = torch.Tensor(nIndex, nOutput).zero_()
        self.paddingValue = paddingValue
        self.maxNorm = maxNorm
        self.normType = normType
        self.shouldScaleGradByFreq = False

        self._gradOutput = None
        self._sorted = None
        self._indices = None

        self._count = torch.IntTensor()
        self._input = torch.LongTensor()

        self.reset()
LookupTable.py (project: pytorch-dist, author: apaszke)
def type(self, type=None, tensorCache=None):
        if not type:
            return self._type
        super(LookupTable, self).type(type, tensorCache)

        if type == 'torch.cuda.FloatTensor':
            # CUDA uses _sorted and _indices temporary tensors
            self._sorted = torch.cuda.LongTensor()
            self._indices = torch.cuda.LongTensor()
            self._count = torch.cuda.LongTensor()
            self._input = torch.cuda.LongTensor()
        else:
            # self._count and self._input should only be converted if using Cuda
            self._count = torch.IntTensor()
            self._input = torch.LongTensor()

        return self
test_cuda.py (project: pytorch-dist, author: apaszke)
def test_serialization(self):
        x = torch.randn(5, 5).cuda()
        y = torch.IntTensor(2, 5).fill_(0).cuda()
        q = [x, y, x, y.storage()]
        with tempfile.NamedTemporaryFile() as f:
            torch.save(q, f)
            f.seek(0)
            q_copy = torch.load(f)
        self.assertEqual(q_copy, q, 0)
        q_copy[0].fill_(5)
        self.assertEqual(q_copy[0], q_copy[2], 0)
        self.assertTrue(isinstance(q_copy[0], torch.cuda.DoubleTensor))
        self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
        self.assertTrue(isinstance(q_copy[2], torch.cuda.DoubleTensor))
        self.assertTrue(isinstance(q_copy[3], torch.cuda.IntStorage))
        q_copy[1].fill_(10)
        self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
test_torch.py (project: pytorch-dist, author: apaszke)
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            self.assertEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1  # ** (power), not ^ (xor)
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
util_test.py (project: allennlp, author: allenai)
def test_last_dim_softmax_handles_mask_correctly(self):
        batch_size = 1
        length_1 = 4
        length_2 = 3
        num_options = 5
        options_array = numpy.zeros((batch_size, length_1, length_2, num_options))
        for i in range(length_1):
            for j in range(length_2):
                options_array[0, i, j] = [2, 4, 0, 1, 6]
        mask = Variable(torch.IntTensor([[1, 1, 1, 1, 0]]))
        options_tensor = Variable(torch.from_numpy(options_array).float())
        softmax_tensor = util.last_dim_softmax(options_tensor, mask).data.numpy()
        assert softmax_tensor.shape == (batch_size, length_1, length_2, num_options)
        for i in range(length_1):
            for j in range(length_2):
                assert_almost_equal(softmax_tensor[0, i, j],
                                    [0.112457, 0.830953, 0.015219, 0.041371, 0.0],
                                    decimal=5)
coref.py (project: allennlp, author: allenai)
def _prune_and_sort_spans(mention_scores: torch.FloatTensor,
                              num_spans_to_keep: int) -> torch.IntTensor:
        """
        Returns the indices of the top-k scoring spans according to ``mention_scores``.
        The indices are returned in their original order, not ordered by score, so that
        we can rely on the ordering to consider the previous k spans as antecedents for
        each span later.

        Parameters
        ----------
        mention_scores : ``torch.FloatTensor``, required.
            The mention score for every candidate, with shape (batch_size, num_spans, 1).
        num_spans_to_keep : ``int``, required.
            The number of spans to keep when pruning.
        Returns
        -------
        top_span_indices : ``torch.IntTensor``
            The indices of the top-k scoring spans. Has shape (batch_size, num_spans_to_keep).
        """
        # Shape: (batch_size, num_spans_to_keep, 1)
        _, top_span_indices = mention_scores.topk(num_spans_to_keep, 1)
        top_span_indices, _ = torch.sort(top_span_indices, 1)

        # Shape: (batch_size, num_spans_to_keep)
        top_span_indices = top_span_indices.squeeze(-1)
        return top_span_indices
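A quick standalone check of the prune-then-restore-order trick (random scores; batch and span counts are arbitrary):

import torch

batch_size, num_spans, k = 2, 6, 3
mention_scores = torch.randn(batch_size, num_spans, 1)
_, top = mention_scores.topk(k, 1)       # indices of the k best spans per batch
top, _ = torch.sort(top, 1)              # restore original (left-to-right) order
top = top.squeeze(-1)                    # shape: (batch_size, k)
print(top)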
bleu.py (project: ParlAI, author: facebookresearch)
def add(self, ref, pred):
        if not isinstance(ref, torch.IntTensor):
            raise TypeError('ref must be a torch.IntTensor (got {})'
                            .format(type(ref)))
        if not isinstance(pred, torch.IntTensor):
            raise TypeError('pred must be a torch.IntTensor (got {})'
                            .format(type(pred)))

        assert self.unk > 0, 'unknown token index must be >0'
        rref = ref.clone()
        rref.apply_(lambda x: x if x != self.unk else -x)

        rref = rref.contiguous().view(-1)
        pred = pred.contiguous().view(-1)

        C.bleu_add(
            ctypes.byref(self.stat),
            ctypes.c_size_t(rref.size(0)),
            ctypes.c_void_p(rref.data_ptr()),
            ctypes.c_size_t(pred.size(0)),
            ctypes.c_void_p(pred.data_ptr()),
            ctypes.c_int(self.pad),
            ctypes.c_int(self.eos))
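The apply_ call above negates every unknown-token index so those positions can never match a prediction during BLEU counting. A minimal CPU-only sketch (apply_ only works on CPU tensors; the unk index and values are made up):

import torch

unk = 3
ref = torch.IntTensor([5, 3, 7, 3, 2])
rref = ref.clone()
rref.apply_(lambda x: x if x != unk else -x)   # flip the sign of every unk index
print(rref.tolist())                            # [5, -3, 7, -3, 2]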
util.py (project: sceneReco, author: bear63)
def decode(self, t, length, raw=False):
        if length.numel() == 1:
            length = length[0]
            t = t[:length]
            if raw:
                return ''.join([self.alphabet[i - 1] for i in t])
            else:
                char_list = []
                for i in range(length):
                    if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
                        char_list.append(self.alphabet[t[i] - 1])
                return ''.join(char_list)
        else:
            texts = []
            index = 0
            for i in range(length.numel()):
                l = length[i]
                texts.append(self.decode(
                    t[index:index + l], torch.IntTensor([l]), raw=raw))
                index += l
            return texts
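The single-sequence branch is the usual CTC collapse: drop blanks (index 0) and merge repeated labels. A self-contained sketch with a toy alphabet (the values are invented):

import torch

alphabet = 'abc'                          # index 1 -> 'a', 2 -> 'b', 3 -> 'c'; 0 is the blank
t = torch.IntTensor([1, 1, 0, 2, 2, 0, 3])
chars = []
for i in range(t.numel()):
    if t[i] != 0 and not (i > 0 and t[i - 1] == t[i]):
        chars.append(alphabet[t[i] - 1])
print(''.join(chars))                     # 'abc'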
darknet.py (project: pytorch-caffe-darknet-convert, author: marvis)
def __init__(self, cfgfile):
        super(Darknet, self).__init__()
        self.blocks = parse_cfg(cfgfile)
        self.models = self.create_network(self.blocks) # merge conv, bn,leaky
        self.loss = self.models[len(self.models)-1]

        self.width = int(self.blocks[0]['width'])
        self.height = int(self.blocks[0]['height'])

        if self.blocks[(len(self.blocks)-1)]['type'] == 'region':
            region_block = self.blocks[(len(self.blocks)-1)]
            anchors = region_block['anchors'].split(',')
            self.anchors = [float(i) for i in anchors]
            self.num_anchors = int(region_block['num'])
            self.anchor_step = len(self.anchors)/self.num_anchors
            self.num_classes = int(region_block['classes'])

        self.header = torch.IntTensor([0,0,0,0])
        self.seen = 0
        self.has_mean = False
roi_pool.py (project: faster_rcnn_pytorch, author: longcw)
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        output = torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)
        argmax = torch.IntTensor(num_rois, num_channels, self.pooled_height, self.pooled_width).zero_()

        if not features.is_cuda:
            _features = features.permute(0, 2, 3, 1)
            roi_pooling.roi_pooling_forward(self.pooled_height, self.pooled_width, self.spatial_scale,
                                            _features, rois, output)
            # output = output.cuda()
        else:
            output = output.cuda()
            argmax = argmax.cuda()
            roi_pooling.roi_pooling_forward_cuda(self.pooled_height, self.pooled_width, self.spatial_scale,
                                                 features, rois, output, argmax)
            self.output = output
            self.argmax = argmax
            self.rois = rois
            self.feature_size = features.size()

        return output
miniImagenet.py (project: FewShotLearning, author: gitabcworld)
def __getitem__(self, index):

        support_set_x = torch.FloatTensor(self.n_samples, 3, 84, 84)
        support_set_y = np.zeros((self.n_samples), dtype=np.int)
        target_x = torch.FloatTensor(self.n_samples_eval, 3, 84, 84)
        target_y = np.zeros((self.n_samples_eval), dtype=np.int)

        flatten_support_set_x_batch = [os.path.join(self.miniImagenetImagesDir,item)
                                       for sublist in self.support_set_x_batch[index] for item in sublist]
        support_set_y = np.array([self.classes_dict[item[:9]]
                                      for sublist in self.support_set_x_batch[index] for item in sublist])
        flatten_target_x = [os.path.join(self.miniImagenetImagesDir,item)
                            for sublist in self.target_x_batch[index] for item in sublist]
        target_y = np.array([self.classes_dict[item[:9]]
                            for sublist in self.target_x_batch[index] for item in sublist])

        for i,path in enumerate(flatten_support_set_x_batch):
            if self.transform is not None:
                support_set_x[i] = self.transform(path)

        for i,path in enumerate(flatten_target_x):
            if self.transform is not None:
                target_x[i] = self.transform(path)
        return support_set_x, torch.IntTensor(support_set_y), target_x, torch.IntTensor(target_y)
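The label arrays built above are numpy integer arrays handed back as torch.IntTensor; a tiny sketch of that conversion with made-up labels:

import numpy as np
import torch

labels = np.array([3, 1, 4, 1, 5])
t = torch.IntTensor(labels.tolist())            # copy via a Python list
t2 = torch.from_numpy(labels.astype(np.int32))  # view of the int32 array
print(t.type(), t2.type())                      # torch.IntTensor torch.IntTensor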
bleu.py (project: fairseq-py, author: facebookresearch)
def add(self, ref, pred):
        if not isinstance(ref, torch.IntTensor):
            raise TypeError('ref must be a torch.IntTensor (got {})'
                            .format(type(ref)))
        if not isinstance(pred, torch.IntTensor):
            raise TypeError('pred must be a torch.IntTensor (got {})'
                            .format(type(pred)))

        assert self.unk > 0, 'unknown token index must be >0'
        rref = ref.clone()
        rref.apply_(lambda x: x if x != self.unk else -x)

        rref = rref.contiguous().view(-1)
        pred = pred.contiguous().view(-1)

        C.bleu_add(
            ctypes.byref(self.stat),
            ctypes.c_size_t(rref.size(0)),
            ctypes.c_void_p(rref.data_ptr()),
            ctypes.c_size_t(pred.size(0)),
            ctypes.c_void_p(pred.data_ptr()),
            ctypes.c_int(self.pad),
            ctypes.c_int(self.eos))
roi_pool.py (project: MSDN, author: yikang-li)
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        output = torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)
        argmax = torch.IntTensor(num_rois, num_channels, self.pooled_height, self.pooled_width).zero_()

        if not features.is_cuda:
            assert False, 'feature not in CUDA'
            _features = features.permute(0, 2, 3, 1)
            roi_pooling.roi_pooling_forward(self.pooled_height, self.pooled_width, self.spatial_scale,
                                            _features, rois, output)
            # output = output.cuda()
        else:
            output = output.cuda()
            argmax = argmax.cuda()
            roi_pooling.roi_pooling_forward_cuda(self.pooled_height, self.pooled_width, self.spatial_scale,
                                                 features, rois, output, argmax)
            self.output = output
            self.argmax = argmax
            self.rois = rois
            self.feature_size = features.size()

        return output
roi_pool.py (project: yolo2-pytorch, author: longcw)
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        output = torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)
        argmax = torch.IntTensor(num_rois, num_channels, self.pooled_height, self.pooled_width).zero_()

        if not features.is_cuda:
            _features = features.permute(0, 2, 3, 1)
            roi_pooling.roi_pooling_forward(self.pooled_height, self.pooled_width, self.spatial_scale,
                                            _features, rois, output)
            # output = output.cuda()
        else:
            output = output.cuda()
            argmax = argmax.cuda()
            roi_pooling.roi_pooling_forward_cuda(self.pooled_height, self.pooled_width, self.spatial_scale,
                                                 features, rois, output, argmax)
            self.output = output
            self.argmax = argmax
            self.rois = rois
            self.feature_size = features.size()

        return output
LookupTable.py (project: pytorch, author: tylergenter)
def __init__(self, nIndex, nOutput, paddingValue=-1, maxNorm=None, normType=None):
        super(LookupTable, self).__init__()
        self.weight = torch.Tensor(nIndex, nOutput)
        self.gradWeight = torch.Tensor(nIndex, nOutput).zero_()
        self.paddingValue = paddingValue
        self.maxNorm = maxNorm
        self.normType = normType
        self.shouldScaleGradByFreq = False

        self._gradOutput = None
        self._sorted = None
        self._indices = None

        self._count = torch.IntTensor()
        self._input = torch.LongTensor()

        self.reset()
LookupTable.py (project: pytorch, author: tylergenter)
def type(self, type=None, tensorCache=None):
        if type is None:
            return self._type
        super(LookupTable, self).type(type, tensorCache)

        if type == 'torch.cuda.FloatTensor':
            # CUDA uses _sorted and _indices temporary tensors
            self._sorted = torch.cuda.LongTensor()
            self._indices = torch.cuda.LongTensor()
            self._count = torch.cuda.LongTensor()
            self._input = torch.cuda.LongTensor()
        else:
            # self._count and self._input should only be converted if using Cuda
            self._count = torch.IntTensor()
            self._input = torch.LongTensor()

        return self
test_autograd.py (project: pytorch, author: tylergenter)
def test_type_conversions(self):
        x = Variable(torch.randn(5, 5))
        self.assertIs(type(x.float().data), torch.FloatTensor)
        self.assertIs(type(x.int().data), torch.IntTensor)
        if torch.cuda.is_available():
            self.assertIs(type(x.float().cuda().data), torch.cuda.FloatTensor)
            self.assertIs(type(x.int().cuda().data), torch.cuda.IntTensor)
            self.assertIs(type(x.int().cuda().cpu().data), torch.IntTensor)
            if torch.cuda.device_count() > 2:
                x2 = x.float().cuda(1)
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)
                x2 = x.float().cuda()
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 0)
                x2 = x2.cuda(1)
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)

        for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
            y = Variable(torch.randn(5, 5).type(t))
            self.assertIs(type(x.type_as(y).data), t)
test_torch.py (project: pytorch, author: tylergenter)
def _test_btrisolve(self, cast):
        a = torch.FloatTensor((((1.3722, -0.9020),
                                (1.8849, 1.9169)),
                               ((0.7187, -1.1695),
                                (-0.0139, 1.3572)),
                               ((-1.6181, 0.7148),
                                (1.3728, 0.1319))))
        b = torch.FloatTensor(((4.02, 6.19),
                               (-1.56, 4.00),
                               (9.81, -4.09)))
        a, b = cast(a), cast(b)
        info = cast(torch.IntTensor())
        LU_data, pivots = a.btrifact(info=info)
        self.assertEqual(info.abs().sum(), 0)
        x = torch.btrisolve(b, LU_data, pivots)
        b_ = torch.bmm(a, x.unsqueeze(2)).squeeze()
        self.assertEqual(b_, b)
test_torch.py (project: pytorch, author: tylergenter)
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            # abs is used in assertEqual so we use the slow version instead
            self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1  # ** (power), not ^ (xor)
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
crnn_main.py (project: crnn.pytorch, author: meijieru)
def trainBatch(net, criterion, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)

    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
utils.py (project: crnn.pytorch, author: meijieru)
def encode(self, text):
        """Support batch or single str.

        Args:
            text (str or list of str): texts to convert.

        Returns:
            torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
            torch.IntTensor [n]: length of each text.
        """
        if isinstance(text, str):
            text = [
                self.dict[char.lower() if self._ignore_case else char]
                for char in text
            ]
            length = [len(text)]
        elif isinstance(text, collections.Iterable):
            length = [len(s) for s in text]
            text = ''.join(text)
            text, _ = self.encode(text)
        return (torch.IntTensor(text), torch.IntTensor(length))
LookupTable.py (project: pytorch-coriander, author: hughperkins)
def __init__(self, nIndex, nOutput, paddingValue=-1, maxNorm=None, normType=None):
        super(LookupTable, self).__init__()
        self.weight = torch.Tensor(nIndex, nOutput)
        self.gradWeight = torch.Tensor(nIndex, nOutput).zero_()
        self.paddingValue = paddingValue
        self.maxNorm = maxNorm
        self.normType = normType
        self.shouldScaleGradByFreq = False

        self._gradOutput = None
        self._sorted = None
        self._indices = None

        self._count = torch.IntTensor()
        self._input = torch.LongTensor()

        self.reset()
LookupTable.py (project: pytorch-coriander, author: hughperkins)
def type(self, type=None, tensorCache=None):
        if type is None:
            return self._type
        super(LookupTable, self).type(type, tensorCache)

        if type == 'torch.cuda.FloatTensor':
            # CUDA uses _sorted and _indices temporary tensors
            self._sorted = torch.cuda.LongTensor()
            self._indices = torch.cuda.LongTensor()
            self._count = torch.cuda.LongTensor()
            self._input = torch.cuda.LongTensor()
        else:
            # self._count and self._input should only be converted if using Cuda
            self._count = torch.IntTensor()
            self._input = torch.LongTensor()

        return self
test_autograd.py (project: pytorch-coriander, author: hughperkins)
def test_type_conversions(self):
        x = Variable(torch.randn(5, 5))
        self.assertIs(type(x.float().data), torch.FloatTensor)
        self.assertIs(type(x.int().data), torch.IntTensor)
        if torch.cuda.is_available():
            self.assertIs(type(x.float().cuda().data), torch.cuda.FloatTensor)
            self.assertIs(type(x.int().cuda().data), torch.cuda.IntTensor)
            self.assertIs(type(x.int().cuda().cpu().data), torch.IntTensor)
            if torch.cuda.device_count() > 2:
                x2 = x.float().cuda(1)
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)
                x2 = x.float().cuda()
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 0)
                x2 = x2.cuda(1)
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)

        for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
            for var in (True, False):
                y = torch.randn(5, 5).type(t)
                if var:
                    y = Variable(y)
                self.assertIs(type(x.type_as(y).data), t)
test_torch.py (project: pytorch-coriander, author: hughperkins)
def _test_btrisolve(self, cast):
        a = torch.FloatTensor((((1.3722, -0.9020),
                                (1.8849, 1.9169)),
                               ((0.7187, -1.1695),
                                (-0.0139, 1.3572)),
                               ((-1.6181, 0.7148),
                                (1.3728, 0.1319))))
        b = torch.FloatTensor(((4.02, 6.19),
                               (-1.56, 4.00),
                               (9.81, -4.09)))
        a, b = cast(a), cast(b)
        info = cast(torch.IntTensor())
        LU_data, pivots = a.btrifact(info=info)
        self.assertEqual(info.abs().sum(), 0)
        x = torch.btrisolve(b, LU_data, pivots)
        b_ = torch.bmm(a, x.unsqueeze(2)).squeeze()
        self.assertEqual(b_, b)
test_torch.py (project: pytorch-coriander, author: hughperkins)
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            # abs is used in assertEqual so we use the slow version instead
            self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1  # ** (power), not ^ (xor)
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
lstm_attention.py (project: pytorch-seq2seq, author: rowanz)
def _sample(self, state, context, mask, max_len=20):
        """
        Performs sampling
        """
        batch_size = state.size(0)

        toks = [const_row(self.bos_token, batch_size, volatile=True)]

        lens = torch.IntTensor(batch_size).zero_()  # zero-initialise so the (lens == 0) masks below are well defined
        if torch.cuda.is_available():
            lens = lens.cuda()

        for l in range(max_len + 1):  # +1 because of EOS
            out, state, alpha = self._lstm_loop(state, self.embedding(toks[-1]), context, mask)

            # Do argmax (since we're doing greedy decoding)
            toks.append(out.max(1)[1].squeeze(1))

            lens[(toks[-1].data == self.eos_token) & (lens == 0)] = l+1
            if all(lens):
                break
        lens[lens == 0] = max_len+1
        return torch.stack(toks, 0), lens
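The lens tensor records, for each batch element, the first step at which EOS was produced. A standalone sketch of that masked bookkeeping (the token values, EOS index and number of steps are invented):

import torch

eos = 2                                          # made-up EOS index
lens = torch.IntTensor(4).zero_()                # one slot per batch element
steps = [torch.IntTensor([1, 2, 3, 1]),          # fake argmax output at step 0
         torch.IntTensor([2, 2, 2, 1]),          # step 1
         torch.IntTensor([2, 2, 2, 2])]          # step 2
for l, tok in enumerate(steps):
    lens[(tok == eos) & (lens == 0)] = l + 1     # record the first EOS per sequence
    if all(lens):
        break
lens[lens == 0] = len(steps) + 1                 # sequences that never produced EOS
print(lens.tolist())                             # [2, 1, 2, 3]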

