Python examples of torch.LongTensor()
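torch.LongTensor is PyTorch's 64-bit integer CPU tensor type, used throughout the snippets below for word ids, class labels, and gather/index/scatter indices. As a quick reference, a minimal sketch of the two construction forms that recur in these examples:

import torch

idx = torch.LongTensor([0, 2, 2, 5])  # built from Python data
buf = torch.LongTensor(4)             # size-4 allocation, contents uninitialized
buf.fill_(0)                          # so fill it before reading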

dialog_model.py (project: end-to-end-negotiator, author: facebookresearch)
def read(self, inpt, lang_h, ctx_h, prefix_token="THEM:"):
        """Reads a given utterance."""
        # inpt contains the pronounced utterance
        # add a "THEM:" token to the start of the message
        prefix = Variable(torch.LongTensor(1))
        prefix.data.fill_(self.word_dict.get_idx(prefix_token))
        inpt = torch.cat([self.to_device(prefix), inpt])

        # embed words
        inpt_emb = self.word_encoder(inpt)

        # append the context embedding to every input word embedding
        ctx_h_rep = ctx_h.expand(inpt_emb.size(0), ctx_h.size(1), ctx_h.size(2))
        inpt_emb = torch.cat([inpt_emb, ctx_h_rep], 2)

        # finally read in the words
        out, lang_h = self.reader(inpt_emb, lang_h)

        return out, lang_h
transforms.py (project: audio, author: pytorch)
def __call__(self, x):
        """

        Args:
            x (FloatTensor/LongTensor or ndarray)

        Returns:
            x_mu (LongTensor or ndarray)

        """
        mu = self.qc - 1.
        if isinstance(x, np.ndarray):
            x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
            x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
        elif isinstance(x, (torch.Tensor, torch.LongTensor)):
            if isinstance(x, torch.LongTensor):
                x = x.float()
            mu = torch.FloatTensor([mu])
            x_mu = torch.sign(x) * torch.log1p(mu *
                                               torch.abs(x)) / torch.log1p(mu)
            x_mu = ((x_mu + 1) / 2 * mu + 0.5).long()
        return x_mu
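A standalone sketch of the same mu-law companding formula, handy for sanity-checking the snippet above; the function name mu_law_encode and the default qc=256 are illustrative assumptions, not part of the original class:

import numpy as np

def mu_law_encode(x, qc=256):
    """Map samples in [-1, 1] to integer codes in [0, qc - 1]."""
    mu = qc - 1.
    x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return ((x_mu + 1) / 2 * mu + 0.5).astype(int)

print(mu_law_encode(np.linspace(-1.0, 1.0, num=5)))  # e.g. [0, 16, 128, 239, 255]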
dream.py (project: DREAM, author: LaceyChen17)
def forward(self, x, lengths, hidden):
        # Basket Encoding 
        ub_seqs = [] # users' basket sequence
        for user in x: # x shape (batch of user, time_step, indice of product) nested lists
            embed_baskets = []
            for basket in user:
                basket = torch.LongTensor(basket).resize_(1, len(basket))
                basket = basket.cuda() if self.config.cuda else basket # use cuda for acceleration
                basket = self.encode(torch.autograd.Variable(basket)) # shape: 1, len(basket), embedding_dim
                embed_baskets.append(self.pool(basket, dim = 1))
            # concat current user's all baskets and append it to users' basket sequence
            ub_seqs.append(torch.cat(embed_baskets, 1)) # shape: 1, num_basket, embedding_dim

        # Input for rnn 
        ub_seqs = torch.cat(ub_seqs, 0).cuda() if self.config.cuda else torch.cat(ub_seqs, 0) # shape: batch_size, max_len, embedding_dim
        packed_ub_seqs = torch.nn.utils.rnn.pack_padded_sequence(ub_seqs, lengths, batch_first=True) # packed sequence as required by pytorch

        # RNN
        output, h_u = self.rnn(packed_ub_seqs, hidden)
        dynamic_user, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=True) # shape: batch_size, max_len, embedding_dim
        return dynamic_user, h_u
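For orientation, a hedged sketch of the nested input this forward pass expects (shapes inferred from the comments above; the product indices are invented, and pack_padded_sequence requires lengths sorted in decreasing order):

# batch of two users, each a time-ordered sequence of baskets of product ids
x = [
    [[3, 7, 12], [5, 9], [1]],  # user A: three baskets
    [[2, 6], [10, 4]],          # user B: two baskets (padded upstream to max_len)
]
lengths = [3, 2]                # baskets per user, sorted descending for packing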
train.py (project: DREAM, author: LaceyChen17)
def reorder_bpr_loss(re_x, his_x, dynamic_user, item_embedding, config):
    '''
        loss function for reorder prediction
        re_x padded reorder baskets
        his_x padded history bought items
    '''
    nll = 0
    ub_seqs = []
    for u, h, du in zip(re_x, his_x, dynamic_user):
        du_p_product = torch.mm(du, item_embedding.t()) # shape: max_len, num_item
        nll_u = [] # nll for user
        for t, basket_t in enumerate(u):
            if basket_t[0] != 0:
                pos_idx = torch.cuda.LongTensor(basket_t) if config.cuda else torch.LongTensor(basket_t)                               
                # Sample negative products
                neg = [random.choice(h[t]) for _ in range(len(basket_t))] # sampled with replacement from the user's history
                # neg = random.sample(range(1, config.num_product), len(basket_t)) # without replacement
                neg_idx = torch.cuda.LongTensor(neg) if config.cuda else torch.LongTensor(neg)
                # Score p(u, t, v > v')
                score = du_p_product[t - 1][pos_idx] - du_p_product[t - 1][neg_idx]
                # Average Negative log likelihood for basket_t
                nll_u.append(- torch.mean(torch.nn.LogSigmoid()(score)))
        nll += torch.mean(torch.cat(nll_u))
    return nll
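The score above is the Bayesian Personalized Ranking (BPR) term: for a positive item v and a sampled negative v', the per-pair loss is -log sigmoid(du·v - du·v'). A toy instance, assuming nothing beyond torch itself:

import torch

pos_score = torch.FloatTensor([2.0])   # du . v  (positive item)
neg_score = torch.FloatTensor([0.5])   # du . v' (sampled negative)
loss = -torch.nn.LogSigmoid()(pos_score - neg_score)
print(loss)  # small when the positive item outranks the negative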
train.py (project: Structured-Self-Attentive-Sentence-Embedding, author: ExplorerFreda)
def package(data, volatile=False):
    """Package data for training / evaluation."""
    data = [json.loads(x) for x in data]  # lists, not map objects, so they can be indexed and re-iterated below
    dat = [[dictionary.word2idx[y] for y in x['text']] for x in data]
    maxlen = 0
    for item in dat:
        maxlen = max(maxlen, len(item))
    targets = [x['label'] for x in data]
    maxlen = min(maxlen, 500)
    for i in range(len(data)):
        if maxlen < len(dat[i]):
            dat[i] = dat[i][:maxlen]
        else:
            for j in range(maxlen - len(dat[i])):
                dat[i].append(dictionary.word2idx['<pad>'])
    dat = Variable(torch.LongTensor(dat), volatile=volatile)
    targets = Variable(torch.LongTensor(targets), volatile=volatile)
    return dat.t(), targets
data.py (project: YellowFin_Pytorch, author: JianGoForIt)
def tokenize(self, path):
        """Tokenizes a text file."""
        assert os.path.exists(path)
        # Add words to the dictionary
        with open(path, 'r') as f:
            tokens = 0
            for line in f:
                words = line.split() + ['<eos>']
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)

        # Tokenize file content
        with open(path, 'r') as f:
            ids = torch.LongTensor(tokens)
            token = 0
            for line in f:
                words = line.split() + ['<eos>']
                for word in words:
                    ids[token] = self.dictionary.word2idx[word]
                    token += 1

        return ids
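Downstream, the flat id tensor is typically cut into batches; a minimal batchify sketch in the style of this repo family (the helper name and trimming policy are assumptions, not part of the file above):

def batchify(ids, bsz):
    # Trim tokens that do not fit evenly, then lay out as (seq_len, batch).
    nbatch = ids.size(0) // bsz
    ids = ids.narrow(0, 0, nbatch * bsz)
    return ids.view(bsz, -1).t().contiguous()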
saliency.py (project: DeepLearning_PlantDiseases, author: MarkoArsenovic)
def Saliency_map(image,model,preprocess,ground_truth,use_gpu=False,method=util.GradType.GUIDED):
    vis_param_dict['method'] = method
    img_tensor = preprocess(image)
    img_tensor.unsqueeze_(0)
    if use_gpu:
        img_tensor=img_tensor.cuda()
    input = Variable(img_tensor,requires_grad=True)

    if  input.grad is not None:
        input.grad.data.zero_()

    model.zero_grad()
    output = model(input)
    ind=torch.LongTensor(1)
    if(isinstance(ground_truth,np.int64)):
        ground_truth=np.asscalar(ground_truth)
    ind[0]=ground_truth
    ind=Variable(ind)
    energy=output[0,ground_truth]
    energy.backward() 
    grad=input.grad
    if use_gpu:
        return np.abs(grad.data.cpu().numpy()[0]).max(axis=0)
    return np.abs(grad.data.numpy()[0]).max(axis=0)
dataloader.py (project: pytorch-dist, author: apaszke)
def default_collate(batch):
    "Puts each data field into a tensor with outer dimension batch size"
    if torch.is_tensor(batch[0]):
        return torch.cat([t.view(1, *t.size()) for t in batch], 0)
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], str):
        return batch
    elif isinstance(batch[0], collections.Iterable):
        # if each batch element is not a tensor, then it should be a tuple
        # of tensors; in that case we collate each element in the tuple
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]

    raise TypeError(("batch must contain tensors, numbers, or lists; found {}"
                     .format(type(batch[0]))))
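A usage sketch: applied to a list of (input, label) tuples, the tuple branch transposes the batch and collates each field, yielding a stacked FloatTensor and a LongTensor of labels:

import torch

batch = [(torch.randn(3), 0), (torch.randn(3), 1)]
inputs, labels = default_collate(batch)
# inputs: FloatTensor of size 2x3; labels: LongTensor([0, 1])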
LookupTable.py (project: pytorch-dist, author: apaszke)
def __init__(self, nIndex, nOutput, paddingValue=-1, maxNorm=None, normType=None):
        super(LookupTable, self).__init__()
        self.weight = torch.Tensor(nIndex, nOutput)
        self.gradWeight = torch.Tensor(nIndex, nOutput).zero_()
        self.paddingValue = paddingValue
        self.maxNorm = maxNorm
        self.normType = normType
        self.shouldScaleGradByFreq = False

        self._gradOutput = None
        self._sorted = None
        self._indices = None

        self._count = torch.IntTensor()
        self._input = torch.LongTensor()

        self.reset()
LookupTable.py (project: pytorch-dist, author: apaszke)
def type(self, type=None, tensorCache=None):
        if not type:
            return self._type
        super(LookupTable, self).type(type, tensorCache)

        if type == 'torch.cuda.FloatTensor':
            # CUDA uses _sorted and _indices temporary tensors
            self._sorted = torch.cuda.LongTensor()
            self._indices = torch.cuda.LongTensor()
            self._count = torch.cuda.LongTensor()
            self._input = torch.cuda.LongTensor()
        else:
            # self._count and self._input should only be converted if using Cuda
            self._count = torch.IntTensor()
            self._input = torch.LongTensor()

        return self
test_legacy_nn.py (project: pytorch-dist, author: apaszke)
def test_Index(self):
        net = nn.Index(0)

        # test 1D
        input = [torch.Tensor((10, 20, 30)), torch.LongTensor((0, 1, 1, 2))]
        output = net.forward(input)
        self.assertEqual(output, torch.Tensor((10, 20, 20, 30)))

        gradOutput = torch.Tensor((1, 1, 1, 3))
        gradInput = net.backward(input, gradOutput)
        self.assertEqual(gradInput[0], torch.Tensor((1, 2, 3)))

        # test 2D
        input = [torch.Tensor(((10, 20), (30, 40))), torch.LongTensor((0, 0))]
        output = net.forward(input)
        self.assertEqual(output, torch.Tensor(((10, 20), (10, 20))))

        gradOutput = torch.Tensor(((1, 2), (1, 2)))
        gradInput = net.backward(input, gradOutput)
        self.assertEqual(gradInput[0], torch.Tensor(((2, 4), (0, 0))))

        # Check that these don't raise errors
        net.__repr__()
        str(net)
test_torch.py (project: pytorch-dist, author: apaszke)
def test_scatter(self):
        m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
        elems_per_row = random.randint(1, 10)
        dim = random.randrange(3)

        idx_size = [m, n, o]
        idx_size[dim] = elems_per_row
        idx = torch.LongTensor().resize_(*idx_size)
        self._fill_indices(idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o)
        src = torch.Tensor().resize_(*idx_size).normal_()

        actual = torch.zeros(m, n, o).scatter_(dim, idx, src)
        expected = torch.zeros(m, n, o)
        for i in range(idx_size[0]):
            for j in range(idx_size[1]):
                for k in range(idx_size[2]):
                    ii = [i, j, k]
                    ii[dim] = idx[i,j,k]
                    expected[tuple(ii)] = src[i,j,k]
        self.assertEqual(actual, expected, 0)

        idx[0][0][0] = 34
        self.assertRaises(RuntimeError, lambda: torch.zeros(m, n, o).scatter_(dim, idx, src))
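In words, for dim=0 the call being tested writes dest[idx[i][j][k]][j][k] = src[i][j][k], and the final assertion checks that an out-of-range index raises. A concrete 1-D instance:

import torch

dest = torch.zeros(5)
idx = torch.LongTensor([0, 2, 4])
src = torch.Tensor([10., 20., 30.])
dest.scatter_(0, idx, src)
print(dest)  # [10, 0, 20, 0, 30]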
test_torch.py (project: pytorch-dist, author: apaszke)
def test_scatterFill(self):
        m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
        elems_per_row = random.randint(1, 10)
        dim = random.randrange(3)

        val = random.random()
        idx_size = [m, n, o]
        idx_size[dim] = elems_per_row
        idx = torch.LongTensor().resize_(*idx_size)
        self._fill_indices(idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o)

        actual = torch.zeros(m, n, o).scatter_(dim, idx, val)
        expected = torch.zeros(m, n, o)
        for i in range(idx_size[0]):
            for j in range(idx_size[1]):
                for k in range(idx_size[2]):
                    ii = [i, j, k]
                    ii[dim] = idx[i,j,k]
                    expected[tuple(ii)] = val
        self.assertEqual(actual, expected, 0)

        idx[0][0][0] = 28
        self.assertRaises(RuntimeError, lambda: torch.zeros(m, n, o).scatter_(dim, idx, val))
aligned_dataset.py (project: DeblurGAN, author: KupynOrest)
def __getitem__(self, index):
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGB')
        AB = AB.resize((self.opt.loadSizeX * 2, self.opt.loadSizeY), Image.BICUBIC)
        AB = self.transform(AB)

        w_total = AB.size(2)
        w = int(w_total / 2)
        h = AB.size(1)
        w_offset = random.randint(0, max(0, w - self.opt.fineSize - 1))
        h_offset = random.randint(0, max(0, h - self.opt.fineSize - 1))

        A = AB[:, h_offset:h_offset + self.opt.fineSize,
               w_offset:w_offset + self.opt.fineSize]
        B = AB[:, h_offset:h_offset + self.opt.fineSize,
               w + w_offset:w + w_offset + self.opt.fineSize]

        if (not self.opt.no_flip) and random.random() < 0.5:
            idx = [i for i in range(A.size(2) - 1, -1, -1)]
            idx = torch.LongTensor(idx)
            A = A.index_select(2, idx)
            B = B.index_select(2, idx)

        return {'A': A, 'B': B,
                'A_paths': AB_path, 'B_paths': AB_path}
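The index_select with a reversed index vector implements a horizontal flip; a minimal standalone instance of the same trick:

import torch

img = torch.arange(0, 6).view(1, 2, 3)                 # toy C x H x W tensor
rev = torch.LongTensor(list(range(img.size(2) - 1, -1, -1)))
flipped = img.index_select(2, rev)                     # width axis reversed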
classifier.py (project: SentEval, author: facebookresearch)
def prepare_split(self, X, y, validation_data=None, validation_split=None):
        # Preparing validation data
        assert validation_split or validation_data
        if validation_data is not None:
            trainX, trainy = X, y
            devX, devy = validation_data
        else:
            permutation = np.random.permutation(len(X))
            trainidx = permutation[int(validation_split*len(X)):]
            devidx = permutation[0:int(validation_split*len(X))]
            trainX, trainy = X[trainidx], y[trainidx]
            devX, devy = X[devidx], y[devidx]

        if not self.cudaEfficient:
            trainX = torch.FloatTensor(trainX).cuda()
            trainy = torch.LongTensor(trainy).cuda()
            devX = torch.FloatTensor(devX).cuda()
            devy = torch.LongTensor(devy).cuda()
        else:
            trainX = torch.FloatTensor(trainX)
            trainy = torch.LongTensor(trainy)
            devX = torch.FloatTensor(devX)
            devy = torch.LongTensor(devy)

        return trainX, trainy, devX, devy
classifier.py (project: SentEval, author: facebookresearch)
def trainepoch(self, X, y, epoch_size=1):
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + epoch_size):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.LongTensor(permutation[i:i + self.batch_size])
                if isinstance(X, torch.cuda.FloatTensor):
                    idx = idx.cuda()
                Xbatch = Variable(X.index_select(0, idx))
                ybatch = Variable(y.index_select(0, idx))
                if self.cudaEfficient:
                    Xbatch = Xbatch.cuda()
                    ybatch = ybatch.cuda()
                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.data[0])
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += epoch_size
classifier.py (project: SentEval, author: facebookresearch)
def score(self, devX, devy):
        self.model.eval()
        correct = 0
        if not isinstance(devX, torch.cuda.FloatTensor) or self.cudaEfficient:
            devX = torch.FloatTensor(devX).cuda()
            devy = torch.LongTensor(devy).cuda()
        for i in range(0, len(devX), self.batch_size):
            Xbatch = Variable(devX[i:i + self.batch_size], volatile=True)
            ybatch = Variable(devy[i:i + self.batch_size], volatile=True)
            if self.cudaEfficient:
                Xbatch = Xbatch.cuda()
                ybatch = ybatch.cuda()
            output = self.model(Xbatch)
            pred = output.data.max(1)[1]
            correct += pred.long().eq(ybatch.data.long()).sum()
        accuracy = 1.0*correct / len(devX)
        return accuracy
test_field.py (project: text, author: pytorch)
def test_process(self):
        raw_field = data.RawField()
        field = data.Field(sequential=True, use_vocab=False, batch_first=True)

        # Test tensor-like batch data which is accepted by both RawField and Field
        batch = [[1, 2, 3], [2, 3, 4]]
        batch_tensor = torch.LongTensor(batch)

        raw_field_processed = raw_field.process(batch)
        field_processed = field.process(batch, device=-1, train=False)

        assert raw_field_processed == batch
        assert field_processed.data.equal(batch_tensor)

        # Test non-tensor data which is only accepted by RawField
        any_obj = [object() for _ in range(5)]

        raw_field_processed = raw_field.process(any_obj)
        assert any_obj == raw_field_processed

        with pytest.raises(TypeError):
            field.process(any_obj)
field.py (project: text, author: pytorch)
def __init__(
            self, sequential=True, use_vocab=True, init_token=None,
            eos_token=None, fix_length=None, tensor_type=torch.LongTensor,
            preprocessing=None, postprocessing=None, lower=False,
            tokenize=(lambda s: s.split()), include_lengths=False,
            batch_first=False, pad_token="<pad>", unk_token="<unk>",
            pad_first=False):
        self.sequential = sequential
        self.use_vocab = use_vocab
        self.init_token = init_token
        self.eos_token = eos_token
        self.unk_token = unk_token
        self.fix_length = fix_length
        self.tensor_type = tensor_type
        self.preprocessing = preprocessing
        self.postprocessing = postprocessing
        self.lower = lower
        self.tokenize = get_tokenizer(tokenize)
        self.include_lengths = include_lengths
        self.batch_first = batch_first
        self.pad_token = pad_token if self.sequential else None
        self.pad_first = pad_first
shuffledataset.py (project: tnt, author: pytorch)
def resample(self, seed=None):
        """Resample the dataset.

        Args:
            seed (int, optional): Seed for resampling. By default no seed is
            used.
        """
        if seed is not None:
            gen = torch.manual_seed(seed)
        else:
            gen = torch.default_generator

        if self.replacement:
            self.perm = torch.LongTensor(len(self)).random_(
                len(self.dataset), generator=gen)
        else:
            self.perm = torch.randperm(
                len(self.dataset), generator=gen).narrow(0, 0, len(self))
test_datasets.py (project: tnt, author: pytorch)
def testListDataset(self):
        def identity(x): return x
        h = [0, 1, 2]
        d = dataset.ListDataset(elem_list=h, load=identity)
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)

        t = torch.LongTensor([0, 1, 2])
        d = dataset.ListDataset(elem_list=t, load=identity)
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)

        a = np.asarray([0, 1, 2])
        d = dataset.ListDataset(elem_list=a, load=identity)
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)
util_test.py (project: allennlp, author: allenai)
def test_batched_index_select(self):
        indices = numpy.array([[[1, 2],
                                [3, 4]],
                               [[5, 6],
                                [7, 8]]])
        # Each element is a vector of its index.
        targets = torch.ones([2, 10, 3]).cumsum(1) - 1
        # Make the second batch double its indices so the batches differ.
        targets[1, :, :] *= 2
        indices = Variable(torch.LongTensor(indices))
        targets = Variable(targets)
        selected = util.batched_index_select(targets, indices)

        assert list(selected.size()) == [2, 2, 2, 3]
        ones = numpy.ones([3])
        numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
        numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
        numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
        numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)

        numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 10)
        numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 12)
        numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 14)
        numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 16)
util_test.py (project: allennlp, author: allenai)
def test_add_sentence_boundary_token_ids_handles_3D_input(self):
        tensor = Variable(torch.from_numpy(
                numpy.array([[[1, 2, 3, 4],
                              [5, 5, 5, 5],
                              [6, 8, 1, 2]],
                             [[4, 3, 2, 1],
                              [8, 7, 6, 5],
                              [0, 0, 0, 0]]])))
        mask = ((tensor > 0).sum(dim=-1) > 0).type(torch.LongTensor)
        bos = Variable(torch.from_numpy(numpy.array([9, 9, 9, 9])))
        eos = Variable(torch.from_numpy(numpy.array([10, 10, 10, 10])))
        new_tensor, new_mask = util.add_sentence_boundary_token_ids(tensor, mask, bos, eos)
        expected_new_tensor = numpy.array([[[9, 9, 9, 9],
                                            [1, 2, 3, 4],
                                            [5, 5, 5, 5],
                                            [6, 8, 1, 2],
                                            [10, 10, 10, 10]],
                                           [[9, 9, 9, 9],
                                            [4, 3, 2, 1],
                                            [8, 7, 6, 5],
                                            [10, 10, 10, 10],
                                            [0, 0, 0, 0]]])
        assert (new_tensor.data.numpy() == expected_new_tensor).all()
        assert (new_mask.data.numpy() == ((expected_new_tensor > 0).sum(axis=-1) > 0)).all()
text_field.py (project: allennlp, author: allenai)
def as_tensor(self,
                  padding_lengths: Dict[str, int],
                  cuda_device: int = -1,
                  for_training: bool = True) -> Dict[str, torch.Tensor]:
        tensors = {}
        desired_num_tokens = padding_lengths['num_tokens']
        for indexer_name, indexer in self._token_indexers.items():
            padded_array = indexer.pad_token_sequence(self._indexed_tokens[indexer_name],
                                                      desired_num_tokens, padding_lengths)
            # We use the key of the indexer to recognise what the tensor corresponds to within the
            # field (i.e. the result of word indexing, or the result of character indexing, for
            # example).
            # TODO(mattg): we might someday have a TokenIndexer that needs to use something other
            # than a LongTensor here, and it's not clear how to signal that.  Maybe we'll need to
            # add a class method to TokenIndexer to tell us the type?  But we can worry about that
            # when there's a compelling use case for it.
            tensor = Variable(torch.LongTensor(padded_array), volatile=not for_training)
            tensors[indexer_name] = tensor if cuda_device == -1 else tensor.cuda(cuda_device)
        return tensors
util.py (project: allennlp, author: allenai)
def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):
    """
    Compute sequence lengths for each batch element in a tensor using a
    binary mask.

    Parameters
    ----------
    mask : torch.Tensor, required.
        A 2D binary mask of shape (batch_size, sequence_length) to
        calculate the per-batch sequence lengths from.

    Returns
    -------
    A torch.LongTensor of shape (batch_size,) representing the lengths
    of the sequences in the batch.
    """
    return mask.long().sum(-1)
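A quick usage check of the helper above:

import torch

mask = torch.LongTensor([[1, 1, 1, 0],
                         [1, 1, 0, 0]])
print(get_lengths_from_binary_sequence_mask(mask))  # LongTensor([3, 2])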
trainer.py (project: DenseNet, author: kevinzakka)
def accuracy(self, predicted, ground_truth):
        """
        Utility function for calculating the accuracy of the model.

        Params
        ------
        - predicted: (torch.FloatTensor)
        - ground_truth: (torch.LongTensor)

        Returns
        -------
        - acc: (float) % accuracy.
        """
        predicted = torch.max(predicted, 1)[1]
        total = len(ground_truth)
        correct = (predicted == ground_truth).sum()
        acc = 100 * (correct / total)
        return acc
transform.py (project: torch_light, author: ne7ermore)
def sent2tenosr(self, sentence):
        max_len = self.args.max_word_len - 2
        sentence = normalizeString(sentence)
        words = [w for w in sentence.strip().split()]

        if len(words) > max_len:
            words = words[:max_len]

        words = [WORD[BOS]] + words + [WORD[EOS]]
        idx = [self.src_dict[w] if w in self.src_dict else UNK for w in words]

        idx_data = torch.LongTensor(idx)
        idx_position = torch.LongTensor([pos_i+1 if w_i != PAD else 0 for pos_i, w_i in enumerate(idx)])
        idx_data_tensor = Variable(idx_data.unsqueeze(0), volatile=True)
        idx_position_tensor = Variable(idx_position.unsqueeze(0), volatile=True)

        if self.cuda:
            idx_data_tensor = idx_data_tensor.cuda()
            idx_position_tensor = idx_position_tensor.cuda()

        return idx_data_tensor, idx_position_tensor
transforms.py (project: audio, author: pytorch)
def __call__(self, x_mu):
        """

        Args:
            x_mu (FloatTensor/LongTensor or ndarray)

        Returns:
            x (FloatTensor or ndarray)

        """
        mu = self.qc - 1.
        if isinstance(x_mu, np.ndarray):
            x = ((x_mu) / mu) * 2 - 1.
            x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
        elif isinstance(x_mu, (torch.Tensor, torch.LongTensor)):
            if isinstance(x_mu, torch.LongTensor):
                x_mu = x_mu.float()
            mu = torch.FloatTensor([mu])
            x = ((x_mu) / mu) * 2 - 1.
            x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.) / mu
        return x
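Paired with the mu-law encoding transform earlier on this page, decoding recovers the signal up to quantization error; a numpy round-trip sketch with mu = 255 (illustrative, not part of the class):

import numpy as np

mu = 255.
x = np.linspace(-1.0, 1.0, num=9)
# encode (as in the earlier __call__)
x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
codes = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
# decode (as above)
y = (codes / mu) * 2 - 1.
y = np.sign(y) * (np.exp(np.abs(y) * np.log1p(mu)) - 1.) / mu
assert np.allclose(x, y, atol=0.05)  # error bounded by the quantization step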
test_threadutils.py (project: ParlAI, author: facebookresearch)
def test_torch(self):
        try:
            import torch
        except ImportError:
            # pass by default if no torch available
            return

        st = SharedTable({'a': torch.FloatTensor([1]), 'b': torch.LongTensor(2)})
        assert st['a'][0] == 1.0
        assert len(st) == 2
        assert 'b' in st
        del st['b']
        assert 'b' not in st
        assert len(st) == 1

        if torch.cuda.is_available():
            st = SharedTable({'a': torch.cuda.FloatTensor([1]), 'b': torch.cuda.LongTensor(2)})
            assert st['a'][0] == 1.0
            assert len(st) == 2
            assert 'b' in st
            del st['b']
            assert 'b' not in st
            assert len(st) == 1
data.py (project: ParlAI, author: facebookresearch)
def collate(samples, pad_idx, eos_idx):

        def merge(key, left_pad, move_eos_to_beginning=False):
            return LanguagePairDataset.collate_tokens(
                [s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning)

        return {
            'id': torch.LongTensor([s['id'].item() for s in samples]),
            'src_tokens': merge('source', left_pad=LanguagePairDataset.LEFT_PAD_SOURCE),
            # we create a shifted version of targets for feeding the previous
            # output token(s) into the next decoder step
            'input_tokens': merge('target', left_pad=LanguagePairDataset.LEFT_PAD_TARGET,
                                  move_eos_to_beginning=True),
            'target': merge('target', left_pad=LanguagePairDataset.LEFT_PAD_TARGET),
            'ntokens': sum(len(s['target']) for s in samples),
        }

