Python stack() example source code
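
As a quick orientation before the project snippets: torch.stack joins same-shaped tensors along a *new* dimension, unlike torch.cat, which joins along an existing one. A minimal standalone sketch:

import torch

a = torch.zeros(3, 4)
b = torch.ones(3, 4)
s = torch.stack([a, b])        # (2, 3, 4): a new leading dimension
c = torch.cat([a, b], dim=0)   # (6, 4): no new dimension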

sru.py (project: benchmark, author: pytorch)
def forward(self, input, c0=None, return_hidden=True):
        assert input.dim() == 3 # (len, batch, n_in)
        dir_ = 2 if self.bidirectional else 1
        if c0 is None:
            zeros = Variable(input.data.new(
                input.size(1), self.n_out*dir_
            ).zero_())
            c0 = [ zeros for i in range(self.depth) ]
        else:
            assert c0.dim() == 3    # (depth, batch, n_out*dir_)
            c0 = [ x.squeeze(0) for x in c0.chunk(self.depth, 0) ]

        prevx = input
        lstc = []
        for i, rnn in enumerate(self.rnn_lst):
            h, c = rnn(prevx, c0[i])
            prevx = h
            lstc.append(c)

        if return_hidden:
            return prevx, torch.stack(lstc)
        else:
            return prevx
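
A standalone sketch of the shape contract above: stacking the per-layer cell states lstc yields a (depth, batch, n_out*dir_) tensor. Sizes here are made up for illustration:

import torch

depth, batch, n_out = 4, 8, 32
lstc = [torch.randn(batch, n_out) for _ in range(depth)]  # one c per layer
stacked = torch.stack(lstc)                               # (depth, batch, n_out)
assert stacked.shape == (depth, batch, n_out)
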
interface.py (project: sk-torch, author: mattHawthorn)
def fit(self, X: Iterable[T1], y: Iterable[T2],
            X_test: Opt[Iterable[T1]]=None, y_test: Opt[Iterable[T2]]=None,
            batch_size: Opt[int]=None, shuffle: bool=False,
            max_epochs: int=1, min_epochs: int=1, criterion_window: int=5,
            max_training_time: Opt[float]=None,
            batch_report_interval: Opt[int]=None, epoch_report_interval: Opt[int]=None):
        """This method fits the *entire* pipeline, including input normalization. Initialization of weight/bias
        parameters in the torch_module is up to you; there is no obvious canonical way to do it here.
        Returns per-epoch losses and validation losses (if any)."""
        batch_size = batch_size or self.default_batch_size
        if self.should_normalize:
            sample, X = peek(X, self.norm_n_samples)
            if self.encode_input:
                sample = [self.encode_input(x) for x in sample]
            sample = stack(sample)
            self.estimate_normalization(sample)

        return self.update(X=X, y=y, X_test=X_test, y_test=y_test, batch_size=batch_size, shuffle=shuffle,
                           max_epochs=max_epochs, min_epochs=min_epochs,
                           criterion_window=criterion_window,
                           max_training_time=max_training_time,
                           batch_report_interval=batch_report_interval, epoch_report_interval=epoch_report_interval)
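
peek and stack here are project utilities (stack is presumably torch.stack imported at module level). A plausible minimal re-implementation of peek, purely for orientation, not sk-torch's actual code: it inspects the first n items of an iterable without losing them.

from itertools import chain, islice

def peek(iterable, n):
    # Hypothetical stand-in for sk-torch's peek(): return the first n items
    # plus an iterator equivalent to the original, with nothing consumed.
    it = iter(iterable)
    head = list(islice(it, n))
    return head, chain(head, it)
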
interface.py (project: sk-torch, author: mattHawthorn)
def fit_zipped(self, dataset: Iterable[Tuple[T1, T2]], test_dataset: Opt[Iterable[Tuple[T1, T2]]]=None,
                   batch_size: Opt[int] = None,
                   max_epochs: int = 1, min_epochs: int = 1, criterion_window: int = 5,
                   max_training_time: Opt[float] = None,
                   batch_report_interval: Opt[int] = None, epoch_report_interval: Opt[int] = None):
        """For fitting to an iterable sequence of pairs, such as may arise in very large streaming datasets from sources
        that don't fit the random access and known-length requirements of a torch.data.Dataset (e.g. a sequence of
        sentences split from a set of text files as might arise in NLP applications.
        Like TorchModel.fit(), this estimates input normalization before the weight update, and weight initialization of
        the torch_module is up to you. Returns per-epoch losses and validation losses (if any).
        This method handles packaging X and y into a batch iterator of the kind that torch modules expect."""
        batch_size = batch_size or self.default_batch_size
        if self.should_normalize:
            sample, dataset = peek(dataset, self.norm_n_samples)
            sample = [t[0] for t in sample]
            if self.encode_input:
                sample = [self.encode_input(x) for x in sample]
            sample = stack(sample)
            self.estimate_normalization(sample)

        return self.update_zipped(dataset=dataset, test_dataset=test_dataset, batch_size=batch_size,
                                  max_epochs=max_epochs, min_epochs=min_epochs,
                                  criterion_window=criterion_window,
                                  max_training_time=max_training_time,
                                  batch_report_interval=batch_report_interval, epoch_report_interval=epoch_report_interval)
data.py (project: dong_iccv_2017, author: woozzu)
def _get_word_vectors(self, desc, word_embedding):
        output = []
        len_desc = []
        for i in range(desc.shape[1]):
            words = self._nums2chars(desc[:, i])
            words = split_sentence_into_words(words)
            word_vecs = torch.Tensor([word_embedding[w] for w in words])
            # zero padding
            if len(words) < self.max_word_length:
                word_vecs = torch.cat((
                    word_vecs,
                    torch.zeros(self.max_word_length - len(words), word_vecs.size(1))
                ))
            output.append(word_vecs)
            len_desc.append(len(words))
        return torch.stack(output), len_desc
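
The pad-then-stack pattern above is what makes torch.stack legal here: every sequence is brought to max_word_length first, since stack requires identical shapes. A self-contained sketch with made-up sizes:

import torch

max_len, dim = 5, 3
seqs = [torch.randn(n, dim) for n in (2, 5, 4)]  # ragged lengths
padded = [torch.cat((s, torch.zeros(max_len - s.size(0), dim)))
          if s.size(0) < max_len else s for s in seqs]
batch = torch.stack(padded)                      # (3, max_len, dim)
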
voc0712.py (project: ssd_pytorch, author: miraclebiu)
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for _, sample in enumerate(batch):
        for _, tup in enumerate(sample):
            #pdb.set_trace()
            if torch.is_tensor(tup):
                imgs.append(tup)
            elif isinstance(tup, type([])):
                annos = [torch.Tensor(a) for a in tup]
                #pdb.set_trace()
                targets.append(torch.stack(annos, 0))

    return (torch.stack(imgs, 0), targets)
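
A hedged usage sketch: this collate fn is meant to be handed to a DataLoader. The ToyDetection dataset below is invented purely to show the expected (image, annotation-list) sample format.

import torch
from torch.utils.data import DataLoader, Dataset

class ToyDetection(Dataset):
    # Illustrative stand-in for a VOC-style detection dataset.
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        img = torch.zeros(3, 300, 300)
        annos = [[0.1, 0.1, 0.5, 0.5, 1.0]]  # one [x1, y1, x2, y2, label] box
        return img, annos

loader = DataLoader(ToyDetection(), batch_size=4, collate_fn=detection_collate)
imgs, targets = next(iter(loader))  # imgs: (4, 3, 300, 300); targets: list of 4 tensors
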
model.py (project: Seq2Seq-PyTorch, author: MaximumEntropy)
def forward(self, input, hidden, ctx, ctx_mask=None):
        """Propogate input through the layer."""
        h_0, c_0 = hidden
        h_1, c_1 = [], []
        for i, layer in enumerate(self.layers):
            if ctx_mask is not None:
                ctx_mask = torch.ByteTensor(
                    ctx_mask.data.cpu().numpy().astype(np.int32).tolist()
                ).cuda()
            output, (h_1_i, c_1_i) = layer(input, (h_0, c_0), ctx, ctx_mask)

            input = output

            if i != len(self.layers) - 1:  # apply dropout between layers, not after the last
                input = self.dropout(input)

            h_1 += [h_1_i]
            c_1 += [c_1_i]

        h_1 = torch.stack(h_1)
        c_1 = torch.stack(c_1)

        return input, (h_1, c_1)
voc0712.py (project: yolov2, author: zhangkaij)
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for sample in batch:
        imgs.append(sample[0])
        targets.append(torch.FloatTensor(sample[1]))
    return torch.stack(imgs, 0), targets
DistanceNetwork.py (project: MatchingNetworks, author: gitabcworld)
def forward(self, support_set, input_image):

        """
        Produces pdfs over the support set classes for the target set image.
        :param support_set: The embeddings of the support set images, tensor of shape [sequence_length, batch_size, 64]
        :param input_image: The embedding of the target image, tensor of shape [batch_size, 64]
        :return: Softmax pdf. Tensor with cosine similarities of shape [batch_size, sequence_length]
        """
        eps = 1e-10
        similarities = []
        for support_image in support_set:
            sum_support = torch.sum(torch.pow(support_image, 2), 1)
            support_magnitude = sum_support.clamp(eps, float("inf")).rsqrt()
            dot_product = input_image.unsqueeze(1).bmm(support_image.unsqueeze(2)).squeeze()
            cosine_similarity = dot_product * support_magnitude
            similarities.append(cosine_similarity)
        similarities = torch.stack(similarities)
        return similarities
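
Note that the snippet normalizes only by the support magnitudes. A sketch with the same output shape but full cosine similarity (also dividing by the target norm), using F.cosine_similarity:

import torch
import torch.nn.functional as F

seq_len, batch, dim = 5, 2, 64
support_set = torch.randn(seq_len, batch, dim)
target = torch.randn(batch, dim)
sims = torch.stack([F.cosine_similarity(s, target, dim=1)
                    for s in support_set])  # (seq_len, batch)
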
recurrent.py (project: seq2seq.pytorch, author: eladhoffer)
def forward(self, inputs, hidden):
        def select_layer(h_state, i):  # works for LSTM, GRU, and plain RNN states
            if isinstance(h_state, tuple):
                return tuple([select_layer(s, i) for s in h_state])
            else:
                return h_state[i]

        next_hidden = []
        for i, layer in enumerate(self.layers):
            next_hidden_i = layer(inputs, select_layer(hidden, i))
            output = next_hidden_i[0] if isinstance(next_hidden_i, tuple) \
                else next_hidden_i
            if i + 1 != self.num_layers:
                output = self.dropout(output)
            if self.residual:
                inputs = output + inputs
            else:
                inputs = output
            next_hidden.append(next_hidden_i)
        if isinstance(hidden, tuple):
            next_hidden = tuple([torch.stack(h) for h in zip(*next_hidden)])
        else:
            next_hidden = torch.stack(next_hidden)
        return inputs, next_hidden
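
The zip(*next_hidden) trick above transposes a per-layer list of (h, c) pairs into stacked h and c tensors. A self-contained sketch of that step:

import torch

num_layers, batch, hid = 3, 2, 8
per_layer = [(torch.randn(batch, hid), torch.randn(batch, hid))
             for _ in range(num_layers)]          # one (h, c) per layer
h, c = (torch.stack(t) for t in zip(*per_layer))  # each: (num_layers, batch, hid)
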
lstm_attention.py (project: pytorch-seq2seq, author: rowanz)
def _sample(self, state, context, mask, max_len=20):
        """
        Performs sampling
        """
        batch_size = state.size(0)

        toks = [const_row(self.bos_token, batch_size, volatile=True)]

        lens = torch.IntTensor(batch_size)
        if torch.cuda.is_available():
            lens = lens.cuda()

        for l in range(max_len + 1):  # +1 because of EOS
            out, state, alpha = self._lstm_loop(state, self.embedding(toks[-1]), context, mask)

            # Do argmax (since we're doing greedy decoding)
            toks.append(out.max(1)[1].squeeze(1))

            lens[(toks[-1].data == self.eos_token) & (lens == 0)] = l+1
            if all(lens):
                break
        lens[lens == 0] = max_len+1
        return torch.stack(toks, 0), lens
dataloader.py (project: Text-Summarization, author: hashbangCoder)
def evalPreproc(self, sample):   
        # sample length = 1
        # limit max article size to 400 tokens     
        extIntArticles, intRevArticles = [], []
        max_article_oov = 0        
        article = sample['article'].split(' ')                  
        # get article  int-tokenized
        _intArticle, _extIntArticle, article_oov, _ = self.makeEncoderInput(article)
        if max_article_oov < len(article_oov):
            max_article_oov = len(article_oov)
        _intRevArticle = list(reversed(_intArticle))
        # _intAbstract, _extIntAbstract, abs_len = self.makeDecoderInput(abstract, article_oov)

        extIntArticles.append(_extIntArticle)            
        intRevArticles.append(_intRevArticle)

        padExtArticles = [torch.LongTensor(item) for item in extIntArticles]        
        padRevArticles = [torch.LongTensor(item) for item in intRevArticles]                

        batchExtArticles = torch.stack(padExtArticles, 0)
        # replace temp ids with unk token id for enc input
        batchArticles = batchExtArticles.clone().masked_fill_((batchExtArticles > self.vocabSize), self.word2id['<unk>'])
        batchRevArticles = torch.stack(padRevArticles, 0)

        return batchArticles, batchRevArticles, batchExtArticles, max_article_oov, article_oov, sample['article'], sample['abstract']
dataloader.py (project: Text-Summarization, author: hashbangCoder)
def getInputTextSample(self, tokenized_text):
        extIntArticles, intRevArticles = [], []
        max_article_oov = 0        
        # get article  int-tokenized
        _intArticle, _extIntArticle, article_oov, _ = self.makeEncoderInput(tokenized_text)
        if max_article_oov < len(article_oov):
            max_article_oov = len(article_oov)
        _intRevArticle = list(reversed(_intArticle))

        extIntArticles.append(_extIntArticle)            
        intRevArticles.append(_intRevArticle)

        padExtArticles = [torch.LongTensor(item) for item in extIntArticles]        
        padRevArticles = [torch.LongTensor(item) for item in intRevArticles]                

        batchExtArticles = torch.stack(padExtArticles, 0)
        # replace temp ids with unk token id for enc input
        batchArticles = batchExtArticles.clone().masked_fill_((batchExtArticles > self.vocabSize), self.word2id['<unk>'])
        batchRevArticles = torch.stack(padRevArticles, 0)

        return batchArticles, batchRevArticles, batchExtArticles, max_article_oov, article_oov
spbatch.py (project: qpth, author: locuslab)
def solve_kkt(Ks, K, Ktildes, Ktilde,
              rx, rs, rz, ry, niter=1):
    nBatch = len(Ks)
    nz = rx.size(1)
    nineq = rz.size(1)
    neq = ry.size(1)

    r = -torch.cat((rx, rs, rz, ry), 1)

    l = torch.spbqrfactsolve(*([r] + Ktilde))
    res = torch.stack([r[i] - torch.mm(Ks[i], l[i].unsqueeze(1))
                       for i in range(nBatch)])
    for k in range(niter):
        d = torch.spbqrfactsolve(*([res] + Ktilde))
        l = l + d
        res = torch.stack([r[i] - torch.mm(Ks[i], l[i].unsqueeze(1))
                           for i in range(nBatch)])

    solx = l[:, :nz]
    sols = l[:, nz:nz + nineq]
    solz = l[:, nz + nineq:nz + 2 * nineq]
    soly = l[:, nz + 2 * nineq:nz + 2 * nineq + neq]

    return solx, sols, solz, soly
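
The stack calls above batch per-problem residuals r_i - K_i * l_i. A dense standalone sketch of that one step, with an explicit squeeze so the (n, 1) matmul result matches the (n,) right-hand side; the shapes here are assumptions for illustration, not qpth's API:

import torch

nBatch, n = 3, 5
Ks = [torch.randn(n, n) for _ in range(nBatch)]
r = torch.randn(nBatch, n)
l = torch.randn(nBatch, n)
res = torch.stack([r[i] - torch.mm(Ks[i], l[i].unsqueeze(1)).squeeze(1)
                   for i in range(nBatch)])       # (nBatch, n)
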
data.py (project: loop, author: facebookresearch)
def __init__(self, src, trgt, spkr, seq_len):
        self.seq_len = seq_len
        self.start = True

        self.speakers = spkr
        self.srcBatch = src[0]
        self.srcLenths = src[1]

        # split batch
        self.tgtBatch = list(torch.split(trgt[0], self.seq_len, 0))
        self.tgtBatch.reverse()
        self.len = len(self.tgtBatch)

        # split length list
        batch_seq_len = len(self.tgtBatch)
        self.tgtLenths = [self.split_length(l, batch_seq_len) for l in trgt[1]]
        self.tgtLenths = torch.stack(self.tgtLenths)
        self.tgtLenths = list(torch.split(self.tgtLenths, 1, 1))
        self.tgtLenths = [x.squeeze() for x in self.tgtLenths]
        self.tgtLenths.reverse()

        assert len(self.tgtLenths) == len(self.tgtBatch)
AttModel.py (project: self-critical.pytorch, author: ruotianluo)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state):
        prev_h = state[0][-1]
        att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)

        h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))

        att = self.attention(h_att, att_feats, p_att_feats)

        lang_lstm_input = torch.cat([att, h_att], 1)
        # alternative: lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1)

        h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))

        output = F.dropout(h_lang, self.drop_prob_lm, self.training)
        state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))

        return output, state
train.py (project: examples, author: pytorch)
def forward(self, input, future = 0):
        outputs = []
        h_t = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)
        c_t = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)
        h_t2 = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)
        c_t2 = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)

        for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
            h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        for i in range(future):  # if we should predict the future
            h_t, c_t = self.lstm1(output, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        outputs = torch.stack(outputs, 1).squeeze(2)
        return outputs
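
The per-timestep pattern above (collect outputs in a list, stack once at the end) is an idiom worth isolating:

import torch

steps = [torch.randn(4, 1) for _ in range(10)]  # 10 outputs of shape (batch, 1)
seq = torch.stack(steps, 1).squeeze(2)          # (4, 10)
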
bbox_transform.py (project: faster-rcnn.pytorch, author: jwyang)
def bbox_transform(ex_rois, gt_rois):
    ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
    ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights

    gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
    gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights

    targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
    targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
    targets_dw = torch.log(gt_widths / ex_widths)
    targets_dh = torch.log(gt_heights / ex_heights)

    targets = torch.stack(
        (targets_dx, targets_dy, targets_dw, targets_dh),1)

    return targets
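
A quick usage sketch with two made-up ROI boxes (coordinates are arbitrary):

import torch

ex_rois = torch.Tensor([[0, 0, 9, 9], [10, 10, 29, 19]])
gt_rois = torch.Tensor([[1, 1, 10, 10], [12, 10, 31, 19]])
deltas = bbox_transform(ex_rois, gt_rois)  # (2, 4): rows of (dx, dy, dw, dh)
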
goru.py (project: URNN-PyTorch, author: jingli9111)
def forward(self, input_, length=None, hx=None):
        if self.batch_first:
            input_ = input_.transpose(0, 1)
        max_time, batch_size, _ = input_.size()
        if length is None:
            length = Variable(torch.LongTensor([max_time] * batch_size))
            if input_.is_cuda:
                length = length.cuda()
        if hx is None:
            hx = Variable(input_.data.new(batch_size, self.hidden_size).zero_())

        h_n = []
        layer_output = None
        for layer in range(self.num_layer):
            layer_output, layer_h_n = GORU._forward_rnn(
                cell=self.cells[layer], input_=input_, length=length, hx=hx)
            input_ = self.dropout_layer(layer_output)
            h_n.append(layer_h_n)
        output = layer_output
        h_n = torch.stack(h_n, 0)
        return output, h_n
vae.py (project: seqmod, author: emanjavacas)
def argmax(self, z, max_len):
        # local variables
        eos, bos = self.src_dict.get_eos(), self.src_dict.get_bos()
        batch = z.size(0)
        # output variables
        scores, preds, mask = 0, [], z.data.new(batch).long() + 1
        # model inputs
        hidden = self.decoder.init_hidden_for(z)
        prev = Variable(z.data.new(batch).zero_().long() + bos, volatile=True)

        for _ in range(max_len):
            prev_emb = self.embeddings(prev).squeeze(0)
            dec_out, hidden = self.decoder(prev_emb, hidden, z=z)
            dec_out = self.project(dec_out.unsqueeze(0))

            score, pred = dec_out.max(1)
            scores += score.squeeze().data
            preds.append(pred.squeeze().data)
            prev = pred

            mask = mask * (pred.squeeze().data[0] != eos)
            if mask.int().sum() == 0:
                break

        return scores.tolist(), torch.stack(preds).transpose(0, 1).tolist()
lm.py (project: seqmod, author: emanjavacas)
def forward(self, outs, emb):
        """
        Runs attention for a given input sequence

        Returns: output, weights
        --------
        output: torch.Tensor (seq_len x batch_size x hid_dim)
        weights: list of torch.Tensor(batch_size x 0:t-1) of length seq_len
        """
        emb_att = self.attn.project_emb(emb)
        output, weights = [], []
        for idx, hid in enumerate(outs):
            t = max(0, idx-1)  # use same hid at t=0
            context, weight = self.attn(
                outs[t], emb[:max(1, t)], emb_att=emb_att[:max(1, t)])
            output.append(self.hid2hid(hid) + self.emb2hid(context))
            weights.append(weight)
        return torch.stack(output), weights

