Python multinomial() example source code
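Every snippet below centers on torch.multinomial(input, num_samples, replacement=False), which draws index samples in proportion to the non-negative weights in input (normalization is handled internally). A minimal standalone sketch, with illustrative values:

import torch

weights = torch.tensor([0.1, 0.3, 0.6])  # non-negative weights; need not sum to 1
idx = torch.multinomial(weights, num_samples=4, replacement=True)
print(idx)  # e.g. tensor([2, 2, 1, 2]); higher-weight indices dominate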

neuralSearch.py (project: TikZ, author: ellisk42)
def sample(self, seed, maximumLength, T = 1):
        h = self.h0(seed).view(self.layers, 1, self.H)

        accumulator = ["START"]
        for _ in range(maximumLength):
            i = self.targetsOfSymbols([accumulator[-1]])[:,0]
            output, h = self(i,h)
            distribution = output.data.view(-1)/T
            distribution = F.log_softmax(distribution).data
            distribution = distribution.exp()

            c = torch.multinomial(distribution,1)[0]
            if self.lexicon[c] == "END": break

            accumulator.append(self.lexicon[c])

        return accumulator[1:]
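The pattern in sample() above (divide scores by a temperature T, softmax, sample one index) recurs throughout this page. A condensed standalone sketch; the helper name sample_with_temperature is mine, not from the project:

import torch
import torch.nn.functional as F

def sample_with_temperature(logits, T=1.0):
    # Higher T flattens the distribution; lower T concentrates it on the argmax.
    probs = F.softmax(logits / T, dim=-1)
    return torch.multinomial(probs, 1).item()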
noTraceBaseline.py (project: TikZ, author: ellisk42)
def sample(self, features):
        result = ["START"]

        # (1,1,F)
        features = features.view(-1).unsqueeze(0).unsqueeze(0)
        #features: 1x1x2560

        states = None

        while True:
            e = self.embedding(variable([symbolToIndex[result[-1]]]).view((1,-1)))
            recurrentInput = torch.cat((features,e),2)
            output, states = self.rnn(recurrentInput,states)
            distribution = self.tokenPrediction(output).view(-1)
            distribution = F.log_softmax(distribution).data.exp()
            draw = torch.multinomial(distribution,1)[0]
            c = LEXICON[draw]
            if len(result) > 20 or c == "END":
                return result[1:]
            else:
                result.append(c)
train.py (project: char-rnn, author: hiepph)
def generate(self, prime_str, predict_len=100, temperature=0.8):
        predicted = prime_str

        hidden = self.decoder.init_hidden()
        prime_input = char_tensor(prime_str, self.decoder.gpu)

        # Use prime string to build up hidden state
        for p in range(len(prime_str) - 1):
            _, hidden = self.decoder(prime_input[p], hidden)

        inp  = prime_input[-1]
        for p in range(predict_len):
            out, hidden = self.decoder(inp, hidden)

            # Sample from the network as a multinomial distribution
            out_dist = out.data.view(-1).div(temperature).exp()
            top_i = torch.multinomial(out_dist, 1)[0]

            # Add predicted character to string and use as next input
            predicted_char = all_characters[top_i]
            predicted += predicted_char
            inp = char_tensor(predicted_char, self.decoder.gpu)

        return predicted
generator.py (project: seqGAN, author: suragnair)
def sample(self, num_samples, start_letter=0):
        """
        Samples the network and returns num_samples samples of length max_seq_len.

        Outputs: samples
            - samples: num_samples x max_seq_length (a sampled sequence in each row)
        """

        samples = torch.zeros(num_samples, self.max_seq_len).type(torch.LongTensor)

        h = self.init_hidden(num_samples)
        inp = autograd.Variable(torch.LongTensor([start_letter]*num_samples))

        if self.gpu:
            samples = samples.cuda()
            inp = inp.cuda()

        for i in range(self.max_seq_len):
            out, h = self.forward(inp, h)               # out: num_samples x vocab_size
            out = torch.multinomial(torch.exp(out), 1)  # num_samples x 1 (sampling from each row)
            samples[:, i] = out.data

            inp = out.view(-1)

        return samples
test_torch.py (project: pytorch-dist, author: apaszke)
def test_multinomial(self):
        # with replacement
        n_row = 3
        for n_col in range(4, 5+1):
            prob_dist = torch.rand(n_row, n_col)
            prob_dist.select(1, n_col-1).fill_(0) #index n_col-1 shouldn't be sampled
            n_sample = n_col
            sample_indices = torch.multinomial(prob_dist, n_sample, True)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for index in product(range(n_row), range(n_sample)):
                self.assertNotEqual(sample_indices[index], n_col-1, "sampled an index with zero probability")

        # without replacement
        n_row = 3
        for n_col in range(4, 5+1):
            prob_dist = torch.rand(n_row, n_col)
            prob_dist.select(1, n_col-1).fill_(0) #index n_col-1 shouldn't be sampled
            n_sample = 3
            sample_indices = torch.multinomial(prob_dist, n_sample, False)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for i in range(n_row):
                row_samples = {}
                for j in range(n_sample):
                    sample_idx = sample_indices[i,j]
                    self.assertNotEqual(sample_idx, n_col-1,
                            "sampled an index with zero probability")
                    self.assertNotIn(sample_idx, row_samples, "sampled an index twice")
                    row_samples[sample_idx] = True

        # vector
        n_col = 4
        prob_dist = torch.rand(n_col)
        n_sample = n_col
        sample_indices = torch.multinomial(prob_dist, n_sample, True)
        self.assertEqual(sample_indices.dim(), 1, "wrong number of dimensions")
        self.assertEqual(prob_dist.dim(), 1, "wrong number of prob_dist dimensions")
        self.assertEqual(sample_indices.size(0), n_sample, "wrong number of samples")
util.py (project: pyro, author: uber)
def torch_multinomial(input, num_samples, replacement=False):
    """
    Like `torch.multinomial()` but works with cuda tensors.
    Does not support keyword argument `out`.
    """
    if input.is_cuda:
        return torch_multinomial(input.cpu(), num_samples, replacement).cuda()
    else:
        return torch.multinomial(input, num_samples, replacement)
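The CPU round-trip suggests torch.multinomial lacked CUDA support at the time; the wrapper is called exactly like the original. A hypothetical call:

probs = torch.ones(10).cuda()                  # uniform weights on the GPU
idx = torch_multinomial(probs, num_samples=3)  # sampled on CPU, result moved back to GPU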
generate.py (project: Tree-LSTM-LM, author: vgene)
def generate(model, prime_str='A', predict_len=100, temperature=0.8, cuda=False):
    hidden = model.init_hidden(1)
    tensor = char_tensor(prime_str, model.mapping)
    prime_input = Variable(tensor.unsqueeze(0))
    #print(prime_input)
    if cuda:
        hidden = tuple(h.cuda() for h in hidden)
        prime_input = prime_input.cuda()
    predicted = prime_str
    model.seq_length = 1

    #print(hidden)
    #print(prime_input[:,0])
    # Use priming string to "build up" hidden state
    for p in range(len(prime_str) - 1):
        _, hidden = model(prime_input[:,p], hidden)

    inp = prime_input[:,-1]

    for p in range(predict_len):
        output, hidden = model(inp, hidden)

        # Sample from the network as a multinomial distribution
        output_dist = output.data.view(-1).div(temperature).exp()
        top_i = torch.multinomial(output_dist, 1)[0]

        # Add predicted character to string and use as next input
        predicted_char = model.mapping[top_i]
        predicted += predicted_char
        inp = Variable(char_tensor(predicted_char, model.mapping).unsqueeze(0))
        if cuda:
            inp = inp.cuda()

    return predicted

# Run as standalone script
train.py (project: neural-hacker-typer, author: anjishnu)
def generate(self,
                  prime_str='int ',
                  predict_len=100,
                  temperature=0.1,
                  cuda=False,
                  args=None,
                  hidden=None):

          prime_input = Variable(char_tensor(prime_str).unsqueeze(0))

          if not hidden:
               hidden = decoder.init_hidden(1)
               prime_input = Variable(char_tensor(prime_str).unsqueeze(0))

               if cuda:
                    hidden = hidden.cuda()
                    prime_input = prime_input.cuda()        
                    # Use priming string to "build up" hidden state
               for p in range(len(prime_str) - 1):
                    _, hidden = decoder(prime_input[:,p], hidden)        

          predicted = ''
          inp = prime_input[:,-1]
          p_list = []


          for p in range(predict_len):
               output, hidden = decoder(inp, hidden)        
               # Sample from the network as a multinomial distribution
               output_dist = output.data.view(-1).div(temperature).exp()
               top_i = torch.multinomial(output_dist, 1)[0]
               p_list.append(top_i)
               # Add predicted character to string and use as next input
               predicted_char = all_characters[top_i]

               predicted += predicted_char
               inp = Variable(char_tensor(predicted_char).unsqueeze(0))
               if cuda: inp = inp.cuda()

          # print (p_list)
          return predicted, hidden
train.py (project: neural-hacker-typer, author: anjishnu)
def generate(decoder,
             prime_str='int ',
             predict_len=100,
             temperature=0.35,
             cuda=False,
             args=None,
             hidden=None):

     prime_input = Variable(char_tensor(prime_str).unsqueeze(0))

     if not hidden:
          hidden = decoder.init_hidden(1)
          prime_input = Variable(char_tensor(prime_str).unsqueeze(0))

          if cuda:
               hidden = hidden.cuda()
               prime_input = prime_input.cuda()        
          # Use priming string to "build up" hidden state
          for p in range(len(prime_str) - 1):
               _, hidden = decoder(prime_input[:,p], hidden)        

     predicted = ''
     inp = prime_input[:,-1]
     p_list = []


     for p in range(predict_len):
          output, hidden = decoder(inp, hidden)        
          # Sample from the network as a multinomial distribution
          output_dist = output.data.view(-1).div(temperature).exp()
          top_i = torch.multinomial(output_dist, 1)[0]
          p_list.append(top_i)
          # Add predicted character to string and use as next input
          predicted_char = all_characters[top_i]

          predicted += predicted_char
          inp = Variable(char_tensor(predicted_char).unsqueeze(0))
          if cuda: inp = inp.cuda()

     # print (p_list)
     return predicted, hidden
pytorch_model.py (project: char-rnn-text-generation, author: yxtay)
def sample_from_probs(probs, top_n=10):
    """
    truncated weighted random choice.
    """
    _, indices = torch.sort(probs)
    # set probabilities after top_n to 0
    probs[indices.data[:-top_n]] = 0
    sampled_index = torch.multinomial(probs, 1)
    return sampled_index
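A usage sketch of the top-n truncation above, using plain tensors (on modern PyTorch the .data indirection is unnecessary):

import torch

probs = torch.softmax(torch.randn(100), dim=0)  # toy distribution over 100 tokens
_, indices = torch.sort(probs)
probs[indices[:-10]] = 0                        # zero out all but the 10 largest
token = torch.multinomial(probs, 1)             # sample only among the top 10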
model.py (project: treehopper, author: tomekkorbak)
def node_forward(self, inputs, child_c, child_h, training):
        child_h_sum = F.torch.sum(torch.squeeze(child_h, 1), 0, keepdim = True)

        i = F.sigmoid(self.ix(inputs)+self.ih(child_h_sum))
        o = F.sigmoid(self.ox(inputs)+self.oh(child_h_sum))
        u = F.tanh(self.ux(inputs)+self.uh(child_h_sum))

        # add extra singleton dimension
        fx = F.torch.unsqueeze(self.fx(inputs), 1)
        f = F.torch.cat([self.fh(child_hi) + torch.squeeze(fx, 1) for child_hi in child_h], 0)
        # f = torch.squeeze(f, 0)
        f = F.sigmoid(f)
        # removing extra singleton dimension
        f = F.torch.unsqueeze(f, 1)
        fc = F.torch.squeeze(F.torch.mul(f, child_c), 1)

        idx = Var(torch.multinomial(torch.ones(child_c.size(0)), 1), requires_grad=False)
        if self.cuda_flag:
            idx = idx.cuda()

        c = zoneout(
            current_input=F.torch.mul(i, u) + F.torch.sum(fc, 0, keepdim=True),
            previous_input=F.torch.squeeze(child_c.index_select(0, idx), 0) if self.zoneout_choose_child else F.torch.sum(torch.squeeze(child_c, 1), 0, keepdim=True),
            p=self.recurrent_dropout_c,
            training=training,
            mask=self.mask if self.commons_mask else None
        )
        h = zoneout(
            current_input=F.torch.mul(o, F.tanh(c)),
            previous_input=F.torch.squeeze(child_h.index_select(0, idx), 0) if self.zoneout_choose_child else child_h_sum,
            p=self.recurrent_dropout_h,
            training=training,
            mask=self.mask if self.commons_mask else None
        )

        return c, h
neg.py (project: pytorch_NEG_loss, author: kefirski)
def sample(self, num_sample):
        """
        draws a sample from classes based on weights
        """
        return t.multinomial(self.weights, num_sample, True)
sampler.py (project: pytorch, author: tylergenter)
def __iter__(self):
        return iter(torch.multinomial(self.weights, self.num_samples, self.replacement))
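This __iter__ is the heart of torch.utils.data.WeightedRandomSampler; a short usage example with illustrative weights:

from torch.utils.data import WeightedRandomSampler

weights = [0.1, 0.9, 0.9, 0.1]  # per-example sampling weights
sampler = WeightedRandomSampler(weights, num_samples=4, replacement=True)
print(list(sampler))            # e.g. [1, 2, 2, 1]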
test_torch.py (project: pytorch, author: tylergenter)
def test_multinomial(self):
        # with replacement
        n_row = 3
        for n_col in range(4, 5 + 1):
            prob_dist = torch.rand(n_row, n_col)
            prob_dist.select(1, n_col - 1).fill_(0)  # index n_col - 1 shouldn't be sampled
            n_sample = n_col
            sample_indices = torch.multinomial(prob_dist, n_sample, True)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for index in product(range(n_row), range(n_sample)):
                self.assertNotEqual(sample_indices[index], n_col - 1, "sampled an index with zero probability")

        # without replacement
        n_row = 3
        for n_col in range(4, 5 + 1):
            prob_dist = torch.rand(n_row, n_col)
            prob_dist.select(1, n_col - 1).fill_(0)  # index n_col - 1 shouldn't be sampled
            n_sample = 3
            sample_indices = torch.multinomial(prob_dist, n_sample, False)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for i in range(n_row):
                row_samples = {}
                for j in range(n_sample):
                    sample_idx = sample_indices[i, j]
                    self.assertNotEqual(sample_idx, n_col - 1,
                                        "sampled an index with zero probability")
                    self.assertNotIn(sample_idx, row_samples, "sampled an index twice")
                    row_samples[sample_idx] = True

        # vector
        n_col = 4
        prob_dist = torch.rand(n_col)
        n_sample = n_col
        sample_indices = torch.multinomial(prob_dist, n_sample, True)
        self.assertEqual(sample_indices.dim(), 1, "wrong number of dimensions")
        self.assertEqual(prob_dist.dim(), 1, "wrong number of prob_dist dimensions")
        self.assertEqual(sample_indices.size(0), n_sample, "wrong number of samples")
data.py (project: sk-torch, author: mattHawthorn)
def neg_samples(self, batch_size: int):
        n_samples = batch_size * self.n_neg_samples
        return multinomial(self.output_dist, num_samples=n_samples, replacement=True)
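Here self.output_dist is the noise distribution for negative sampling; its construction is not shown, but word2vec-style code conventionally smooths unigram counts with a 0.75 exponent. A sketch under that assumption:

import torch

counts = torch.tensor([5.0, 1.0, 2.0, 10.0])  # raw token frequencies (illustrative)
output_dist = counts.pow(0.75)                # smoothed unigram noise distribution
negatives = torch.multinomial(output_dist, 6, replacement=True)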
sampler.py (project: pytorch-coriander, author: hughperkins)
def __iter__(self):
        return iter(torch.multinomial(self.weights, self.num_samples, self.replacement))
test_torch.py (project: pytorch-coriander, author: hughperkins)
def test_multinomial(self):
        # with replacement
        n_row = 3
        for n_col in range(4, 5 + 1):
            prob_dist = torch.rand(n_row, n_col)
            prob_dist.select(1, n_col - 1).fill_(0)  # index n_col - 1 shouldn't be sampled
            n_sample = n_col
            sample_indices = torch.multinomial(prob_dist, n_sample, True)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for index in product(range(n_row), range(n_sample)):
                self.assertNotEqual(sample_indices[index], n_col - 1, "sampled an index with zero probability")

        # without replacement
        n_row = 3
        for n_col in range(4, 5 + 1):
            prob_dist = torch.rand(n_row, n_col)
            prob_dist.select(1, n_col - 1).fill_(0)  # index n_col - 1 shouldn't be sampled
            n_sample = 3
            sample_indices = torch.multinomial(prob_dist, n_sample, False)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for i in range(n_row):
                row_samples = {}
                for j in range(n_sample):
                    sample_idx = sample_indices[i, j]
                    self.assertNotEqual(sample_idx, n_col - 1,
                                        "sampled an index with zero probability")
                    self.assertNotIn(sample_idx, row_samples, "sampled an index twice")
                    row_samples[sample_idx] = True

        # vector
        n_col = 4
        prob_dist = torch.rand(n_col)
        n_sample = n_col
        sample_indices = torch.multinomial(prob_dist, n_sample, True)
        self.assertEqual(sample_indices.dim(), 1, "wrong number of dimensions")
        self.assertEqual(prob_dist.dim(), 1, "wrong number of prob_dist dimensions")
        self.assertEqual(sample_indices.size(0), n_sample, "wrong number of samples")
sampler.py (project: pytorch, author: ezyang)
def __iter__(self):
        return iter(torch.multinomial(self.weights, self.num_samples, self.replacement))
test_torch.py (project: pytorch, author: ezyang)
def test_multinomial(self):
        # with replacement
        n_row = 3
        for n_col in range(4, 5 + 1):
            prob_dist = torch.rand(n_row, n_col)
            prob_dist.select(1, n_col - 1).fill_(0)  # index n_col - 1 shouldn't be sampled
            n_sample = n_col
            sample_indices = torch.multinomial(prob_dist, n_sample, True)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for index in product(range(n_row), range(n_sample)):
                self.assertNotEqual(sample_indices[index], n_col - 1, "sampled an index with zero probability")

        # without replacement
        n_row = 3
        for n_col in range(4, 5 + 1):
            prob_dist = torch.rand(n_row, n_col)
            prob_dist.select(1, n_col - 1).fill_(0)  # index n_col - 1 shouldn't be sampled
            n_sample = 3
            sample_indices = torch.multinomial(prob_dist, n_sample, False)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for i in range(n_row):
                row_samples = {}
                for j in range(n_sample):
                    sample_idx = sample_indices[i, j]
                    self.assertNotEqual(sample_idx, n_col - 1,
                                        "sampled an index with zero probability")
                    self.assertNotIn(sample_idx, row_samples, "sampled an index twice")
                    row_samples[sample_idx] = True

        # vector
        n_col = 4
        prob_dist = torch.rand(n_col)
        n_sample = n_col
        sample_indices = torch.multinomial(prob_dist, n_sample, True)
        self.assertEqual(sample_indices.dim(), 1, "wrong number of dimensions")
        self.assertEqual(prob_dist.dim(), 1, "wrong number of prob_dist dimensions")
        self.assertEqual(sample_indices.size(0), n_sample, "wrong number of samples")
categorical_random_variable.py (project: gpytorch, author: jrg365)
def sample(self, n_samples=1):
        mass_function = self.mass_function.data
        res = torch.multinomial(mass_function, n_samples, replacement=True)

        # Sample dimension is first
        if res.ndimension() == 2:
            res = res.t()
        return res
utils.py (project: sourceseparation_misc, author: ycemsubakan)
def prepare_mixture_gm_data(arguments):
    dataset = []

    arguments.L2 = 2
    arguments.L1 = 2
    arguments.K = 200
    sig0 = 5
    sig = 0.1

    num_means = arguments.num_means
    means = 5*torch.randn(num_means, arguments.L2) 
    arguments.means = means.numpy()

    N = 2000

    mixinds = torch.multinomial(torch.ones(num_means), N, replacement=True) 
    obsnoise = torch.randn(N, arguments.L2) 

    data = means[mixinds] + obsnoise
    inp = torch.randn(N, arguments.L1) 

    dataset1 = TensorDataset(inp, data, [1]*N)
    datasetmix = dataset1 

    kwargs = {'num_workers': 1, 'pin_memory': True} if arguments.cuda else {}
    loader1 = data_utils.DataLoader(dataset1, batch_size=arguments.batch_size, shuffle=False, **kwargs)
    loader_mix = data_utils.DataLoader(datasetmix, batch_size=arguments.batch_size, shuffle=False, **kwargs)

    return loader1, loader_mix
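Stripped of the DataLoader plumbing, the mixture sampling above amounts to: draw a component index per point with multinomial, then add Gaussian noise around that component's mean. A condensed sketch:

import torch

means = 5 * torch.randn(4, 2)                                 # 4 component means in R^2
mix = torch.multinomial(torch.ones(4), 10, replacement=True)  # uniform component choice
data = means[mix] + 0.1 * torch.randn(10, 2)                  # observations around the chosen means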
FCModel.py (project: self-critical.pytorch, author: ruotianluo)
def forward(self, fc_feats, att_feats, seq):
        batch_size = fc_feats.size(0)
        state = self.init_hidden(batch_size)
        outputs = []

        for i in range(seq.size(1)):
            if i == 0:
                xt = self.img_embed(fc_feats)
            else:
                if self.training and i >= 2 and self.ss_prob > 0.0: # otherwise no need to sample
                    sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
                    sample_mask = sample_prob < self.ss_prob
                    if sample_mask.sum() == 0:
                        it = seq[:, i-1].clone()
                    else:
                        sample_ind = sample_mask.nonzero().view(-1)
                        it = seq[:, i-1].data.clone()
                        #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
                        #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
                        prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
                        it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                        it = Variable(it, requires_grad=False)
                else:
                    it = seq[:, i-1].clone()
                # break if all the sequences end
                if i >= 2 and seq[:, i-1].data.sum() == 0:
                    break
                xt = self.embed(it)

            output, state = self.core(xt, state)
            output = F.log_softmax(self.logit(output))
            outputs.append(output)

        return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()
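The scheduled-sampling branch above (repeated in the three sibling models below) replaces ground-truth tokens with the model's own samples at rate ss_prob. A self-contained sketch of just that step, with illustrative shapes:

import torch

batch_size, vocab_size, ss_prob = 8, 100, 0.25
gt = torch.randint(0, vocab_size, (batch_size,))                    # ground-truth tokens
log_probs = torch.log_softmax(torch.randn(batch_size, vocab_size), dim=1)

mask = torch.rand(batch_size) < ss_prob                             # positions to replace
sampled = torch.multinomial(log_probs.exp(), 1).view(-1)            # one model sample per row
it = torch.where(mask, sampled, gt)                                 # mixed next-step input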
OldModel.py (project: self-critical.pytorch, author: ruotianluo)
def forward(self, fc_feats, att_feats, seq):
        batch_size = fc_feats.size(0)
        state = self.init_hidden(fc_feats)

        outputs = []

        for i in range(seq.size(1) - 1):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
                sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
                sample_mask = sample_prob < self.ss_prob
                if sample_mask.sum() == 0:
                    it = seq[:, i].clone()
                else:
                    sample_ind = sample_mask.nonzero().view(-1)
                    it = seq[:, i].data.clone()
                    #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
                    #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
                    prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
                    it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                    it = Variable(it, requires_grad=False)
            else:
                it = seq[:, i].clone()          
            # break if all the sequences end
            if i >= 1 and seq[:, i].data.sum() == 0:
                break

            xt = self.embed(it)

            output, state = self.core(xt, fc_feats, att_feats, state)
            output = F.log_softmax(self.logit(self.dropout(output)))
            outputs.append(output)

        return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
ShowTellModel.py (project: self-critical.pytorch, author: ruotianluo)
def forward(self, fc_feats, att_feats, seq):
        batch_size = fc_feats.size(0)
        state = self.init_hidden(batch_size)
        outputs = []

        for i in range(seq.size(1)):
            if i == 0:
                xt = self.img_embed(fc_feats)
            else:
                if self.training and i >= 2 and self.ss_prob > 0.0: # otherwise no need to sample
                    sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
                    sample_mask = sample_prob < self.ss_prob
                    if sample_mask.sum() == 0:
                        it = seq[:, i-1].clone()
                    else:
                        sample_ind = sample_mask.nonzero().view(-1)
                        it = seq[:, i-1].data.clone()
                        #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
                        #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
                        prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
                        it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                        it = Variable(it, requires_grad=False)
                else:
                    it = seq[:, i-1].clone()                
                # break if all the sequences end
                if i >= 2 and seq[:, i-1].data.sum() == 0:
                    break
                xt = self.embed(it)

            output, state = self.core(xt.unsqueeze(0), state)
            output = F.log_softmax(self.logit(self.dropout(output.squeeze(0))))
            outputs.append(output)

        return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()
Att2inModel.py (project: self-critical.pytorch, author: ruotianluo)
def forward(self, fc_feats, att_feats, seq):
        batch_size = fc_feats.size(0)
        state = self.init_hidden(batch_size)

        outputs = []

        # Project the attention feats first to reduce memory and computation consumption.
        p_att_feats = self.ctx2att(att_feats.view(-1, self.att_feat_size))
        p_att_feats = p_att_feats.view(*(att_feats.size()[:-1] + (self.att_hid_size,)))

        for i in range(seq.size(1) - 1):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
                sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
                sample_mask = sample_prob < self.ss_prob
                if sample_mask.sum() == 0:
                    it = seq[:, i].clone()
                else:
                    sample_ind = sample_mask.nonzero().view(-1)
                    it = seq[:, i].data.clone()
                    #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
                    #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
                    prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
                    it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                    it = Variable(it, requires_grad=False)
            else:
                it = seq[:, i].clone()          
            # break if all the sequences end
            if i >= 1 and seq[:, i].data.sum() == 0:
                break

            xt = self.embed(it)

            output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state)
            output = F.log_softmax(self.logit(output))
            outputs.append(output)

        return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
dataset.py (project: pytorch-planet-amazon, author: rwightman)
def __iter__(self):
        base_samples = torch.arange(0, len(self.weights)).long()
        remaining = self.num_samples - len(self.weights)
        over_samples = torch.multinomial(self.weights, remaining, True)
        samples = torch.cat((base_samples, over_samples), dim=0)
        print('num samples', len(samples))
        return (samples[i] for i in torch.randperm(len(samples)))
neuralSearch.py (project: TikZ, author: ellisk42)
def sampleEnvironment(self, s, environments, T = 1):
        problem = self.encodeProblem(s).view(1,-1)
        environmentScores = self.environmentLogLikelihoods(environments, problem)
        distribution = (environmentScores/T).exp()
        i = torch.multinomial(distribution.data, 1)[0]
        return environments[i]
sampler.py (project: pytorch, author: pytorch)
def __iter__(self):
        return iter(torch.multinomial(self.weights, self.num_samples, self.replacement))
categorical.py (project: pytorch, author: pytorch)
def sample(self, sample_shape=torch.Size()):
        num_events = self.probs.size()[-1]
        sample_shape = self._extended_shape(sample_shape)
        param_shape = sample_shape + self.probs.size()[-1:]
        probs = self.probs.expand(param_shape)
        probs_2d = probs.contiguous().view(-1, num_events)
        sample_2d = torch.multinomial(probs_2d, 1, True)
        return sample_2d.contiguous().view(sample_shape)
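The same machinery backs the public torch.distributions.Categorical API; a short usage example:

import torch
from torch.distributions import Categorical

dist = Categorical(probs=torch.tensor([0.2, 0.3, 0.5]))
print(dist.sample(torch.Size([4])))  # e.g. tensor([2, 1, 2, 0])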
test_torch.py (project: pytorch, author: pytorch)
def test_multinomial(self):
        # with replacement
        n_row = 3
        for n_col in range(4, 5 + 1):
            prob_dist = torch.rand(n_row, n_col)
            prob_dist.select(1, n_col - 1).fill_(0)  # index n_col - 1 shouldn't be sampled
            n_sample = n_col
            sample_indices = torch.multinomial(prob_dist, n_sample, True)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for index in product(range(n_row), range(n_sample)):
                self.assertNotEqual(sample_indices[index], n_col - 1, "sampled an index with zero probability")

        # without replacement
        n_row = 3
        for n_col in range(4, 5 + 1):
            prob_dist = torch.rand(n_row, n_col)
            prob_dist.select(1, n_col - 1).fill_(0)  # index n_col - 1 shouldn't be sampled
            n_sample = 3
            sample_indices = torch.multinomial(prob_dist, n_sample, False)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for i in range(n_row):
                row_samples = {}
                for j in range(n_sample):
                    sample_idx = sample_indices[i, j]
                    self.assertNotEqual(sample_idx, n_col - 1,
                                        "sampled an index with zero probability")
                    self.assertNotIn(sample_idx, row_samples, "sampled an index twice")
                    row_samples[sample_idx] = True

        # vector
        n_col = 4
        prob_dist = torch.rand(n_col)
        n_sample = n_col
        sample_indices = torch.multinomial(prob_dist, n_sample, True)
        self.assertEqual(sample_indices.dim(), 1, "wrong number of dimensions")
        self.assertEqual(prob_dist.dim(), 1, "wrong number of prob_dist dimensions")
        self.assertEqual(sample_indices.size(0), n_sample, "wrong number of samples")

