Example source code for Python's log_softmax()
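
F.log_softmax computes log(softmax(x)) along a given dimension in one numerically stable step; its output feeds F.nll_loss, and the two together are exactly F.cross_entropy. A minimal sketch of that equivalence (shapes and names here are illustrative, not taken from the projects below):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)               # batch of 4 samples, 10 classes
targets = torch.tensor([1, 0, 3, 7])      # ground-truth class indices

log_p = F.log_softmax(logits, dim=1)      # per-class log-probabilities
loss_a = F.nll_loss(log_p, targets)       # negative log-likelihood
loss_b = F.cross_entropy(logits, targets) # same value, fused in one call
assert torch.allclose(loss_a, loss_b)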

policies.py (project: drl.pth, author: seba-1511)
def forward(self, x, *args, **kwargs):
        action = super(DiscretePolicy, self).forward(x, *args, **kwargs)
        probs = F.softmax(action.raw, dim=1)
        action.value = probs.multinomial(1).detach()  # sample one action per row
        action.prob = lambda: probs.t()[action.value[:, 0]].mean(1)
        action.compute_log_prob = lambda a: F.log_softmax(action.raw, dim=1).t()[a[:, 0]].mean(1)
        action.log_prob = action.compute_log_prob(action.value)
        action.entropy = -(action.prob() * action.log_prob)
        return action
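In current PyTorch, the same sampling and bookkeeping is usually done with torch.distributions.Categorical, which exposes log_prob and entropy directly. A minimal sketch under that API (shapes are illustrative):

import torch
from torch.distributions import Categorical

logits = torch.randn(4, 6)          # 4 states, 6 discrete actions
dist = Categorical(logits=logits)   # softmax is applied internally
actions = dist.sample()             # one action per state, shape (4,)
log_probs = dist.log_prob(actions)  # log pi(a|s) for the sampled actions
entropy = dist.entropy()            # per-state policy entropy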
example1.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
example2_gradient.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)

        # Register a backward hook
        x.register_hook(myGradientHook)

        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
example3.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
example2_adv_example.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
example5.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.elu(F.max_pool2d(self.conv1(x), 2))
        x = F.elu(F.max_pool2d(self.bn2(self.conv2(x)), 2))
        x = F.elu(F.max_pool2d(self.bn3(self.conv3(x)), 2))
        x = F.elu(F.max_pool2d(self.bn4(self.conv4(x)), 2))

        x = x.view(-1, 750)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
example6_squeezenet.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.dropout(x, training=self.training)
        x = self.conv(x)
        x = self.avgpool(x)
        x = F.log_softmax(x, dim=1)
        x = x.squeeze(dim=3).squeeze(dim=2)
        return x
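Note that log_softmax is applied while the tensor is still 4D with shape (N, C, 1, 1), normalizing over the channel (class) dimension; the two singleton spatial dimensions are squeezed away afterwards.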
loss.py (project: pytorch-semseg, author: meetshah1995)
def cross_entropy2d(input, target, weight=None, size_average=True):
    n, c, h, w = input.size()
    log_p = F.log_softmax(input, dim=1)
    log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
    log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]
    log_p = log_p.view(-1, c)

    mask = target >= 0
    target = target[mask]
    loss = F.nll_loss(log_p, target, weight=weight, size_average=False)
    if size_average:
        loss /= mask.data.sum()
    return loss
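A quick usage sketch for cross_entropy2d above (shapes are illustrative; label -1 marks ignored pixels, which the target >= 0 mask filters out):

import torch

n, c, h, w = 2, 5, 8, 8
logits = torch.randn(n, c, h, w, requires_grad=True)
labels = torch.randint(-1, c, (n, h, w))  # -1 = ignore, 0..c-1 = classes

loss = cross_entropy2d(logits, labels)
loss.backward()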
loss.py (project: pytorch-semseg, author: meetshah1995)
def bootstrapped_cross_entropy2d(input, target, K, weight=None, size_average=True):

    batch_size = input.size()[0]

    def _bootstrap_xentropy_single(input, target, K, weight=None, size_average=True):
        n, c, h, w = input.size()
        log_p = F.log_softmax(input, dim=1)
        log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
        log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]
        log_p = log_p.view(-1, c)

        mask = target >= 0
        target = target[mask]
        loss = F.nll_loss(log_p, target, weight=weight, reduce=False, size_average=False)
        topk_loss, _ = loss.topk(K)
        reduced_topk_loss = topk_loss.sum() / K

        return reduced_topk_loss

    loss = 0.0
    # Bootstrap from each image not entire batch
    for i in range(batch_size):
        loss += _bootstrap_xentropy_single(input=torch.unsqueeze(input[i], 0),
                                           target=torch.unsqueeze(target[i], 0),
                                           K=K,
                                           weight=weight,
                                           size_average=size_average)
    return loss / float(batch_size)
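Here K is the number of hardest pixels kept per image: each image's per-pixel losses are ranked with topk and only the K largest are averaged, a simple form of online hard-example mining. Calling it mirrors the plain version, with K added (illustrative values):

import torch

logits = torch.randn(2, 5, 8, 8)
labels = torch.randint(0, 5, (2, 8, 8))
loss = bootstrapped_cross_entropy2d(logits, labels, K=64)  # keep the 64 hardest pixels per image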
fcn.py (project: lsun-room, author: leVirve)
def cross_entropy2d(pred, target, weight=None, size_average=True):
    n, num_classes, h, w = pred.size()

    log_p = F.log_softmax(pred, dim=1)

    log_p = channel_first_to_last(log_p).view(-1, num_classes)
    target = channel_first_to_last(target).view(-1)
    loss = F.nll_loss(log_p, target, weight=weight, size_average=False)

    if size_average:
        loss /= (h * w * n)
    return loss
CNNLSTMPolicy.py (project: generals_a3c, author: yilundu)
def forward(self, input):
        # TODO perhaps add batch normalization or layer normalization

        x = F.elu(self.conv1(input))
        x = F.elu(self.conv2(x))
        x = F.elu(self.conv3(x))

        # Next flatten the output to be batched into LSTM layers
        # The shape of x is batch_size, channels, height, width
        x = self.pre_lstm_bn(x)

        x = torch.transpose(x, 1, 3)
        x = torch.transpose(x, 1, 2)
        x = x.contiguous()

        x = x.view(x.size(0), self.batch, self.hidden_dim)
        x, hidden = self.lstm(x, (self.hidden_state, self.cell_state))
        self.hidden_state, self.cell_state = hidden

        x = torch.transpose(x, 2, 1)
        x = x.contiguous()
        x = x.view(x.size(0), self.hidden_dim, self.height, self.width)

        x = self.lstm_batch_norm(x)

        x = F.elu(self.conv4(x))
        x = F.elu(self.conv5(x))

        o_begin = self.begin_conv(x)
        o_end = self.end_conv(x)

        o_begin = o_begin.view(o_begin.size(0), -1)
        o_end = o_end.view(o_end.size(0), -1)

        o_begin = F.log_softmax(o_begin, dim=1)
        o_end = F.log_softmax(o_end, dim=1)

        return o_begin, o_end
densenet.py (project: ResNeXt-DenseNet, author: D-X-Y)
def forward(self, x):
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.dense3(out)
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
    out = F.log_softmax(self.fc(out), dim=1)
    return out
criterion.py (project: FCN, author: zengxianyu)
def forward(self, outputs, targets):
        return self.loss(F.log_softmax(outputs, dim=1), targets)
colornet.py (project: colorNet-pytorch, author: shufanwu)
def forward(self, x):
        x = F.relu(self.bn1(self.fc1(x)))
        x = F.log_softmax(self.bn2(self.fc2(x)), dim=1)
        return x
model.py (project: treelstm.pytorch, author: dasguptar)
def forward(self, lvec, rvec):
        mult_dist = torch.mul(lvec, rvec)
        abs_dist = torch.abs(torch.add(lvec, -rvec))
        vec_dist = torch.cat((mult_dist, abs_dist), 1)

        out = F.sigmoid(self.wh(vec_dist))
        out = F.log_softmax(self.wp(out), dim=1)
        return out


# putting the whole model together
torch_CBOW.py (project: pytorch_word2vec, author: bamtercelboo)
def forward(self, input_data):
        embeds = self.embed_layer(input_data).view((1, -1))
        output = F.relu(self.linear_1(embeds))
        output = F.log_softmax(self.linear_2(output), dim=1)
        return output


# Helper function
model.py (project: torch_light, author: ne7ermore)
def forward(self, input, hidden):
        encode = self.lookup_table(input)
        lstm_out, hidden = self.lstm(encode, hidden)
        lstm_out = F.dropout(lstm_out, p=self.dropout, training=self.training)
        out = self.lr(lstm_out.contiguous().view(-1, lstm_out.size(2)))
        return F.log_softmax(out, dim=1), hidden
model.py (project: torch_light, author: ne7ermore)
def forward(self, src, src_pos, tgt, tgt_pos):
        tgt, tgt_pos = tgt[:, :-1], tgt_pos[:, :-1]

        enc_outputs = self.enc(src, src_pos)
        dec_output = self.dec(enc_outputs, src, tgt, tgt_pos)

        out = self.linear(dec_output)

        return F.log_softmax(out.view(-1, self.dec_vocab_size), dim=1)
transform.py (project: torch_light, author: ne7ermore)
def decode(self, seq, pos):
        def length_penalty(step, len_penalty_w=1.):
            return (torch.log(self.torch.FloatTensor([5 + step])) - torch.log(self.torch.FloatTensor([6])))*len_penalty_w

        top_seqs = [([BOS], 0)] * self.beam_size

        enc_outputs = self.model.enc(seq, pos)
        seq_beam = Variable(seq.data.repeat(self.beam_size, 1))
        enc_outputs_beam = [Variable(enc_output.data.repeat(self.beam_size, 1, 1)) for enc_output in enc_outputs]

        input_data = self.init_input()
        input_pos = torch.arange(1, 2).unsqueeze(0)
        input_pos = input_pos.repeat(self.beam_size, 1)
        input_pos = Variable(input_pos.long(), volatile=True)

        for step in range(1, self.args.max_word_len+1):
            if self.cuda:
                input_pos = input_pos.cuda()
                input_data = input_data.cuda()

            dec_output = self.model.dec(enc_outputs_beam,
                            seq_beam, input_data, input_pos)
            dec_output = dec_output[:, -1, :] # word level feature
            out = F.log_softmax(self.model.linear(dec_output), dim=1)
            lp = length_penalty(step)

            top_seqs, all_done, un_dones = self.beam_search(out.data+lp, top_seqs)

            if all_done: break
            input_data = self.update_input(top_seqs)
            input_pos, seq_beam, enc_outputs_beam = self.update_state(step+1, seq, enc_outputs, un_dones)  # refresh beams for the still-active hypotheses

        tgts = []
        for seq in top_seqs:
            cor_idxs, score = seq
            cor_idxs = cor_idxs[1: -1]
            tgts += [(" ".join([self.src_idx2word[idx] for idx in cor_idxs]), score)]
        return tgts
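The length_penalty closure implements a GNMT-style length normalization: in log space it adds len_penalty_w * (log(5 + step) - log 6), i.e. the log of ((5 + step) / 6) ** len_penalty_w, to the accumulated log-probabilities so that longer candidate sequences are not unduly penalized during beam search.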
model.py (project: torch_light, author: ne7ermore)
def forward(self, input, hidden):
        encode = self.lookup_table(input)
        lstm_out, hidden = self.lstm(encode.transpose(0, 1), hidden)
        output = self.ln(lstm_out)[-1]
        return F.log_softmax(self.logistic(output), dim=1), hidden

