Python cross_entropy() usage examples
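
Every snippet on this page calls PyTorch's F.cross_entropy, which fuses log_softmax and negative log-likelihood over raw logits. For orientation, here is a minimal, self-contained sketch of the call (written against the modern reduction= API; the snippets below date from PyTorch 0.x and use the since-deprecated size_average= flag, Variable wrappers, and .data[0] scalar access):

import torch
import torch.nn.functional as F

# Raw, unnormalized scores for a batch of 4 samples over 10 classes.
logits = torch.randn(4, 10, requires_grad=True)
# Ground-truth class indices, one per sample.
labels = torch.tensor([1, 0, 4, 9])

# Mean cross-entropy over the batch (log_softmax + NLL in one call).
loss = F.cross_entropy(logits, labels)
loss.backward()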

imsitu_model.py (project: verb-attributes, author: uwnlp)
def ours_train(m, x, labels, data, att_crit=None, optimizers=None):
    """
    Train the direct attribute prediction model
    :param m: Model we're using
    :param x: [batch_size, 3, 224, 224] Image input
    :param labels: [batch_size] variable with indices of the right verbs
    :param data: dataset object; expected to carry the [vocab_size, 300] verb embeddings and the [vocab_size, att_dim] GT attributes of the verbs
    :param att_crit: AttributeLoss module that computes the loss
    :param optimizers: the decorator will use these to update parameters
    :return: the scalar training loss (attribute-head cross entropies plus the model's L2 penalty)
    """
    logits = ours_logits(m, x, data, att_crit=att_crit)
    loss = m.l2_penalty
    if len(logits) == 1:
        loss += F.cross_entropy(logits[0], labels, size_average=True)
    else:
        sum_logits = sum(logits)
        for l in logits:
            loss += F.cross_entropy(l, labels, size_average=True)/(len(logits)+1)
        loss += F.cross_entropy(sum_logits, labels, size_average=True)/(len(logits)+1)
    return loss
losses.py (project: jaccardSegment, author: bermanmaxim)
def crossentropyloss(logits, label):
    mask = (label.view(-1) != VOID_LABEL)
    nonvoid = mask.long().sum()
    if nonvoid == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    # if nonvoid == mask.numel():
    #     # no void pixel, use builtin
    #     return F.cross_entropy(logits, Variable(label))
    target = label.view(-1)[mask]
    C = logits.size(1)
    logits = logits.permute(0, 2, 3, 1)  # B, H, W, C
    logits = logits.contiguous().view(-1, C)
    mask2d = mask.unsqueeze(1).expand(mask.size(0), C).contiguous().view(-1)
    logits = logits[mask2d].view(-1, C)
    loss = F.cross_entropy(logits, Variable(target))
    return loss
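
The manual flattening and masking above keeps void pixels out of both the loss and the gradient. A sketch of the same idea using the ignore_index argument that current PyTorch exposes on F.cross_entropy (assuming VOID_LABEL is a plain int; note that the explicit nonvoid == 0 branch above also guards the all-void case, where a mean over zero elements is undefined):

import torch.nn.functional as F

def crossentropyloss_builtin(logits, label, void_label=255):
    # logits: (B, C, H, W) raw scores; label: (B, H, W) int64 class map.
    # Pixels labeled void_label contribute neither loss nor gradient.
    return F.cross_entropy(logits, label, ignore_index=void_label)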
example6.py (project: pytorch_tutorial, author: soravux)
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            epoch, batch_idx * len(data), len(train_loader.dataset),
            100. * batch_idx / len(train_loader), loss.data[0]))
example6.py (project: pytorch_tutorial, author: soravux)
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.cross_entropy(output, target, size_average=False).data[0] # sum up batch loss
        pred = output.data.max(1)[1] # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
example5.py (project: pytorch_tutorial, author: soravux)
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            epoch, batch_idx * len(data), len(train_loader.dataset),
            100. * batch_idx / len(train_loader), loss.data[0]))
example5.py (project: pytorch_tutorial, author: soravux)
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.cross_entropy(output, target, size_average=False).data[0] # sum up batch loss
        pred = output.data.max(1)[1] # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
example6_squeezenet.py (project: pytorch_tutorial, author: soravux)
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.cross_entropy(output, target, size_average=False).data[0] # sum up batch loss
        pred = output.data.max(1)[1] # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
focal_loss.py (project: RetinaNet, author: c0nn3r)
def forward(self, output, target):

        cross_entropy = F.cross_entropy(output, target)
        cross_entropy_log = torch.log(cross_entropy)

        focal_loss = -((1 - cross_entropy) ** self.focusing_param) * cross_entropy_log

        balanced_focal_loss = self.balance_param * focal_loss

        return balanced_focal_loss
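
For comparison, the RetinaNet paper defines focal loss per example on the true-class probability p_t, FL = -(1 - p_t)**gamma * log(p_t), whereas the snippet above applies the modulation to the batch-mean cross entropy. A per-sample sketch built on reduction='none' (a hypothetical helper, not this repo's code):

import torch
import torch.nn.functional as F

def focal_loss(logits, target, gamma=2.0, alpha=1.0):
    # Per-sample cross entropy: ce[i] = -log(p_t) for the true class.
    ce = F.cross_entropy(logits, target, reduction='none')
    p_t = torch.exp(-ce)                       # recover p_t from -log(p_t)
    loss = alpha * (1.0 - p_t) ** gamma * ce   # down-weight easy examples
    return loss.mean()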
train.py (project: colorNet-pytorch, author: shufanwu)
def train(epoch):
    color_model.train()

    try:
        for batch_idx, (data, classes) in enumerate(train_loader):
            messagefile = open('./message.txt', 'a')
            original_img = data[0].unsqueeze(1).float()
            img_ab = data[1].float()
            if have_cuda:
                original_img = original_img.cuda()
                img_ab = img_ab.cuda()
                classes = classes.cuda()
            original_img = Variable(original_img)
            img_ab = Variable(img_ab)
            classes = Variable(classes)
            optimizer.zero_grad()
            class_output, output = color_model(original_img, original_img)
            ems_loss = torch.pow((img_ab - output), 2).sum() / torch.from_numpy(np.array(list(output.size()))).prod()
            cross_entropy_loss = (1 / 300.0) * F.cross_entropy(class_output, classes)  # float division; integer division would zero this term in Python 2
            loss = ems_loss + cross_entropy_loss
            lossmsg = 'loss: %.9f\n' % (loss.data[0])
            messagefile.write(lossmsg)
            ems_loss.backward(retain_variables=True)
            cross_entropy_loss.backward()
            optimizer.step()
            if batch_idx % 500 == 0:
                message = 'Train Epoch:%d\tPercent:[%d/%d (%.0f%%)]\tLoss:%.9f\n' % (
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.data[0])
                messagefile.write(message)
                torch.save(color_model.state_dict(), 'colornet_params.pkl')
            messagefile.close()
                # print('Train Epoch: {}[{}/{}({:.0f}%)]\tLoss: {:.9f}\n'.format(
                #     epoch, batch_idx * len(data), len(train_loader.dataset),
                #     100. * batch_idx / len(train_loader), loss.data[0]))
    except Exception:
        logfile = open('log.txt', 'w')
        logfile.write(traceback.format_exc())
        logfile.close()
    finally:
        torch.save(color_model.state_dict(), 'colornet_params.pkl')
train.py (project: cnn-text-classification-pytorch, author: Shawn1993)
def train(train_iter, dev_iter, model, args):
    if args.cuda:
        model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    steps = 0
    model.train()
    for epoch in range(1, args.epochs+1):
        for batch in train_iter:
            feature, target = batch.text, batch.label
            feature.data.t_(), target.data.sub_(1)  # batch first, index align
            if args.cuda:
                feature, target = feature.cuda(), target.cuda()

            optimizer.zero_grad()
            logit = model(feature)

            #print('logit vector', logit.size())
            #print('target vector', target.size())
            loss = F.cross_entropy(logit, target)
            loss.backward()
            optimizer.step()

            steps += 1
            if steps % args.log_interval == 0:
                corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
                accuracy = 100.0 * corrects/batch.batch_size
                sys.stdout.write(
                    '\rBatch[{}] - loss: {:.6f}  acc: {:.4f}%({}/{})'.format(steps, 
                                                                             loss.data[0], 
                                                                             accuracy,
                                                                             corrects,
                                                                             batch.batch_size))
            if steps % args.test_interval == 0:
                eval(dev_iter, model, args)
            if steps % args.save_interval == 0:
                if not os.path.isdir(args.save_dir): os.makedirs(args.save_dir)
                save_prefix = os.path.join(args.save_dir, 'snapshot')
                save_path = '{}_steps{}.pt'.format(save_prefix, steps)
                torch.save(model, save_path)
oim.py (project: open-reid, author: Cysu)
def forward(self, inputs, targets):
        inputs = oim(inputs, targets, self.lut, momentum=self.momentum)
        inputs *= self.scalar
        loss = F.cross_entropy(inputs, targets, weight=self.weight,
                               size_average=self.size_average)
        return loss, inputs
train_ALL_LSTM.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def eval(data_iter, model, args, scheduler):
    model.eval()
    corrects, avg_loss = 0, 0
    for batch in data_iter:
        feature, target = batch.text, batch.label
        target.data.sub_(1)
        # feature.data.t_(), target.data.sub_(1)  # batch first, index align
        # feature.data.t_(),\
        # target.data.sub_(1)  # batch first, index align
        # target = autograd.Variable(target)
        if args.cuda is True:
            feature, target = feature.cuda(), target.cuda()

        model.hidden = model.init_hidden(args.lstm_num_layers, args.batch_size)
        if feature.size(1) != args.batch_size:
            # continue
            model.hidden = model.init_hidden(args.lstm_num_layers, feature.size(1))
        logit = model(feature)
        loss = F.cross_entropy(logit, target, size_average=False)
        # scheduler.step(loss.data[0])

        avg_loss += loss.data[0]
        corrects += (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()

    size = len(data_iter.dataset)
    avg_loss /= size  # mean of the summed per-batch losses
    accuracy = float(corrects)/size * 100.0
    model.train()
    print('\nEvaluation - loss: {:.6f}  acc: {:.4f}%({}/{}) \n'.format(avg_loss,
                                                                       accuracy,
                                                                       corrects,
                                                                       size))
train_ALL_CNN_1.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def eval(data_iter, model, args, scheduler):
    model.eval()
    corrects, avg_loss = 0, 0
    for batch in data_iter:
        feature, target = batch.text, batch.label
        feature.data.t_(), target.data.sub_(1)  # batch first, index align
        if args.cuda:
            feature, target = feature.cuda(), target.cuda()

        logit = model(feature)
        loss = F.cross_entropy(logit, target, size_average=False)
        # scheduler.step(loss.data[0])
        # if args.init_clip_max_norm is not None:
        #     # print("aaaa {} ".format(args.init_clip_max_norm))
        #     utils.clip_grad_norm(model.parameters(), max_norm=args.init_clip_max_norm)

        avg_loss += loss.data[0]
        corrects += (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()

    size = len(data_iter.dataset)
    avg_loss /= size  # mean of the summed per-batch losses
    # accuracy = float(corrects)/size * 100.0
    accuracy = 100.0 * corrects/size
    model.train()
    print('\nEvaluation - loss: {:.6f}  acc: {:.4f}%({}/{}) \n'.format(avg_loss,
                                                                       accuracy,
                                                                       corrects,
                                                                       size))
faster_rcnn.py (project: faster_rcnn_pytorch, author: longcw)
def __init__(self):
        super(RPN, self).__init__()

        self.features = VGG16(bn=False)
        self.conv1 = Conv2d(512, 512, 3, same_padding=True)
        self.score_conv = Conv2d(512, len(self.anchor_scales) * 3 * 2, 1, relu=False, same_padding=False)
        self.bbox_conv = Conv2d(512, len(self.anchor_scales) * 3 * 4, 1, relu=False, same_padding=False)

        # loss
        self.cross_entropy = None
        self.loss_box = None
faster_rcnn.py (project: faster_rcnn_pytorch, author: longcw)
def loss(self):
        return self.cross_entropy + self.loss_box * 10
faster_rcnn.py (project: faster_rcnn_pytorch, author: longcw)
def forward(self, im_data, im_info, gt_boxes=None, gt_ishard=None, dontcare_areas=None):
        im_data = network.np_to_variable(im_data, is_cuda=True)
        im_data = im_data.permute(0, 3, 1, 2)
        features = self.features(im_data)

        rpn_conv1 = self.conv1(features)

        # rpn score
        rpn_cls_score = self.score_conv(rpn_conv1)
        rpn_cls_score_reshape = self.reshape_layer(rpn_cls_score, 2)
        rpn_cls_prob = F.softmax(rpn_cls_score_reshape)
        rpn_cls_prob_reshape = self.reshape_layer(rpn_cls_prob, len(self.anchor_scales)*3*2)

        # rpn boxes
        rpn_bbox_pred = self.bbox_conv(rpn_conv1)

        # proposal layer
        cfg_key = 'TRAIN' if self.training else 'TEST'
        rois = self.proposal_layer(rpn_cls_prob_reshape, rpn_bbox_pred, im_info,
                                   cfg_key, self._feat_stride, self.anchor_scales)

        # generating training labels and build the rpn loss
        if self.training:
            assert gt_boxes is not None
            rpn_data = self.anchor_target_layer(rpn_cls_score, gt_boxes, gt_ishard, dontcare_areas,
                                                im_info, self._feat_stride, self.anchor_scales)
            self.cross_entropy, self.loss_box = self.build_loss(rpn_cls_score_reshape, rpn_bbox_pred, rpn_data)

        return features, rois
faster_rcnn.py (project: faster_rcnn_pytorch, author: longcw)
def build_loss(self, cls_score, bbox_pred, roi_data):
        # classification loss
        label = roi_data[1].squeeze()
        fg_cnt = torch.sum(label.data.ne(0))
        bg_cnt = label.data.numel() - fg_cnt

        # for log
        if self.debug:
            maxv, predict = cls_score.data.max(1)
            self.tp = torch.sum(predict[:fg_cnt].eq(label.data[:fg_cnt])) if fg_cnt > 0 else 0
            self.tf = torch.sum(predict[fg_cnt:].eq(label.data[fg_cnt:]))
            self.fg_cnt = fg_cnt
            self.bg_cnt = bg_cnt

        ce_weights = torch.ones(cls_score.size()[1])
        ce_weights[0] = float(fg_cnt) / bg_cnt
        ce_weights = ce_weights.cuda()
        cross_entropy = F.cross_entropy(cls_score, label, weight=ce_weights)

        # bounding box regression L1 loss
        bbox_targets, bbox_inside_weights, bbox_outside_weights = roi_data[2:]
        bbox_targets = torch.mul(bbox_targets, bbox_inside_weights)
        bbox_pred = torch.mul(bbox_pred, bbox_inside_weights)

        loss_box = F.smooth_l1_loss(bbox_pred, bbox_targets, size_average=False) / (fg_cnt + 1e-4)

        return cross_entropy, loss_box
MSDN_base.py (project: MSDN, author: yikang-li)
def build_loss_cls(self, cls_score, labels):
        labels = labels.squeeze()
        fg_cnt = torch.sum(labels.data.ne(0))
        bg_cnt = labels.data.numel() - fg_cnt

        ce_weights = np.sqrt(self.predicate_loss_weight)
        ce_weights[0] = float(fg_cnt) / (bg_cnt + 1e-5)
        ce_weights = ce_weights.cuda()
        # print '[relationship]:'
        # print 'ce_weights:'
        # print ce_weights
        # print 'cls_score:'
        # print cls_score 
        # print 'labels'
        # print labels
        cross_entropy = F.cross_entropy(cls_score, labels, weight=ce_weights)

        maxv, predict = cls_score.data.max(1)
        # if DEBUG:
        # print '[predicate]:'
        # if predict.sum() > 0:
        # print predict
        # print 'labels'
        # print labels

        if fg_cnt == 0:
            tp = 0
        else:
            tp = torch.sum(predict[bg_cnt:].eq(labels.data[bg_cnt:]))
        tf = torch.sum(predict[:bg_cnt].eq(labels.data[:bg_cnt]))

        return cross_entropy, tp, tf, fg_cnt, bg_cnt
runner_kfold.py (project: semanaly, author: zqhZY)
def eval(data_iter, model, args):
    model.eval()
    corrects, avg_loss = 0, 0
    for batch in data_iter:
        feature, target = batch.text, batch.labels
        target.data.sub_(1)  # batch first, index align

        x = feature.data.numpy()
        x = x.T
        feature = autograd.Variable(torch.from_numpy(x))

        if args.cuda:
            feature, target = feature.cuda(), target.cuda()

        logit = model(feature)
        loss = F.cross_entropy(logit, target, size_average=False)

        avg_loss += loss.data[0]
        corrects += (torch.max(logit, 1)
                     [1].view(target.size()).data == target.data).sum()

    size = len(data_iter.dataset)
    avg_loss /= size  # mean of the summed per-batch losses
    accuracy = 100.0 * corrects / size
    model.train()
    print('\nEvaluation - loss: {:.6f}  acc: {:.4f}%({}/{}) \n'.format(avg_loss,
                                                                       accuracy,
                                                                       corrects,
                                                                       size))
    return accuracy, avg_loss
runner.py (project: semanaly, author: zqhZY)
def eval(data_iter, model, args):
    model.eval()
    corrects, avg_loss = 0, 0
    for batch in data_iter:
        feature, target = batch.text, batch.labels
        target.data.sub_(1)  # batch first, index align

        x = feature.data.numpy()
        x = x.T
        feature = autograd.Variable(torch.from_numpy(x))

        if args.cuda:
            feature, target = feature.cuda(), target.cuda()

        logit = model(feature)
        loss = F.cross_entropy(logit, target, size_average=False)

        avg_loss += loss.data[0]
        corrects += (torch.max(logit, 1)
                     [1].view(target.size()).data == target.data).sum()

    size = len(data_iter.dataset)
    avg_loss /= size  # mean of the summed per-batch losses
    accuracy = 100.0 * corrects / size
    model.train()
    print('\nEvaluation - loss: {:.6f}  acc: {:.4f}%({}/{}) \n'.format(avg_loss,
                                                                       accuracy,
                                                                       corrects,
                                                                       size))

