python类Normalize()的实例源码

preprocess.py 文件源码 项目:convNet.pytorch 作者: eladhoffer 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Build an evaluation pipeline: optional resize, center crop, tensor, normalize.

    Args:
        input_size: side length of the square center crop.
        scale_size: optional size to resize to before cropping; the resize
            step is skipped when None or already equal to input_size.
        normalize: dict of keyword args for transforms.Normalize
            (expects 'mean' and 'std' keys).

    Returns:
        A transforms.Compose pipeline.
    """
    t_list = [
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    # Guard against the default scale_size=None, which previously produced
    # a broken transforms.Scale(None) step whenever None != input_size.
    if scale_size is not None and scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list

    return transforms.Compose(t_list)
preprocess.py 文件源码 项目:convNet.pytorch 作者: eladhoffer 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Build a training pipeline: optional resize, random crop, tensor, normalize.

    Args:
        input_size: side length of the square random crop.
        scale_size: optional size to resize to before cropping; the resize
            step is skipped when None or already equal to input_size.
        normalize: dict of keyword args for transforms.Normalize
            (expects 'mean' and 'std' keys).

    Returns:
        A transforms.Compose pipeline.
    """
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    # Guard against the default scale_size=None (previously built a broken
    # transforms.Scale(None) step).
    if scale_size is not None and scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list

    # BUG FIX: the original built the Compose but never returned it,
    # so the function always returned None.
    return transforms.Compose(t_list)
preprocess.py 文件源码 项目:convNet.pytorch 作者: eladhoffer 项目源码 文件源码 阅读 31 收藏 0 点赞 0 评论 0
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Build a training pipeline: padded random crop, horizontal flip, normalize.

    Args:
        input_size: side length of the square random crop.
        scale_size: target size used to derive the crop padding
            ((scale_size - input_size) // 2); defaults to input_size
            (i.e. zero padding) when None.
        normalize: dict of keyword args for transforms.Normalize
            (expects 'mean' and 'std' keys).

    Returns:
        A transforms.Compose pipeline.
    """
    # BUG FIX: the default scale_size=None previously crashed on
    # `None - input_size`; fall back to a zero-padding crop instead.
    if scale_size is None:
        scale_size = input_size
    padding = int((scale_size - input_size) / 2)
    return transforms.Compose([
        transforms.RandomCrop(input_size, padding=padding),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
preprocess.py 文件源码 项目:convNet.pytorch 作者: eladhoffer 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def inception_preproccess(input_size, normalize=__imagenet_stats):
    """Inception-style training preprocessing.

    Random sized crop to ``input_size``, random horizontal flip, tensor
    conversion and normalization with the given mean/std dict.
    """
    pipeline = [
        transforms.RandomSizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    return transforms.Compose(pipeline)
greyData.py 文件源码 项目:PaintsPytorch 作者: orashi 项目源码 文件源码 阅读 30 收藏 0 点赞 0 评论 0
def CreateDataLoader(opt):
    """Build the training DataLoader over the paired color/sketch folders.

    All three branches share the same tail (ToTensor + [-1, 1] normalize);
    only the leading geometric transform differs.
    """
    random.seed(opt.manualSeed)

    half = (0.5, 0.5, 0.5)  # maps [0, 1] tensors onto [-1, 1]

    def _pipeline(head):
        # Shared tail appended to a branch-specific leading transform.
        return transforms.Compose([
            head,
            transforms.ToTensor(),
            transforms.Normalize(half, half),
        ])

    color_trans = _pipeline(transforms.Scale(opt.imageSize, Image.BICUBIC))
    quarter_trans = _pipeline(RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC))
    sketch_trans = _pipeline(transforms.Scale(opt.imageSize, Image.BICUBIC))

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=color_trans,
                          vtransform=quarter_trans,
                          stransform=sketch_trans)
    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True,
                           num_workers=int(opt.workers), drop_last=True)
nvData.py 文件源码 项目:PaintsPytorch 作者: orashi 项目源码 文件源码 阅读 28 收藏 0 点赞 0 评论 0
def CreateDataLoader(opt):
    """Build the training DataLoader over the paired color/sketch folders."""
    random.seed(opt.manualSeed)

    # Every branch ends with ToTensor + a [-1, 1] remap.
    norm_half = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

    color_trans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    quarter_trans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    sketch_trans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=color_trans,
                          vtransform=quarter_trans,
                          stransform=sketch_trans)
    assert dataset

    loader = data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True,
                             num_workers=int(opt.workers), drop_last=True)
    return loader
sqData.py 文件源码 项目:PaintsPytorch 作者: orashi 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def CreateDataLoader(opt):
    """Build the training DataLoader; the vtransform branch feeds 224x224
    ImageNet-normalized crops, the other two use a [-1, 1] remap."""
    random.seed(opt.manualSeed)

    half = (0.5, 0.5, 0.5)  # maps [0, 1] tensors onto [-1, 1]

    color_trans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(half, half),
    ])

    # 224 crop + ImageNet mean/std — presumably the input contract of a
    # pretrained ImageNet model consuming this branch; confirm at the caller.
    imagenet_trans = transforms.Compose([
        RandomSizedCrop(224, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    sketch_trans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(half, half),
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=color_trans,
                          vtransform=imagenet_trans,
                          stransform=sketch_trans)
    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True,
                           num_workers=int(opt.workers), drop_last=True)
opData.py 文件源码 项目:PaintsPytorch 作者: orashi 项目源码 文件源码 阅读 34 收藏 0 点赞 0 评论 0
def CreateDataLoader(opt):
    """Build the training DataLoader over the paired color/sketch folders."""
    random.seed(opt.manualSeed)

    half = (0.5, 0.5, 0.5)  # maps [0, 1] tensors onto [-1, 1]

    def _pipeline(head):
        # Branch-specific geometric transform + the shared tensor/normalize tail.
        return transforms.Compose([
            head,
            transforms.ToTensor(),
            transforms.Normalize(half, half),
        ])

    color_trans = _pipeline(transforms.Scale(opt.imageSize, Image.BICUBIC))
    crop_trans = _pipeline(RandomSizedCrop(opt.imageSize, Image.BICUBIC))
    sketch_trans = _pipeline(transforms.Scale(opt.imageSize, Image.BICUBIC))

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=color_trans,
                          vtransform=crop_trans,
                          stransform=sketch_trans)
    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True,
                           num_workers=int(opt.workers), drop_last=True)
proData.py 文件源码 项目:PaintsPytorch 作者: orashi 项目源码 文件源码 阅读 32 收藏 0 点赞 0 评论 0
def CreateDataLoader(opt):
    """Build the training DataLoader; the sketch branch additionally gets a
    random brightness jitter before normalization."""
    random.seed(opt.manualSeed)

    half = (0.5, 0.5, 0.5)  # maps [0, 1] tensors onto [-1, 1]

    color_trans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(half, half),
    ])

    quarter_trans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(half, half),
    ])

    def jitter(x):
        # Blend the tensor toward 1.0 (white, pre-normalization) by a
        # random factor drawn from [0.7, 1].
        ran = random.uniform(0.7, 1)
        return x * ran + 1 - ran

    sketch_trans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Lambda(jitter),
        transforms.Normalize(half, half),
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=color_trans,
                          vtransform=quarter_trans,
                          stransform=sketch_trans)
    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True,
                           num_workers=int(opt.workers), drop_last=True)
data_loader.py 文件源码 项目:DistanceGAN 作者: sagiebenaim 项目源码 文件源码 阅读 28 收藏 0 点赞 0 评论 0
def get_loader(config):
    """Builds and returns Dataloader for MNIST and SVHN dataset."""

    # Shared preprocessing: resize, tensor, [-1, 1] remap.
    transform = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    def _wrap(dataset, shuffle):
        # Train loaders shuffle; test loaders keep a fixed order.
        return torch.utils.data.DataLoader(dataset=dataset,
                                           batch_size=config.batch_size,
                                           shuffle=shuffle,
                                           num_workers=config.num_workers)

    svhn = datasets.SVHN(root=config.svhn_path, download=True,
                         transform=transform, split='train')
    mnist = datasets.MNIST(root=config.mnist_path, download=True,
                           transform=transform, train=True)
    svhn_test = datasets.SVHN(root=config.svhn_path, download=True,
                              transform=transform, split='test')
    mnist_test = datasets.MNIST(root=config.mnist_path, download=True,
                                transform=transform, train=False)

    return (_wrap(svhn, True), _wrap(mnist, True),
            _wrap(svhn_test, False), _wrap(mnist_test, False))
unaligned_data_loader.py 文件源码 项目:DistanceGAN 作者: sagiebenaim 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def initialize(self, opt):
        """Set up the unaligned A/B datasets and wrap them in PairedData."""
        BaseDataLoader.initialize(self, opt)
        # Resize, random crop, tensor, then a [-1, 1] remap.
        transform = transforms.Compose([
            transforms.Scale(opt.loadSize),
            transforms.RandomCrop(opt.fineSize),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5),
                                 (0.5, 0.5, 0.5)),
        ])

        def _domain(suffix):
            # One ImageFolder + DataLoader per domain directory
            # (<dataroot>/<phase>A and <dataroot>/<phase>B).
            folder = ImageFolder(root=opt.dataroot + '/' + opt.phase + suffix,
                                 transform=transform, return_paths=True)
            loader = torch.utils.data.DataLoader(
                folder,
                batch_size=self.opt.batchSize,
                shuffle=not self.opt.serial_batches,
                num_workers=int(self.opt.nThreads))
            return folder, loader

        self.dataset_A, loader_A = _domain('A')
        self.dataset_B, loader_B = _domain('B')

        flip = opt.isTrain and not opt.no_flip
        self.paired_data = PairedData(loader_A, loader_B,
                                      self.opt.max_dataset_size, flip)
def initialize(self, opt):
        """Set up the aligned dataset loader and the paired iterator."""
        BaseDataLoader.initialize(self, opt)
        self.fineSize = opt.fineSize

        # TODO: Scale
        transform = transforms.Compose([
            transforms.Scale(opt.loadSize),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5),
                                 (0.5, 0.5, 0.5)),
        ])

        # Single folder: <dataroot>/<phase>.
        self.dataset = ImageFolder(root=opt.dataroot + '/' + opt.phase,
                                   transform=transform, return_paths=True)
        loader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=self.opt.batchSize,
            shuffle=not self.opt.serial_batches,
            num_workers=int(self.opt.nThreads))

        flip = opt.isTrain and not opt.no_flip
        self.paired_data = PairedData(loader, opt.fineSize,
                                      opt.max_dataset_size, flip)
aligned_dataset.py 文件源码 项目:DeblurGAN 作者: KupynOrest 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def initialize(self, opt):
        """Collect the AB image paths and build the shared tensor transform."""
        self.opt = opt
        self.root = opt.dataroot
        # Images live under <dataroot>/<phase>.
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)
        self.AB_paths = sorted(make_dataset(self.dir_AB))

        #assert(opt.resize_or_crop == 'resize_and_crop')

        # ToTensor then map [0, 1] onto [-1, 1].
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5),
                                 (0.5, 0.5, 0.5)),
        ])
find_best_threthold.py 文件源码 项目:kaggle-planet 作者: ZijunDeng 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def main():
    """Evaluate a ResNet-152 snapshot: tune per-class thresholds on the
    training split, then report loss and metrics on the validation split.

    Side effects: prints losses/metrics and writes ./val_output.mat.
    """
    training_batch_size = 352
    validation_batch_size = 352

    net = get_res152(num_classes=num_classes, snapshot_path=os.path.join(
        ckpt_path, 'epoch_15_validation_loss_0.0772_iter_1000.pth')).cuda()
    net.eval()

    # Dataset-specific channel statistics (not the ImageNet defaults).
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.311, 0.340, 0.299], [0.167, 0.144, 0.138])
    ])
    criterion = nn.MultiLabelSoftMarginLoss().cuda()

    train_set = MultipleClassImageFolder(split_train_dir, transform)
    train_loader = DataLoader(train_set, batch_size=training_batch_size, num_workers=16)
    batch_outputs, batch_labels = predict(net, train_loader)
    loss = criterion(batch_outputs, batch_labels)
    # FIX: converted Python 2 print statements to print() calls, which are
    # valid under both Python 2 and Python 3.
    print('training loss %.4f' % loss.cpu().data.numpy()[0])
    batch_outputs = batch_outputs.cpu().data.numpy()
    batch_labels = batch_labels.cpu().data.numpy()
    thretholds = find_best_threthold(batch_outputs, batch_labels)

    val_set = MultipleClassImageFolder(split_val_dir, transform)
    val_loader = DataLoader(val_set, batch_size=validation_batch_size, num_workers=16)
    batch_outputs, batch_labels = predict(net, val_loader)
    loss = criterion(batch_outputs, batch_labels)
    print('validation loss %.4f' % loss.cpu().data.numpy()[0])
    batch_outputs = batch_outputs.cpu().data.numpy()
    batch_labels = batch_labels.cpu().data.numpy()
    sio.savemat('./val_output.mat', {'outputs': batch_outputs, 'labels': batch_labels})
    prediction = get_one_hot_prediction(batch_outputs, thretholds)
    evaluation = evaluate(prediction, batch_labels)
    print('validation evaluation: accuracy %.4f, precision %.4f, recall %.4f, f2 %.4f' % (
        evaluation[0], evaluation[1], evaluation[2], evaluation[3]))
img_loader.py 文件源码 项目:torch_light 作者: ne7ermore 项目源码 文件源码 阅读 28 收藏 0 点赞 0 评论 0
def toTensor(self, img):
        """Open the image at path *img* and encode it as a channel-swapped,
        mean-subtracted tensor scaled back to the 0-255 range."""
        steps = [
            transforms.Scale(self.img_size),
            transforms.ToTensor(),
            # RGB -> BGR channel reorder.
            transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
            # NOTE(review): these look like Caffe-style BGR means for a
            # pretrained VGG — confirm against the model this feeds.
            transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961],
                                 std=[1, 1, 1]),
            transforms.Lambda(lambda x: x.mul_(255)),
        ]
        return transforms.Compose(steps)(Image.open(img))
img_loader.py 文件源码 项目:torch_light 作者: ne7ermore 项目源码 文件源码 阅读 25 收藏 0 点赞 0 评论 0
def tensor2img(self, tensor):
        """Invert the encoding transform and save the tensor as
        result.jpg under self.img_path."""
        # Undo, in reverse order: the x255 scaling, the mean subtraction,
        # and the BGR channel swap.
        undo = transforms.Compose([
            transforms.Lambda(lambda x: x.mul_(1. / 255)),
            transforms.Normalize(mean=[-0.40760392, -0.45795686, -0.48501961],
                                 std=[1, 1, 1]),
            transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
        ])
        restored = undo(tensor)

        to_pil = transforms.Compose([transforms.ToPILImage()])
        img = to_pil(restored.clamp_(0, 1))

        img.save(self.img_path + "/result.jpg")
image_process.py 文件源码 项目:neural-style 作者: ctliu3 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def preprocess_torch(image_path, size):
    """Load an image, resize it, and return a normalized 4-D batch tensor.

    Args:
        image_path: path to the image file.
        size: (width, height) tuple passed to PIL's resize.

    Returns:
        A Variable of shape (1, C, H, W) with requires_grad=False.
    """
    normalize = transforms.Normalize(mean=MEAN, std=STD)
    transformer = transforms.Compose([
        transforms.ToTensor(),
        normalize])

    # FIX: force 3-channel RGB. PIL can return grayscale, palette or RGBA
    # images, which would break the 3-channel Normalize above.
    image = Image.open(image_path).convert('RGB')
    image = image.resize(size)
    image = Variable(transformer(image), requires_grad=False)
    image = image.unsqueeze(0)
    return image
cifar10.py 文件源码 项目:inferno 作者: inferno-pytorch 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def get_cifar10_loaders(root_directory, train_batch_size=128, test_batch_size=100,
                        download=False):
    """Return (train_loader, test_loader) for CIFAR10.

    Data preparation borrowed from
    https://github.com/kuangliu/pytorch-cifar/blob/master/main.py
    """
    # Per-channel statistics used by both splits.
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                     (0.2023, 0.1994, 0.2010))
    tail = [transforms.ToTensor(), normalize]
    # Training adds crop + flip augmentation in front of the shared tail.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
    ] + tail)
    transform_test = transforms.Compose(tail)

    data_dir = os.path.join(root_directory, 'data')

    def _loader(train, transform, batch_size):
        # Train loader shuffles; test loader does not.
        dataset = torchvision.datasets.CIFAR10(root=data_dir, train=train,
                                               download=download,
                                               transform=transform)
        return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                           shuffle=train, num_workers=2)

    trainloader = _loader(True, transform_train, train_batch_size)
    testloader = _loader(False, transform_test, test_batch_size)
    return trainloader, testloader
data_loader.py 文件源码 项目:pytorch-tutorial 作者: yunjey 项目源码 文件源码 阅读 28 收藏 0 点赞 0 评论 0
def get_loader(image_path, image_size, batch_size, num_workers=2):
    """Builds and returns Dataloader."""
    # Resize, convert to tensor, then map [0, 1] onto [-1, 1].
    pipeline = transforms.Compose([
        transforms.Scale(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    images = ImageFolder(image_path, pipeline)
    return data.DataLoader(dataset=images,
                           batch_size=batch_size,
                           shuffle=True,
                           num_workers=num_workers)
mnist.py 文件源码 项目:pytorch-arda 作者: corenel 项目源码 文件源码 阅读 29 收藏 0 点赞 0 评论 0
def get_mnist(train):
    """Get MNIST dataset loader."""
    # Normalize with the dataset-wide statistics supplied by params.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=params.dataset_mean,
                             std=params.dataset_std),
    ])

    dataset = datasets.MNIST(root=params.data_root,
                             train=train,
                             transform=preprocess,
                             download=True)

    return torch.utils.data.DataLoader(dataset=dataset,
                                       batch_size=params.batch_size,
                                       shuffle=True)


问题


面经


文章

微信
公众号

扫码关注公众号