def initialize(self, opt):
    BaseDataLoader.initialize(self, opt)
    transformations = [transforms.Scale(opt.loadSize),
                       transforms.RandomCrop(opt.fineSize),
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    transform = transforms.Compose(transformations)

    # Dataset A
    dataset_A = ImageFolder(root=opt.dataroot + '/' + opt.phase + 'A',
                            transform=transform, return_paths=True)
    data_loader_A = torch.utils.data.DataLoader(
        dataset_A,
        batch_size=self.opt.batchSize,
        shuffle=not self.opt.serial_batches,
        num_workers=int(self.opt.nThreads))

    # Dataset B
    dataset_B = ImageFolder(root=opt.dataroot + '/' + opt.phase + 'B',
                            transform=transform, return_paths=True)
    data_loader_B = torch.utils.data.DataLoader(
        dataset_B,
        batch_size=self.opt.batchSize,
        shuffle=not self.opt.serial_batches,
        num_workers=int(self.opt.nThreads))

    self.dataset_A = dataset_A
    self.dataset_B = dataset_B
    flip = opt.isTrain and not opt.no_flip
    self.paired_data = PairedData(data_loader_A, data_loader_B,
                                  self.opt.max_dataset_size, flip)

def initialize(self, opt):
    BaseDataLoader.initialize(self, opt)
    self.fineSize = opt.fineSize

    transformations = [
        # TODO: Scale
        transforms.Scale(opt.loadSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5),
                             (0.5, 0.5, 0.5))]
    transform = transforms.Compose(transformations)

    # Dataset
    dataset = ImageFolder(root=opt.dataroot + '/' + opt.phase,
                          transform=transform, return_paths=True)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=self.opt.batchSize,
        shuffle=not self.opt.serial_batches,
        num_workers=int(self.opt.nThreads))

    self.dataset = dataset
    flip = opt.isTrain and not opt.no_flip
    self.paired_data = PairedData(data_loader, opt.fineSize,
                                  opt.max_dataset_size, flip)

def toTensor(self, img):
    # Preprocessing for a Caffe-trained VGG: resize, swap RGB -> BGR,
    # subtract the ImageNet mean, then rescale to the 0-255 range.
    encode = transforms.Compose([
        transforms.Scale(self.img_size),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
        transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961],
                             std=[1, 1, 1]),
        transforms.Lambda(lambda x: x.mul_(255)),
    ])
    return encode(Image.open(img))

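# Minimal usage sketch (hypothetical names, not from the original source):
# assuming `prep` is an instance of the class above with `img_size` set, this
# turns an image file into a 1 x 3 x H x W batch for a Caffe-style VGG.
content = prep.toTensor('content.jpg')   # BGR, mean-subtracted, roughly 0-255 range
content_batch = content.unsqueeze(0)     # add the batch dimension
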
def __init__(self, path, img_size, batch_size, is_cuda):
    self._img_files = os.listdir(path)
    self._path = path
    self._is_cuda = is_cuda
    self._step = 0
    self._batch_size = batch_size
    self.sents_size = len(self._img_files)
    self._stop_step = self.sents_size // batch_size

    self._encode = transforms.Compose([
        transforms.Scale(img_size),
        transforms.RandomCrop(img_size),
        transforms.ToTensor()
    ])

def get_loader(image_path, image_size, batch_size, num_workers=2):
    """Builds and returns Dataloader."""
    transform = transforms.Compose([
        transforms.Scale(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    dataset = ImageFolder(image_path, transform)
    data_loader = data.DataLoader(dataset=dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=num_workers)
    return data_loader

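# Usage sketch (the path is a placeholder): each batch yields images resized to
# `image_size` and normalized to [-1, 1]; assuming torchvision's ImageFolder,
# labels come from the subfolder names.
train_loader = get_loader('data/train', image_size=128, batch_size=16)
for images, labels in train_loader:
    pass  # feed the batch to a model here
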
def returnTF():
    # load the image transformer
    tf = trn.Compose([
        trn.Scale((224, 224)),
        trn.ToTensor(),
        trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    return tf

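# Usage sketch (file name is a placeholder; assumes `from PIL import Image`):
# apply the transform to a PIL image and add a batch dimension before feeding
# an ImageNet-pretrained model.
img = Image.open('test.jpg').convert('RGB')
input_batch = returnTF()(img).unsqueeze(0)   # shape (1, 3, 224, 224)
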
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    t_list = [
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list

    return transforms.Compose(t_list)

def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list

    return transforms.Compose(t_list)

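# Usage sketch: `__imagenet_stats` is assumed to be a module-level dict such as
# {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}.
train_tf = scale_random_crop(input_size=224, scale_size=256)  # resize to 256, random 224 crop + flip
eval_tf = scale_crop(input_size=224, scale_size=256)          # resize to 256, center 224 crop
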
def default_inception_transform(img_size):
    tf = transforms.Compose([
        transforms.Scale(img_size),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        LeNormalize(),
    ])
    return tf

def default_transform(size):
    transform = transforms.Compose([
        transforms.Scale(size),
        transforms.CenterCrop(size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],   # ResNet / ImageNet statistics
                             std=[0.229, 0.224, 0.225])
    ])
    return transform

def img_transform(crop_size, upscale_factor=1):
    return transforms.Compose([
        transforms.Scale(crop_size // upscale_factor),
        transforms.CenterCrop(crop_size // upscale_factor),
        transforms.ToTensor()])

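# Usage sketch for a super-resolution style setup (sizes are assumptions): the
# low-resolution input is the crop downscaled by the upscale factor.
lr_transform = img_transform(crop_size=256, upscale_factor=4)   # 64x64 tensors
hr_transform = img_transform(crop_size=256)                     # 256x256 targets
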
def MNIST_loader(root, image_size, normalize=True):
    """
    Loads a torchvision MNIST dataset object resized to the given image size.

    Args:
        root       = Location of the dataset; if it is not already present, it will be downloaded here
        image_size = Size of every image
        normalize  = Whether to normalize the images. Default is True
    """
    transformations = [transforms.Scale(image_size), transforms.ToTensor()]
    if normalize:
        transformations.append(transforms.Normalize((0.5,), (0.5,)))
    mnist_data = dset.MNIST(root=root, download=True, transform=transforms.Compose(transformations))
    return mnist_data

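# Usage sketch (path is a placeholder; assumes `torch` is imported): wrap the
# returned dataset in a DataLoader to iterate over normalized 32x32 MNIST batches.
mnist = MNIST_loader(root='./data', image_size=32)
mnist_loader = torch.utils.data.DataLoader(mnist, batch_size=64, shuffle=True)
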
def CIFAR10_loader(root, image_size, normalize=True):
    """
    Loads a torchvision CIFAR10 dataset object resized to the given image size.

    Args:
        root       = Location of the dataset; if it is not already present, it will be downloaded here
        image_size = Size of every image
        normalize  = Whether to normalize the images. Default is True
    """
    transformations = [transforms.Scale(image_size), transforms.ToTensor()]
    if normalize:
        transformations.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    cifar10_data = dset.CIFAR10(root=root, download=True, transform=transforms.Compose(transformations))
    return cifar10_data

def CUB200_2010_loader(root, image_size, normalize=True):
    """
    Loads a CUB-200-2010 dataset object resized to the given image size.

    Args:
        root       = Location of the dataset; if it is not already present, it will be downloaded here
        image_size = Size of every image
        normalize  = Whether to normalize the images. Default is True
    """
    transformations = [transforms.Scale(image_size), transforms.ToTensor()]
    if normalize:
        transformations.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    cub200_2010_data = CUB2002010(root=root, download=True, transform=transforms.Compose(transformations))
    return cub200_2010_data

def FASHIONMNIST_loader(root, image_size, normalize=True):
    """
    Loads a torchvision FashionMNIST dataset object resized to the given image size.

    Args:
        root       = Location of the dataset; if it is not already present, it will be downloaded here
        image_size = Size of every image
        normalize  = Whether to normalize the images. Default is True
    """
    transformations = [transforms.Scale(image_size), transforms.ToTensor()]
    if normalize:
        transformations.append(transforms.Normalize((0.5,), (0.5,)))
    fash_mnist_data = dset.FashionMNIST(root=root, download=True, transform=transforms.Compose(transformations))
    return fash_mnist_data

def __init__(self, opt):
    transform_list = []

    if (opt.crop_height > 0) and (opt.crop_width > 0):
        transform_list.append(transforms.CenterCrop((opt.crop_height, opt.crop_width)))
    elif opt.crop_size > 0:
        transform_list.append(transforms.CenterCrop(opt.crop_size))

    transform_list.append(transforms.Scale(opt.image_size))
    transform_list.append(transforms.CenterCrop(opt.image_size))
    transform_list.append(transforms.ToTensor())

    if opt.dataset == 'cifar10':
        # CIFAR-10: treat the train and test splits as one indexable pool
        dataset1 = datasets.CIFAR10(root=opt.dataroot, download=True,
                                    transform=transforms.Compose(transform_list))
        dataset2 = datasets.CIFAR10(root=opt.dataroot, train=False,
                                    transform=transforms.Compose(transform_list))

        def get_data(k):
            if k < len(dataset1):
                return dataset1[k][0]
            else:
                return dataset2[k - len(dataset1)][0]
    else:
        if opt.dataset in ['imagenet', 'folder', 'lfw']:
            dataset = datasets.ImageFolder(root=opt.dataroot,
                                           transform=transforms.Compose(transform_list))
        elif opt.dataset == 'lsun':
            dataset = datasets.LSUN(db_path=opt.dataroot, classes=[opt.lsun_class + '_train'],
                                    transform=transforms.Compose(transform_list))

        def get_data(k):
            return dataset[k][0]

    data_index = torch.load(os.path.join(opt.dataroot, 'data_index.pt'))

    self.opt = opt
    self.get_data = get_data
    self.train_index = data_index['train']
    self.counter = 0

def get_dataloader(opt):
    if opt.dataset in ['imagenet', 'folder', 'lfw']:
        # folder dataset
        dataset = dset.ImageFolder(root=opt.dataroot,
                                   transform=transforms.Compose([
                                       transforms.Scale(opt.imageScaleSize),
                                       transforms.CenterCrop(opt.imageSize),
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.5, 0.5, 0.5),
                                                            (0.5, 0.5, 0.5)),
                                   ]))
    elif opt.dataset == 'lsun':
        dataset = dset.LSUN(db_path=opt.dataroot, classes=['bedroom_train'],
                            transform=transforms.Compose([
                                transforms.Scale(opt.imageScaleSize),
                                transforms.CenterCrop(opt.imageSize),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5),
                                                     (0.5, 0.5, 0.5)),
                            ]))
    elif opt.dataset == 'cifar10':
        dataset = dset.CIFAR10(root=opt.dataroot, download=True,
                               transform=transforms.Compose([
                                   transforms.Scale(opt.imageSize),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5),
                                                        (0.5, 0.5, 0.5)),
                               ]))
    assert dataset

    dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
    return dataloader

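# Usage sketch (values are placeholders; assumes `import argparse`): `opt` is
# expected to carry the fields read above, e.g. built from an argparse.Namespace.
opt = argparse.Namespace(dataset='cifar10', dataroot='./data',
                         imageScaleSize=36, imageSize=32,
                         batch_size=64, workers=2)
dataloader = get_dataloader(opt)
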
def Imagenet_LMDB_generate(imagenet_dir, output_dir, make_val=False, make_train=False):
    # imagenet_dir should contain a directory named 'train' or 'val',
    # each holding 1000 folders of raw jpeg photos
    train_name = 'imagenet_train_lmdb'
    val_name = 'imagenet_val_lmdb'

    def target_trans(target):
        return target

    if make_val:
        val_lmdb = lmdb_datasets.LMDB_generator(osp.join(output_dir, val_name))

        def trans_val_data(dir):
            tensor = transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
                transforms.ToTensor()
            ])(dir)
            tensor = (tensor.numpy() * 255).astype(np.uint8)
            return tensor

        val = datasets.ImageFolder(osp.join(imagenet_dir, 'val'), trans_val_data, target_trans)
        val_lmdb.write_classification_lmdb(val, num_per_dataset=DATASET_SIZE)

    if make_train:
        train_lmdb = lmdb_datasets.LMDB_generator(osp.join(output_dir, train_name))

        def trans_train_data(dir):
            tensor = transforms.Compose([
                transforms.Scale(256),
                transforms.ToTensor()
            ])(dir)
            tensor = (tensor.numpy() * 255).astype(np.uint8)
            return tensor

        train = datasets.ImageFolder(osp.join(imagenet_dir, 'train'), trans_train_data, target_trans)
        train.imgs = np.random.permutation(train.imgs)
        train_lmdb.write_classification_lmdb(train, num_per_dataset=DATASET_SIZE, write_shape=True)

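# Usage sketch (paths are placeholders; DATASET_SIZE and lmdb_datasets come from
# the surrounding module): convert the raw ImageNet validation folder into an
# LMDB of uint8 tensors.
Imagenet_LMDB_generate('/data/imagenet', '/data/imagenet_lmdb', make_val=True)
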