# Common setup assumed by the snippets below: torchvision's transforms module and a
# module-level dict of ImageNet statistics (the source defines __imagenet_stats
# elsewhere in the file; these are the standard values).
import torchvision.transforms as transforms

__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
                    'std': [0.229, 0.224, 0.225]}


def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    t_list = [
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
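A quick usage sketch for scale_crop, assuming the older torchvision API these snippets target (where transforms.Scale exists); the image and sizes are illustrative:

from PIL import Image

eval_transform = scale_crop(input_size=224, scale_size=256)
img = Image.new('RGB', (500, 375))   # stand-in for a real photo
x = eval_transform(img)              # normalized float tensor
print(x.shape)                       # torch.Size([3, 224, 224])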
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    # pad symmetrically so RandomCrop(input_size) can shift within scale_size
    padding = int((scale_size - input_size) / 2)
    return transforms.Compose([
        transforms.RandomCrop(input_size, padding=padding),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
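For concreteness, with CIFAR-style sizes the padding arithmetic reproduces the familiar pad-4-then-crop-32 augmentation (sizes illustrative):

# input_size=32, scale_size=40 -> padding = (40 - 32) // 2 = 4
train_transform = pad_random_crop(input_size=32, scale_size=40)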
def inception_preprocess(input_size, normalize=__imagenet_stats):
    return transforms.Compose([
        transforms.RandomSizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
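The factories above are usually paired by phase; a small hypothetical dispatcher (the wrapper name and defaults are mine, not from the source):

def get_transform(training=True, input_size=224, scale_size=256):
    # hypothetical convenience wrapper around the factories above
    if training:
        return inception_preprocess(input_size)
    return scale_crop(input_size, scale_size)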
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)
    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)
    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(224, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)
    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)
    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    def jitter(x):
        # fade the [0, 1] tensor toward white by a random factor
        # (1 maps to 1, 0 maps to 1 - ran)
        ran = random.uniform(0.7, 1)
        return x * ran + 1 - ran

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Lambda(jitter),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
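All the CreateDataLoader variants above read the same fields off opt; a hedged sketch of a compatible options object, with field names inferred from the function bodies and illustrative values:

from types import SimpleNamespace

opt = SimpleNamespace(manualSeed=42, imageSize=256, batchSize=16, workers=4,
                      datarootC='./data/color',    # hypothetical paths
                      datarootS='./data/sketch')
# loader = CreateDataLoader(opt)   # also requires the custom ImageFolder /
#                                  # RandomSizedCrop these snippets rely on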
def get_loader(config):
    """Builds and returns data loaders for the MNIST and SVHN datasets."""
    transform = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        # note: MNIST is single-channel; the 3-tuple stats rely on older
        # torchvision Normalize, which zipped over channels and ignored extras
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    svhn = datasets.SVHN(root=config.svhn_path, download=True,
                         transform=transform, split='train')
    mnist = datasets.MNIST(root=config.mnist_path, download=True,
                           transform=transform, train=True)
    svhn_test = datasets.SVHN(root=config.svhn_path, download=True,
                              transform=transform, split='test')
    mnist_test = datasets.MNIST(root=config.mnist_path, download=True,
                                transform=transform, train=False)

    svhn_loader = torch.utils.data.DataLoader(dataset=svhn,
                                              batch_size=config.batch_size,
                                              shuffle=True,
                                              num_workers=config.num_workers)
    mnist_loader = torch.utils.data.DataLoader(dataset=mnist,
                                               batch_size=config.batch_size,
                                               shuffle=True,
                                               num_workers=config.num_workers)
    svhn_test_loader = torch.utils.data.DataLoader(dataset=svhn_test,
                                                   batch_size=config.batch_size,
                                                   shuffle=False,
                                                   num_workers=config.num_workers)
    mnist_test_loader = torch.utils.data.DataLoader(dataset=mnist_test,
                                                    batch_size=config.batch_size,
                                                    shuffle=False,
                                                    num_workers=config.num_workers)
    return svhn_loader, mnist_loader, svhn_test_loader, mnist_test_loader
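A minimal consumption sketch for the four loaders, assuming a config object with the fields get_loader reads (values illustrative):

from types import SimpleNamespace

config = SimpleNamespace(image_size=32, batch_size=64, num_workers=2,
                         svhn_path='./svhn', mnist_path='./mnist')  # hypothetical
svhn_loader, mnist_loader, svhn_test_loader, mnist_test_loader = get_loader(config)
images, labels = next(iter(svhn_loader))
print(images.shape)  # torch.Size([64, 3, 32, 32])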
def initialize(self, opt):
    BaseDataLoader.initialize(self, opt)
    transformations = [transforms.Scale(opt.loadSize),
                       transforms.RandomCrop(opt.fineSize),
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    transform = transforms.Compose(transformations)

    # Dataset A
    dataset_A = ImageFolder(root=opt.dataroot + '/' + opt.phase + 'A',
                            transform=transform, return_paths=True)
    data_loader_A = torch.utils.data.DataLoader(
        dataset_A,
        batch_size=self.opt.batchSize,
        shuffle=not self.opt.serial_batches,
        num_workers=int(self.opt.nThreads))

    # Dataset B
    dataset_B = ImageFolder(root=opt.dataroot + '/' + opt.phase + 'B',
                            transform=transform, return_paths=True)
    data_loader_B = torch.utils.data.DataLoader(
        dataset_B,
        batch_size=self.opt.batchSize,
        shuffle=not self.opt.serial_batches,
        num_workers=int(self.opt.nThreads))

    self.dataset_A = dataset_A
    self.dataset_B = dataset_B
    flip = opt.isTrain and not opt.no_flip
    self.paired_data = PairedData(data_loader_A, data_loader_B,
                                  self.opt.max_dataset_size, flip)
def initialize(self, opt):
    BaseDataLoader.initialize(self, opt)
    self.fineSize = opt.fineSize
    transformations = [
        # TODO: Scale
        transforms.Scale(opt.loadSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5),
                             (0.5, 0.5, 0.5))]
    transform = transforms.Compose(transformations)

    # dataset
    dataset = ImageFolder(root=opt.dataroot + '/' + opt.phase,
                          transform=transform, return_paths=True)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=self.opt.batchSize,
        shuffle=not self.opt.serial_batches,
        num_workers=int(self.opt.nThreads))

    self.dataset = dataset
    flip = opt.isTrain and not opt.no_flip
    self.paired_data = PairedData(data_loader, opt.fineSize,
                                  opt.max_dataset_size, flip)
def initialize(self, opt):
    self.opt = opt
    self.root = opt.dataroot
    self.dir_AB = os.path.join(opt.dataroot, opt.phase)
    self.AB_paths = sorted(make_dataset(self.dir_AB))
    # assert(opt.resize_or_crop == 'resize_and_crop')
    transform_list = [transforms.ToTensor(),
                      transforms.Normalize((0.5, 0.5, 0.5),
                                           (0.5, 0.5, 0.5))]
    self.transform = transforms.Compose(transform_list)
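A hedged sketch of how such an aligned dataset typically consumes self.AB_paths and self.transform; the side-by-side A|B layout is an assumption (pix2pix-style), not shown in the source:

def __getitem__(self, index):
    AB_path = self.AB_paths[index]
    AB = Image.open(AB_path).convert('RGB')
    AB = self.transform(AB)                # tensor of shape (3, H, 2W)
    w = AB.size(2) // 2
    A, B = AB[:, :, :w], AB[:, :, w:]      # split the concatenated pair
    return {'A': A, 'B': B, 'AB_path': AB_path}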
def main():
    training_batch_size = 352
    validation_batch_size = 352

    net = get_res152(num_classes=num_classes, snapshot_path=os.path.join(
        ckpt_path, 'epoch_15_validation_loss_0.0772_iter_1000.pth')).cuda()
    net.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.311, 0.340, 0.299], [0.167, 0.144, 0.138])
    ])
    criterion = nn.MultiLabelSoftMarginLoss().cuda()

    train_set = MultipleClassImageFolder(split_train_dir, transform)
    train_loader = DataLoader(train_set, batch_size=training_batch_size, num_workers=16)
    batch_outputs, batch_labels = predict(net, train_loader)
    loss = criterion(batch_outputs, batch_labels)
    print('training loss %.4f' % loss.cpu().data.numpy()[0])
    batch_outputs = batch_outputs.cpu().data.numpy()
    batch_labels = batch_labels.cpu().data.numpy()
    thretholds = find_best_threthold(batch_outputs, batch_labels)

    val_set = MultipleClassImageFolder(split_val_dir, transform)
    val_loader = DataLoader(val_set, batch_size=validation_batch_size, num_workers=16)
    batch_outputs, batch_labels = predict(net, val_loader)
    loss = criterion(batch_outputs, batch_labels)
    print('validation loss %.4f' % loss.cpu().data.numpy()[0])
    batch_outputs = batch_outputs.cpu().data.numpy()
    batch_labels = batch_labels.cpu().data.numpy()
    sio.savemat('./val_output.mat', {'outputs': batch_outputs, 'labels': batch_labels})
    prediction = get_one_hot_prediction(batch_outputs, thretholds)
    evaluation = evaluate(prediction, batch_labels)
    print('validation evaluation: accuracy %.4f, precision %.4f, recall %.4f, f2 %.4f' % (
        evaluation[0], evaluation[1], evaluation[2], evaluation[3]))
def toTensor(self, img):
    # Caffe-VGG-style preprocessing: reorder RGB -> BGR, subtract the
    # (0-1 scaled) mean, then rescale to the [0, 255] range the network expects
    encode = transforms.Compose([transforms.Scale(self.img_size),
                                 transforms.ToTensor(),
                                 transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
                                 transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961],
                                                      std=[1, 1, 1]),
                                 transforms.Lambda(lambda x: x.mul_(255)),
                                 ])
    return encode(Image.open(img))
def tensor2img(self, tensor):
    # invert toTensor(): scale back to [0, 1], add the mean, restore RGB order
    decode = transforms.Compose([transforms.Lambda(lambda x: x.mul_(1. / 255)),
                                 transforms.Normalize(mean=[-0.40760392, -0.45795686, -0.48501961],
                                                      std=[1, 1, 1]),
                                 transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
                                 ])
    tensor = decode(tensor)
    loader = transforms.Compose([transforms.ToPILImage()])
    img = loader(tensor.clamp_(0, 1))
    img.save(self.img_path + "/result.jpg")
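The two methods above are inverses up to clamping; a minimal round-trip sketch, assuming obj is an instance of the class defining them (with img_size and img_path set):

t = obj.toTensor('content.jpg')   # hypothetical input path
obj.tensor2img(t.clone())         # clone first: decode mutates its input in place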
def preprocess_torch(image_path, size):
    # PIL reads the image in RGB format
    normalize = transforms.Normalize(mean=MEAN, std=STD)
    transformer = transforms.Compose([
        transforms.ToTensor(),
        normalize])
    image = Image.open(image_path)
    image = image.resize(size)
    image = Variable(transformer(image), requires_grad=False)
    image = image.unsqueeze(0)
    return image
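MEAN and STD are module constants in the source; a usage sketch assuming the standard ImageNet values for them (the image path is hypothetical):

MEAN = [0.485, 0.456, 0.406]   # assumed: standard ImageNet statistics
STD = [0.229, 0.224, 0.225]

batch = preprocess_torch('photo.jpg', size=(224, 224))
print(batch.shape)  # torch.Size([1, 3, 224, 224])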
def get_cifar10_loaders(root_directory, train_batch_size=128, test_batch_size=100,
                        download=False):
    # Data preparation for CIFAR10. Borrowed from
    # https://github.com/kuangliu/pytorch-cifar/blob/master/main.py
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = torchvision.datasets.CIFAR10(root=os.path.join(root_directory, 'data'),
                                            train=True, download=download,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
                                              shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root=os.path.join(root_directory, 'data'),
                                           train=False, download=download,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size,
                                             shuffle=False, num_workers=2)
    return trainloader, testloader
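A quick smoke test of the CIFAR-10 loaders (the root directory is illustrative; download=True fetches the dataset on first use):

trainloader, testloader = get_cifar10_loaders('./work', download=True)
images, labels = next(iter(trainloader))
print(images.shape, labels.shape)  # torch.Size([128, 3, 32, 32]) torch.Size([128])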
def get_loader(image_path, image_size, batch_size, num_workers=2):
    """Builds and returns a DataLoader."""
    transform = transforms.Compose([
        transforms.Scale(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    dataset = ImageFolder(image_path, transform)
    data_loader = data.DataLoader(dataset=dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=num_workers)
    return data_loader
def get_mnist(train):
    """Get MNIST dataset loader."""
    # image pre-processing
    pre_process = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=params.dataset_mean,
                                          std=params.dataset_std)])
    # dataset and data loader
    mnist_dataset = datasets.MNIST(root=params.data_root,
                                   train=train,
                                   transform=pre_process,
                                   download=True)
    mnist_data_loader = torch.utils.data.DataLoader(
        dataset=mnist_dataset,
        batch_size=params.batch_size,
        shuffle=True)
    return mnist_data_loader
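params here is the project's configuration module; a hedged sketch of the fields get_mnist expects, using an illustrative stand-in defined in the same module scope:

class params:                      # stand-in for the params module the snippet imports
    data_root = './data'
    dataset_mean = (0.5,)          # illustrative single-channel statistics
    dataset_std = (0.5,)
    batch_size = 128

train_loader = get_mnist(train=True)
images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([128, 1, 28, 28])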