def train(rank, args, model):
    torch.manual_seed(args.seed + rank)

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, num_workers=1)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args.batch_size, shuffle=True, num_workers=1)

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        train_epoch(epoch, args, model, train_loader, optimizer)
        test_epoch(model, test_loader)
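# Hypothetical launch code for the Hogwild-style train() above. This is a sketch
# only: Net, parser, and num_processes are assumptions, not part of the original
# snippet. Sharing the model's memory lets every worker update the same parameters.
if __name__ == '__main__':
    import torch.multiprocessing as mp
    args = parser.parse_args()   # assumed argparse parser with seed/lr/momentum/epochs/batch_size
    model = Net()                # assumed model class
    model.share_memory()         # gradients are allocated lazily, so sharing the model is enough
    processes = []
    for rank in range(args.num_processes):
        p = mp.Process(target=train, args=(rank, args, model))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()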
Example source code for the Python class Compose()
def data_loader(image_root, data_list, shuffle=True, batch_size=64, workers=20, is_cuda=True, is_visualization=False):
    kwargs = {'num_workers': workers, 'pin_memory': True} if is_cuda else {}
    transform = transforms.Compose([
        trans.person_crop(ratio=(1, 0.75), crop_type=1),
        trans.scale(size=(64, 128)),
        transforms.ToTensor()
    ])
    preid = dataset.listDataset(
        image_root,
        data_list,
        shuffle,
        transform=transform,
        is_visualization=is_visualization)
    data_loader = torch.utils.data.DataLoader(preid,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              **kwargs)
    return data_loader
def get_transform(resize_crop='resize_and_crop', flip=True,
                  loadSize=286, fineSize=256):
    transform_list = []
    if resize_crop == 'resize_and_crop':
        osize = [loadSize, loadSize]
        transform_list.append(transforms.Resize(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'crop':
        transform_list.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, fineSize)))
    elif resize_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, loadSize)))
        transform_list.append(transforms.RandomCrop(fineSize))
    if flip:
        transform_list.append(transforms.RandomHorizontalFlip())
    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
def __call__(self, sample):
    image, landmarks = sample['image'], sample['landmarks']

    # swap color axis because
    # numpy image: H x W x C
    # torch image: C x H x W
    image = image.transpose((2, 0, 1))
    return {'image': torch.from_numpy(image),
            'landmarks': torch.from_numpy(landmarks)}
######################################################################
# Compose transforms
# ~~~~~~~~~~~~~~~~~~
#
# Now, we apply the transforms on a sample.
#
# Let's say we want to rescale the shorter side of the image to 256 and
# then randomly crop a square of size 224 from it, i.e. we want to compose
# the ``Rescale`` and ``RandomCrop`` transforms.
# ``torchvision.transforms.Compose`` is a simple callable class which allows us
# to do this.
#
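# A minimal sketch of that composition, assuming the tutorial's custom
# ``Rescale`` and ``RandomCrop`` transforms are defined and ``sample`` is a
# dict holding 'image' and 'landmarks':
composed = transforms.Compose([Rescale(256),
                               RandomCrop(224)])
transformed_sample = composed(sample)   # rescaled then randomly cropped sample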
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())
    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
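# Hypothetical usage of a transform built by either get_transform() above
# (path and opt are placeholders, not taken from the original source):
transform = get_transform(opt)
img = Image.open(path).convert('RGB')
img_tensor = transform(img)   # C x H x W float tensor, normalized to [-1, 1]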
def transform(is_train=True, normalize=True):
    """
    Returns a transform object
    """
    filters = []
    filters.append(Scale(256))
    if is_train:
        filters.append(RandomCrop(224))
    else:
        filters.append(CenterCrop(224))
    if is_train:
        filters.append(RandomHorizontalFlip())
    filters.append(ToTensor())
    if normalize:
        filters.append(Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]))
    return Compose(filters)
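# A sketch of how transform() might be wired into a dataset (the directory names
# and batch size are placeholders; datasets and DataLoader imports are assumed):
train_set = datasets.ImageFolder('data/train', transform=transform(is_train=True))
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
val_set = datasets.ImageFolder('data/val', transform=transform(is_train=False))
val_loader = torch.utils.data.DataLoader(val_set, batch_size=32, shuffle=False)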
def __init__(self, crop_size=128, y_offset=15, flip=False):
    self.crop_size = crop_size
    self.y_offset = y_offset
    self.flip = flip

    if self.flip:
        self.post_transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.Scale(size=224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    else:
        self.post_transform = transforms.Compose([
            transforms.Scale(size=224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
def transform_input(crop_size, upscale_factor):
    """LR of target image
    """
    return Compose([
        Scale(crop_size // upscale_factor),
    ])
# def transform_target_batch(crop_size):
#     def transform(image):
#         patches = extract_subimages(image, crop_size, crop_size)
#         patches = [ToTensor()(x) for x in patches]
#         return stack(patches, 0)
#     return transform
# def transform_input_batch(crop_size, upscale_factor):
#     def transform(image):
#         patches = extract_subimages(image, crop_size, crop_size)
#         patches = [Compose([Scale(crop_size//upscale_factor), ToTensor()])(x) for x in patches]
#         return stack(patches, 0)
#     return transform
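# Hypothetical pairing of transform_input() with a high-resolution target crop
# (hr_crop is a placeholder PIL image; the sizes are illustrative only):
input_transform = transform_input(crop_size=128, upscale_factor=4)
lr_image = input_transform(hr_crop)   # 32x32 low-resolution version of a 128x128 crop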
def load_labels(data_dir, resize=(224, 224)):
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomSizedCrop(max(resize)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }
    dsets = {x: datasets.ImageFolder(os.path.join(data_dir, 'train'), data_transforms[x])
             for x in ['train']}
    return dsets['train'].classes
def load_data(resize):
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomSizedCrop(max(resize)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            # Higher scale-up for Inception
            transforms.Scale(int(max(resize) / 224 * 256)),
            transforms.CenterCrop(max(resize)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    data_dir = 'PlantVillage'
    dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
             for x in ['train', 'val']}
    # batch_size is assumed to be defined at module scope in the original file
    dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=batch_size,
                                                   shuffle=True)
                    for x in ['train', 'val']}
    dset_sizes = {x: len(dsets[x]) for x in ['train', 'val']}
    dset_classes = dsets['train'].classes
    return dset_loaders['train'], dset_loaders['val']
def __init__(self, env):
    super(CartPoleWrapper, self).__init__()
    self.env = env.unwrapped
    self.resize = T.Compose([T.ToPILImage(),
                             T.Scale(40, interpolation=Image.CUBIC),
                             T.ToTensor()])
    self.screen_width = 600
    self.action_space = self.env.action_space
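# A sketch of how this resize pipeline is typically applied to a rendered frame
# (get_screen, numpy as np, torch, and the gym render call are assumptions, not
# part of the original snippet):
def get_screen(self):
    screen = self.env.render(mode='rgb_array').transpose((2, 0, 1))  # H x W x C -> C x H x W
    screen = torch.from_numpy(np.ascontiguousarray(screen, dtype=np.float32) / 255)
    return self.resize(screen).unsqueeze(0)   # add a batch dimension for the network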
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    t_list = [
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    padding = int((scale_size - input_size) / 2)
    return transforms.Compose([
        transforms.RandomCrop(input_size, padding=padding),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
def inception_preproccess(input_size, normalize=__imagenet_stats):
    return transforms.Compose([
        transforms.RandomSizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize)
    ])
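# Hypothetical dispatcher over the helpers above (get_transform_for is not from
# the original source; __imagenet_stats is assumed to be the usual ImageNet
# mean/std dict defined at module scope):
def get_transform_for(training, input_size=224, scale_size=256):
    if training:
        return inception_preproccess(input_size)
    return scale_crop(input_size, scale_size=scale_size)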
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)

    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans
                          )
    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers), drop_last=True)
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)

    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(224, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans
                          )
    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers), drop_last=True)
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)

    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans
                          )
    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers), drop_last=True)
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)

    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    def jitter(x):
        # randomly fade the tensor towards white before normalization
        ran = random.uniform(0.7, 1)
        return x * ran + 1 - ran

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Lambda(jitter),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans
                          )
    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers), drop_last=True)
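# Hypothetical consumption of any CreateDataLoader variant above (opt is assumed
# to be an argparse-style namespace with datarootC, datarootS, imageSize,
# batchSize, workers, and manualSeed):
loader = CreateDataLoader(opt)
for batch in loader:
    # batch layout depends on the custom ImageFolder, typically paired color /
    # sketch tensors of shape B x 3 x imageSize x imageSize
    pass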