def test():
    """Smoke-test ListDataset: pull one batch, print tensor shapes, save a grid image."""
    import torchvision

    transform = transforms.Compose([
        transforms.ToTensor(),
        # ImageNet channel statistics (mean, std)
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    dataset = ListDataset(root='/mnt/hgfs/D/download/PASCAL_VOC/voc_all_images',
                          list_file='./data/voc12_train.txt', train=True,
                          transform=transform, input_size=600)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=False,
                                             num_workers=1,
                                             collate_fn=dataset.collate_fn)

    # Inspect only the first batch, then stop.
    for images, loc_targets, cls_targets in dataloader:
        print(images.size())
        print(loc_targets.size())
        print(cls_targets.size())
        grid = torchvision.utils.make_grid(images, 1)
        torchvision.utils.save_image(grid, 'a.jpg')
        break
# test()
# Example source code for usages of the Python class ToTensor()
def train(rank, args, model):
    """Train `model` on MNIST for `args.epochs` epochs in one worker process.

    `rank` offsets the RNG seed so each worker shuffles differently.
    Runs `train_epoch` then `test_epoch` once per epoch.
    """
    torch.manual_seed(args.seed + rank)

    # Same preprocessing for train and test splits (MNIST mean/std).
    mnist_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=mnist_transform),
        batch_size=args.batch_size, shuffle=True, num_workers=1)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=mnist_transform),
        batch_size=args.batch_size, shuffle=True, num_workers=1)

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        train_epoch(epoch, args, model, train_loader, optimizer)
        test_epoch(model, test_loader)
def data_loader(image_root, data_list, shuffle=True, batch_size=64, workers=20, is_cuda=True, is_visualization=False):
    """Build a DataLoader over a person re-id list dataset.

    Crops/scales each person image to 64x128 and converts to tensor.
    `is_cuda` enables pinned memory and worker processes.
    NOTE(review): the `shuffle` parameter is passed to listDataset, while the
    DataLoader itself always shuffles — preserved as-is.
    """
    kwargs = {'num_workers': workers, 'pin_memory': True} if is_cuda else {}

    # No backslash continuations needed inside brackets.
    transform = transforms.Compose([
        trans.person_crop(ratio=(1, 0.75), crop_type=1),
        trans.scale(size=(64, 128)),
        transforms.ToTensor(),
    ])

    preid = dataset.listDataset(
        image_root,
        data_list,
        shuffle,
        transform=transform,
        is_visualization=is_visualization)

    return torch.utils.data.DataLoader(
        preid,
        batch_size=batch_size,
        shuffle=True,
        **kwargs)
def get_transform(resize_crop='resize_and_crop', flip=True,
                  loadSize=286, fineSize=256):
    """Build a preprocessing pipeline selected by `resize_crop`.

    Modes: 'resize_and_crop', 'crop', 'scale_width', 'scale_width_and_crop';
    any other value applies no geometric transform. Always ends with
    ToTensor + Normalize to [-1, 1].
    """
    ops = []
    if resize_crop == 'resize_and_crop':
        ops.append(transforms.Resize([loadSize, loadSize], Image.BICUBIC))
        ops.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'crop':
        ops.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'scale_width':
        ops.append(transforms.Lambda(lambda img: __scale_width(img, fineSize)))
    elif resize_crop == 'scale_width_and_crop':
        ops.append(transforms.Lambda(lambda img: __scale_width(img, loadSize)))
        ops.append(transforms.RandomCrop(fineSize))

    if flip:
        ops.append(transforms.RandomHorizontalFlip())

    ops.extend([transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    return transforms.Compose(ops)
def get_transform(opt):
    """Build a preprocessing pipeline from `opt.resize_or_crop`.

    Mirrors the keyword-argument variant of get_transform above; flips only
    during training unless `opt.no_flip` is set. Always ends with ToTensor +
    Normalize to [-1, 1].
    """
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        # FIX: transforms.Scale is deprecated (removed in modern torchvision);
        # use transforms.Resize, consistent with the sibling get_transform().
        transform_list.append(transforms.Resize(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
def transform(is_train=True, normalize=True):
    """Return a Compose pipeline: scale to 256, crop 224, optional flip/normalize.

    Training uses RandomCrop + RandomHorizontalFlip; evaluation uses CenterCrop.
    """
    ops = [Scale(256)]
    if is_train:
        ops.append(RandomCrop(224))
        ops.append(RandomHorizontalFlip())
    else:
        ops.append(CenterCrop(224))
    ops.append(ToTensor())
    if normalize:
        # ImageNet channel statistics.
        ops.append(Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]))
    return Compose(ops)
def __init__(self, crop_size=128, y_offset=15, flip=False):
    """Store crop parameters and build the post-crop transform pipeline.

    When `flip` is set, a RandomHorizontalFlip is prepended to the pipeline.
    """
    self.crop_size = crop_size
    self.y_offset = y_offset
    self.flip = flip

    # Common tail: scale to 224, tensorize, ImageNet-normalize.
    steps = [
        transforms.Scale(size=224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    if self.flip:
        steps.insert(0, transforms.RandomHorizontalFlip())
    self.post_transform = transforms.Compose(steps)
def test(argv=sys.argv[1:]):
    """Super-resolve one BSDS300 validation image with a saved generator.

    Writes two outputs into the cwd: one via save_image, one via toImage.
    """
    src = "../dataset/BSDS300/images/val/54082.jpg"
    output = "sr_{}".format(basename(src))   # save in cwd
    output2 = "sr__{}".format(basename(src))
    model = "snapshot/gnet-epoch-1-pretrain.pth"
    cuda = True

    img = Image.open(src)
    width, height = img.size
    gennet = torch.load(model)

    tensor = ToTensor()(img)                 # PIL image -> (C, H, W) tensor
    batch = Variable(tensor).view(1, 3, height, width)
    if cuda:
        gennet = gennet.cuda()
        batch = batch.cuda()

    pred = gennet(batch).cpu()
    save_image(pred.data, output)
    toImage(pred).save(output2)
def transform_input(crop_size, upscale_factor):
    """Return the transform producing the LR input of a target image.

    Downscales by `upscale_factor` (integer division of `crop_size`).
    """
    lr_size = crop_size // upscale_factor
    return Compose([Scale(lr_size)])
# def transform_target_batch(crop_size):
# def transform(image):
# patches = extract_subimages(image, crop_size, crop_size)
# patches = [ToTensor()(x) for x in patches]
# return stack(patches, 0)
# return transform
# def transform_input_batch(crop_size, upscale_factor):
# def transform(image):
# patches = extract_subimages(image, crop_size, crop_size)
# patches = [Compose([Scale(crop_size//upscale_factor), ToTensor()])(x) for x in patches]
# return stack(patches, 0)
# return transform
def __init__(self, size, interpolation=Image.BILINEAR):
    # Target size and PIL interpolation mode used by the (unseen) transform body.
    self.size = size
    self.interpolation = interpolation
    # Reused ToTensor instance for PIL->tensor conversion.
    self.toTensor = transforms.ToTensor()
def load_labels(data_dir, resize=(224, 224)):
    """Return the class names discovered under `<data_dir>/train`.

    The transform is only needed to construct the ImageFolder; no images are
    actually loaded here.
    """
    train_transform = transforms.Compose([
        transforms.RandomSizedCrop(max(resize)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    train_set = datasets.ImageFolder(os.path.join(data_dir, 'train'),
                                     train_transform)
    return train_set.classes
def load_data(resize):
    """Build shuffled train/val DataLoaders over the 'PlantVillage' folder.

    NOTE(review): relies on a module-level `batch_size` — confirm it is
    defined before this is called.
    """
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomSizedCrop(max(resize)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
        'val': transforms.Compose([
            # Higher scale-up for inception
            transforms.Scale(int(max(resize) / 224 * 256)),
            transforms.CenterCrop(max(resize)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
    }

    data_dir = 'PlantVillage'
    loaders = {}
    for split in ['train', 'val']:
        folder = datasets.ImageFolder(os.path.join(data_dir, split),
                                      data_transforms[split])
        loaders[split] = torch.utils.data.DataLoader(folder,
                                                     batch_size=batch_size,
                                                     shuffle=True)
    return loaders['train'], loaders['val']
def __init__(self, env):
    """Wrap a CartPole env: keep the unwrapped env and a screen-resize pipeline."""
    super(CartPoleWrapper, self).__init__()
    self.env = env.unwrapped
    self.screen_width = 600
    self.action_space = self.env.action_space
    # tensor -> PIL -> scale to height 40 (bicubic) -> tensor
    self.resize = T.Compose([
        T.ToPILImage(),
        T.Scale(40, interpolation=Image.CUBIC),
        T.ToTensor(),
    ])
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """CenterCrop + ToTensor + Normalize, with an optional leading Scale.

    The Scale step is prepended whenever `scale_size` differs from
    `input_size` (including the None default — preserved behavior).
    """
    pipeline = [
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        pipeline.insert(0, transforms.Scale(scale_size))
    return transforms.Compose(pipeline)
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """RandomCrop + ToTensor + Normalize, with an optional leading Scale.

    FIX: the original built the Compose but never returned it, so callers
    always got None (the sibling scale_crop() correctly returns its Compose).
    """
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Padded RandomCrop + flip + ToTensor + Normalize.

    FIX: with the declared default `scale_size=None`, the original raised
    TypeError on `scale_size - input_size`; treat None as "no padding".
    """
    padding = 0 if scale_size is None else int((scale_size - input_size) / 2)
    return transforms.Compose([
        transforms.RandomCrop(input_size, padding=padding),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
def inception_preproccess(input_size, normalize=__imagenet_stats):
    """Inception-style training preprocessing: random sized crop + flip.

    (Public name's spelling is kept for caller compatibility.)
    """
    steps = [
        transforms.RandomSizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    return transforms.Compose(steps)
def CreateDataLoader(opt):
    """Build a shuffled DataLoader over a paired C/S ImageFolder dataset.

    All three pipelines normalize to [-1, 1]; VTrans crops to a quarter of
    the image size.
    """
    random.seed(opt.manualSeed)

    # Shared normalization to [-1, 1].
    norm_half = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True,
                           num_workers=int(opt.workers), drop_last=True)
def CreateDataLoader(opt):
    """Build a shuffled DataLoader over a paired C/S ImageFolder dataset.

    Identical to the variant above: [-1, 1] normalization everywhere,
    quarter-size random crop for VTrans.
    """
    random.seed(opt.manualSeed)

    # Shared normalization to [-1, 1].
    to_signed = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        to_signed,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        to_signed,
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        to_signed,
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True,
                           num_workers=int(opt.workers), drop_last=True)
def CreateDataLoader(opt):
    """Build a shuffled DataLoader over a paired C/S ImageFolder dataset.

    Unlike the variants above, VTrans crops to a fixed 224 and uses
    ImageNet normalization; C/S pipelines normalize to [-1, 1].
    """
    random.seed(opt.manualSeed)

    # [-1, 1] normalization for the C/S pipelines.
    to_signed = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        to_signed,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(224, Image.BICUBIC),
        transforms.ToTensor(),
        # ImageNet channel statistics.
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        to_signed,
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True,
                           num_workers=int(opt.workers), drop_last=True)