def stylize(args):
    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0)
    if args.cuda:
        content_image = content_image.cuda()
    content_image = Variable(content_image, volatile=True)

    style_model = TransformerNet()
    style_model.load_state_dict(torch.load(args.model))
    if args.cuda:
        style_model.cuda()
    output = style_model(content_image)
    if args.cuda:
        output = output.cpu()
    output_data = output.data[0]
    utils.save_image(args.output_image, output_data)
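
In this snippet, transforms.Lambda(lambda x: x.mul(255)) rescales the ToTensor output from [0, 1] back to [0, 255], the range this transformer network expects. Variable(..., volatile=True) is the pre-0.4 PyTorch inference API; on current PyTorch you would wrap the forward pass in torch.no_grad() instead. A minimal sketch of the same preprocessing, with a hypothetical input path:

from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.ToTensor(),                    # PIL image -> float tensor in [0, 1]
    transforms.Lambda(lambda x: x.mul(255)),  # rescale to [0, 255] as the model expects
])
# img = Image.open("content.jpg")             # hypothetical path
# batch = preprocess(img).unsqueeze(0)        # add batch dimension: (1, C, H, W)
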
Example source code for the Python Lambda() class
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSizeX, opt.loadSizeY]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSizeX)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
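
The __scale_width helper wrapped in transforms.Lambda above is not included in these snippets. In pix2pix/CycleGAN-style codebases it resizes the image so its width equals the target width while preserving the aspect ratio; the following is a rough sketch of what such a helper looks like, written as an assumption rather than the project's exact code:

from PIL import Image

def __scale_width(img, target_width):
    # Resize a PIL image so its width equals target_width, keeping the aspect ratio.
    ow, oh = img.size
    if ow == target_width:
        return img
    w = target_width
    h = int(target_width * oh / ow)
    return img.resize((w, h), Image.BICUBIC)
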
Source file: base_dataset.py — project: CycleGANwithPerceptionLoss — author: EliasVansteenkiste
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
def get_transform(resize_crop='resize_and_crop', flip=True,
                  loadSize=286, fineSize=256):
    transform_list = []
    if resize_crop == 'resize_and_crop':
        osize = [loadSize, loadSize]
        transform_list.append(transforms.Resize(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'crop':
        transform_list.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, fineSize)))
    elif resize_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, loadSize)))
        transform_list.append(transforms.RandomCrop(fineSize))

    if flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
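
This variant takes plain keyword arguments instead of an opt object and uses transforms.Resize, the current name for the deprecated transforms.Scale. A hedged usage sketch, with a hypothetical image path:

from PIL import Image

transform = get_transform(resize_crop='resize_and_crop', flip=True,
                          loadSize=286, fineSize=256)
# img = Image.open("example.jpg").convert("RGB")   # hypothetical path
# tensor = transform(img)                          # float tensor in [-1, 1], shape (3, 256, 256)
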
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)

    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    def jitter(x):
        ran = random.uniform(0.7, 1)
        return x * ran + 1 - ran

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Lambda(jitter),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans
                          )
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers), drop_last=True)
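
The jitter Lambda in STrans runs on the [0, 1] tensor produced by ToTensor, before Normalize: x * ran + 1 - ran maps 1 to 1 and 0 to 1 - ran, so it lifts the image toward white by a random amount. A small self-contained check of that behaviour (the explicit ran argument is added here only for determinism):

import random
import torch

def jitter(x, ran=None):
    ran = random.uniform(0.7, 1) if ran is None else ran
    return x * ran + 1 - ran

x = torch.zeros(3, 4, 4)                 # an all-black image in [0, 1]
print(jitter(x, ran=0.8)[0, 0, 0])       # tensor(0.2000): black is lifted toward white
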
def toTensor(self, img):
    encode = transforms.Compose([transforms.Scale(self.img_size),
                                 transforms.ToTensor(),
                                 transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
                                 transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961],
                                                      std=[1, 1, 1]),
                                 transforms.Lambda(lambda x: x.mul_(255)),
                                 ])
    return encode(Image.open(img))
def tensor2img(self, tensor):
    decode = transforms.Compose([transforms.Lambda(lambda x: x.mul_(1. / 255)),
                                 transforms.Normalize(mean=[-0.40760392, -0.45795686, -0.48501961],
                                                      std=[1, 1, 1]),
                                 transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
                                 ])
    tensor = decode(tensor)

    loader = transforms.Compose([transforms.ToPILImage()])
    img = loader(tensor.clamp_(0, 1))
    img.save(self.img_path + "/result.jpg")
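
The toTensor/tensor2img pair implements the classic Caffe-style VGG preprocessing and its inverse: one Lambda swaps RGB to BGR, Normalize subtracts the VGG (Caffe) ImageNet channel means, and another Lambda scales to [0, 255]; decoding applies the same steps in reverse. The channel swap is its own inverse, since indexing with [2, 1, 0] reverses the channel dimension, as this small check illustrates:

import torch

x = torch.arange(3 * 2 * 2, dtype=torch.float32).view(3, 2, 2)  # stand-in 3-channel image
bgr = x[torch.LongTensor([2, 1, 0])]     # the same operation as the Lambda in toTensor
rgb = bgr[torch.LongTensor([2, 1, 0])]   # applying it again restores the original order
assert torch.equal(rgb, x)
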
def get_hue_transform(dim, mean_values):
    swap = (2, 1, 0)
    return transforms.Compose([
        transforms.Scale(dim),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255)),
        RandomHue(),
        SwapChannels(swap),
        transforms.Normalize(mean_values, (1, 1, 1))
    ])
def get_advanced_transform(dim, mean_values):
    # loader must be cv2 loader
    swap = (2, 1, 0)
    return transforms.Compose([
        Scale(dim),
        Padding(5),
        RandomCrop(5),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255)),
        RandomHue(),
        SwapChannels(swap),
        transforms.Normalize(mean_values, (1, 1, 1))
    ])
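
The last two transforms rely on custom helpers (Scale, Padding, RandomCrop, RandomHue, SwapChannels) defined elsewhere in that project and not shown here. As a hypothetical illustration only, SwapChannels presumably reorders the channel dimension of a CHW tensor according to the given index tuple, roughly like this:

import torch

class SwapChannels:
    # Hypothetical sketch: reorder the channels of a CHW tensor,
    # e.g. swap=(2, 1, 0) turns RGB into BGR.
    def __init__(self, swap):
        self.swap = swap

    def __call__(self, tensor):
        return tensor[torch.LongTensor(self.swap)]
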