def __call__(self, img):
    """Randomly crop `img` to 90-100% of its area with aspect ratio in
    [7/8, 8/7], then resize the crop to (self.size, self.size).

    Falls back to a deterministic scale + center crop when no valid crop
    is found within 10 attempts.
    """
    for _ in range(10):
        src_area = img.size[0] * img.size[1]
        target_area = random.uniform(0.9, 1.) * src_area
        ratio = random.uniform(7. / 8, 8. / 7)
        crop_w = int(round(math.sqrt(target_area * ratio)))
        crop_h = int(round(math.sqrt(target_area / ratio)))
        # Randomly exchange width/height to cover both aspect orientations.
        if random.random() < 0.5:
            crop_w, crop_h = crop_h, crop_w
        if crop_w <= img.size[0] and crop_h <= img.size[1]:
            left = random.randint(0, img.size[0] - crop_w)
            top = random.randint(0, img.size[1] - crop_h)
            patch = img.crop((left, top, left + crop_w, top + crop_h))
            assert patch.size == (crop_w, crop_h)
            return patch.resize((self.size, self.size), self.interpolation)
    # Fallback
    scaled = Scale(self.size, interpolation=self.interpolation)(img)
    return CenterCrop(self.size)(scaled)
Example usages of the Python class Scale() (collected source snippets)
def get_transform(opt):
    """Build the preprocessing pipeline selected by opt.resize_or_crop.

    All modes end with ToTensor + Normalize mapping pixels to [-1, 1];
    training additionally gets a random horizontal flip unless disabled.
    """
    steps = []
    mode = opt.resize_or_crop
    if mode == 'resize_and_crop':
        steps.append(transforms.Scale([opt.loadSizeX, opt.loadSizeY], Image.BICUBIC))
        steps.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'crop':
        steps.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'scale_width':
        steps.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif mode == 'scale_width_and_crop':
        steps.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSizeX)))
        steps.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        steps.append(transforms.RandomHorizontalFlip())
    steps.append(transforms.ToTensor())
    steps.append(transforms.Normalize((0.5, 0.5, 0.5),
                                      (0.5, 0.5, 0.5)))
    return transforms.Compose(steps)
def get_loader(config):
    """Builds and returns Dataloader for MNIST and SVHN dataset."""
    # Shared preprocessing: resize, tensorize, map pixels to [-1, 1].
    transform = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    svhn = datasets.SVHN(root=config.svhn_path, download=True, transform=transform)
    mnist = datasets.MNIST(root=config.mnist_path, download=True, transform=transform)

    def _make_loader(ds):
        # Both loaders share batch size, shuffling and worker count.
        return torch.utils.data.DataLoader(dataset=ds,
                                           batch_size=config.batch_size,
                                           shuffle=True,
                                           num_workers=config.num_workers)

    return _make_loader(svhn), _make_loader(mnist)
def __call__(self, img):
    """Take a random crop covering 90-100% of the image area (aspect ratio
    between 7/8 and 8/7) and resize it to a self.size square.

    After 10 failed attempts, fall back to scale followed by center crop.
    """
    for _ in range(10):
        full_area = img.size[0] * img.size[1]
        wanted = random.uniform(0.9, 1.) * full_area
        ar = random.uniform(7. / 8, 8. / 7)
        cw = int(round(math.sqrt(wanted * ar)))
        ch = int(round(math.sqrt(wanted / ar)))
        # Coin flip swaps the orientation of the crop rectangle.
        if random.random() < 0.5:
            cw, ch = ch, cw
        if cw <= img.size[0] and ch <= img.size[1]:
            x0 = random.randint(0, img.size[0] - cw)
            y0 = random.randint(0, img.size[1] - ch)
            region = img.crop((x0, y0, x0 + cw, y0 + ch))
            assert region.size == (cw, ch)
            return region.resize((self.size, self.size), self.interpolation)
    # Fallback
    rescale = Scale(self.size, interpolation=self.interpolation)
    return CenterCrop(self.size)(rescale(img))
def __call__(self, img):
    """Aggressive random crop: 70-98% of the area with aspect ratio in
    [5/8, 8/5], resized to (self.size, self.size).

    Falls back to scale + center crop after 10 unsuccessful attempts.
    """
    for _ in range(10):
        whole = img.size[0] * img.size[1]
        goal_area = random.uniform(0.70, 0.98) * whole
        ar = random.uniform(5. / 8, 8. / 5)
        cw = int(round(math.sqrt(goal_area * ar)))
        ch = int(round(math.sqrt(goal_area / ar)))
        # Randomly transpose the crop rectangle.
        if random.random() < 0.5:
            cw, ch = ch, cw
        if cw <= img.size[0] and ch <= img.size[1]:
            left = random.randint(0, img.size[0] - cw)
            top = random.randint(0, img.size[1] - ch)
            piece = img.crop((left, top, left + cw, top + ch))
            assert piece.size == (cw, ch)
            return piece.resize((self.size, self.size), self.interpolation)
    # Fallback
    return CenterCrop(self.size)(
        Scale(self.size, interpolation=self.interpolation)(img))
def __call__(self, img):
    """Random crop of 70-98% of the input area, aspect ratio 5/8..8/5,
    resized to a self.size square; scale + center-crop fallback."""
    for _ in range(10):
        area_px = img.size[0] * img.size[1]
        target = random.uniform(0.70, 0.98) * area_px
        aspect = random.uniform(5. / 8, 8. / 5)
        width = int(round(math.sqrt(target * aspect)))
        height = int(round(math.sqrt(target / aspect)))
        if random.random() < 0.5:
            # Swap to sample both portrait and landscape crops.
            width, height = height, width
        if width <= img.size[0] and height <= img.size[1]:
            ox = random.randint(0, img.size[0] - width)
            oy = random.randint(0, img.size[1] - height)
            cropped = img.crop((ox, oy, ox + width, oy + height))
            assert cropped.size == (width, height)
            return cropped.resize((self.size, self.size), self.interpolation)
    # Fallback
    shrink = Scale(self.size, interpolation=self.interpolation)
    center = CenterCrop(self.size)
    return center(shrink(img))
def __call__(self, img):
    """Nearly-full random crop (90-100% area, aspect 7/8..8/7) resized to
    (self.size, self.size); scale + center crop after 10 failed tries."""
    attempt = 0
    while attempt < 10:
        attempt += 1
        base_area = img.size[0] * img.size[1]
        pick_area = random.uniform(0.9, 1.) * base_area
        aspect = random.uniform(7. / 8, 8. / 7)
        cw = int(round(math.sqrt(pick_area * aspect)))
        ch = int(round(math.sqrt(pick_area / aspect)))
        # Randomly flip the rectangle orientation.
        if random.random() < 0.5:
            cw, ch = ch, cw
        if cw <= img.size[0] and ch <= img.size[1]:
            px = random.randint(0, img.size[0] - cw)
            py = random.randint(0, img.size[1] - ch)
            window = img.crop((px, py, px + cw, py + ch))
            assert window.size == (cw, ch)
            return window.resize((self.size, self.size), self.interpolation)
    # Fallback
    return CenterCrop(self.size)(
        Scale(self.size, interpolation=self.interpolation)(img))
def test_getitem(self):
    """Smoke-test Preprocessor on VIPeR: its length matches the train list
    and each item decodes to a normalized (3, 224, 224) tensor."""
    import torchvision.transforms as t
    from reid.datasets.viper import VIPeR
    from reid.utils.data.preprocessor import Preprocessor
    root, split_id, num_val = '/tmp/open-reid/viper', 0, 100
    dataset = VIPeR(root, split_id=split_id, num_val=num_val, download=True)
    preproc = Preprocessor(dataset.train, root=dataset.images_dir,
                           transform=t.Compose([
                               t.Scale(256),
                               t.CenterCrop(224),
                               t.ToTensor(),
                               t.Normalize(mean=[0.485, 0.456, 0.406],
                                           std=[0.229, 0.224, 0.225])
                           ]))
    # Fix: assertEquals is a deprecated alias of assertEqual and emits
    # DeprecationWarning (removed in newer unittest versions).
    self.assertEqual(len(preproc), len(dataset.train))
    img, pid, camid = preproc[0]
    self.assertEqual(img.size(), (3, 224, 224))
def load_data(self):
    """Populate self.syn_train_loader and self.real_loader with shuffled
    DataLoaders over the synthetic and real image folders."""
    print('=' * 50)
    print('Loading data...')
    # NOTE(review): `transforms.ImageOps` only resolves because old
    # torchvision re-exported PIL.ImageOps inside its transforms module —
    # confirm the pinned torchvision version.
    # NOTE(review): images are converted to grayscale yet normalized with
    # 3-channel statistics — presumably intentional; verify downstream.
    pipeline = transforms.Compose([
        transforms.ImageOps.grayscale,
        transforms.Scale((cfg.img_width, cfg.img_height)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    syn_train_folder = torchvision.datasets.ImageFolder(root=cfg.syn_path,
                                                        transform=pipeline)
    # print(syn_train_folder)
    self.syn_train_loader = Data.DataLoader(syn_train_folder,
                                            batch_size=cfg.batch_size,
                                            shuffle=True, pin_memory=True)
    print('syn_train_batch %d' % len(self.syn_train_loader))

    real_folder = torchvision.datasets.ImageFolder(root=cfg.real_path,
                                                   transform=pipeline)
    # real_folder.imgs = real_folder.imgs[:2000]
    self.real_loader = Data.DataLoader(real_folder, batch_size=cfg.batch_size,
                                       shuffle=True, pin_memory=True)
    print('real_batch %d' % len(self.real_loader))
spatial_dataloader.py — source file from the project "two-stream-action-recognition" by jeffreyhuang1 (reads: 21, favorites: 0, likes: 0, comments: 0)
def validate(self):
    # Build the validation split: frames resized to 224x224 and normalized
    # with ImageNet mean/std (matches ImageNet-pretrained backbones).
    # NOTE(review): presumably RGB appearance frames (spatial stream) —
    # confirm against spatial_dataset.
    validation_set = spatial_dataset(dic=self.dic_testing, root_dir=self.data_path, mode='val', transform = transforms.Compose([
        transforms.Scale([224,224]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
        ]))
    # Python 2 print statements (this file targets Python 2).
    print '==> Validation data :',len(validation_set),'frames'
    print validation_set[1][1].size()
    # Deterministic order (shuffle=False) so metrics are reproducible.
    val_loader = DataLoader(
        dataset=validation_set,
        batch_size=self.BATCH_SIZE,
        shuffle=False,
        num_workers=self.num_workers)
    return val_loader
motion_dataloader.py — source file from the project "two-stream-action-recognition" by jeffreyhuang1 (reads: 20, favorites: 0, likes: 0, comments: 0)
def train(self):
    # Build the training split: inputs resized to 224x224 and tensorized.
    # No Normalize step here, unlike the spatial pipeline — NOTE(review):
    # presumably because these are stacked motion/flow channels; confirm.
    training_set = motion_dataset(dic=self.dic_video_train, in_channel=self.in_channel, root_dir=self.data_path,
        mode='train',
        transform = transforms.Compose([
        transforms.Scale([224,224]),
        transforms.ToTensor(),
        ]))
    # Python 2 print statement (this file targets Python 2).
    print '==> Training data :',len(training_set),' videos',training_set[1][0].size()
    # Shuffled loader with pinned memory for faster host-to-GPU transfer.
    train_loader = DataLoader(
        dataset=training_set,
        batch_size=self.BATCH_SIZE,
        shuffle=True,
        num_workers=self.num_workers,
        pin_memory=True
        )
    return train_loader
motion_dataloader.py — source file from the project "two-stream-action-recognition" by jeffreyhuang1 (reads: 32, favorites: 0, likes: 0, comments: 0)
def val(self):
    # Build the validation split: 224x224 resize + ToTensor only (no
    # normalization, mirroring the train() pipeline of this loader).
    validation_set = motion_dataset(dic= self.dic_test_idx, in_channel=self.in_channel, root_dir=self.data_path ,
        mode ='val',
        transform = transforms.Compose([
        transforms.Scale([224,224]),
        transforms.ToTensor(),
        ]))
    # Python 2 print statement (this file targets Python 2).
    print '==> Validation data :',len(validation_set),' frames',validation_set[1][1].size()
    #print validation_set[1]
    # shuffle=False keeps evaluation order deterministic.
    val_loader = DataLoader(
        dataset=validation_set,
        batch_size=self.BATCH_SIZE,
        shuffle=False,
        num_workers=self.num_workers)
    return val_loader
def imagenet_transform(scale_size=256, input_size=224, train=True, allow_var_size=False):
    """Standard ImageNet preprocessing.

    train=True: scale, random crop, random horizontal flip.
    allow_var_size=True (eval): scale only, keeping variable spatial size.
    otherwise (eval): scale then center crop to input_size.
    All variants end with ToTensor + per-channel ImageNet normalization.
    """
    normalize = {'mean': [0.485, 0.456, 0.406],
                 'std': [0.229, 0.224, 0.225]}
    # Shared tail appended to every variant.
    tail = [transforms.ToTensor(), transforms.Normalize(**normalize)]
    if train:
        head = [transforms.Scale(scale_size),
                transforms.RandomCrop(input_size),
                transforms.RandomHorizontalFlip()]
    elif allow_var_size:
        head = [transforms.Scale(scale_size)]
    else:
        head = [transforms.Scale(scale_size),
                transforms.CenterCrop(input_size)]
    return transforms.Compose(head + tail)
base_dataset.py — source file from the project "CycleGANwithPerceptionLoss" by EliasVansteenkiste (reads: 33, favorites: 0, likes: 0, comments: 0)
def get_transform(opt):
    """Assemble the image preprocessing pipeline chosen by opt.resize_or_crop.

    Every mode finishes with ToTensor + Normalize into [-1, 1]; training
    adds a random horizontal flip unless opt.no_flip is set.
    """
    tlist = []
    resize_mode = opt.resize_or_crop
    if resize_mode == 'resize_and_crop':
        tlist.append(transforms.Scale([opt.loadSize, opt.loadSize], Image.BICUBIC))
        tlist.append(transforms.RandomCrop(opt.fineSize))
    elif resize_mode == 'crop':
        tlist.append(transforms.RandomCrop(opt.fineSize))
    elif resize_mode == 'scale_width':
        tlist.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif resize_mode == 'scale_width_and_crop':
        tlist.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        tlist.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        tlist.append(transforms.RandomHorizontalFlip())
    tlist.append(transforms.ToTensor())
    tlist.append(transforms.Normalize((0.5, 0.5, 0.5),
                                      (0.5, 0.5, 0.5)))
    return transforms.Compose(tlist)
def load_image_for_prediction(opt, image_path):
    """
    Load an image for prediction, preprocessing it the same way as training,
    and return a dataset-style dict where both 'A' and 'B' reference the
    same tensor and path.
    :param opt: options object providing loadSize and fineSize
    :param image_path: path of the image to load
    :return: dict with keys 'A', 'A_paths', 'B', 'B_paths'
    """
    pipeline = transforms.Compose([
        transforms.Scale(opt.loadSize),
        transforms.RandomCrop(opt.fineSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5),
                             (0.5, 0.5, 0.5)),
    ])
    tensor = pipeline(Image.open(image_path)).float()
    # Add a leading batch dimension in place.
    tensor.unsqueeze_(0)
    return {'A': tensor, 'A_paths': image_path,
            'B': tensor, 'B_paths': image_path}
def LSUN_loader(root, image_size, classes=['bedroom'], normalize=True):
    """
    Function to load torchvision dataset object based on just image size
    Args:
        root = If your dataset is downloaded and ready to use, mention the location of this folder. Else, the dataset will be downloaded to this location
        image_size = Size of every image
        classes = Default class is 'bedroom'. Other available classes are:
            'bridge', 'church_outdoor', 'classroom', 'conference_room', 'dining_room', 'kitchen', 'living_room', 'restaurant', 'tower'
        normalize = Requirement to normalize the image. Default is true
    """
    transformations = [transforms.Scale(image_size),
                       transforms.CenterCrop(image_size),
                       transforms.ToTensor()]
    if normalize:
        transformations.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    # Bug fix: the original loop (`for c in classes: c = c + '_train'`)
    # rebound only the loop variable, so the '_train' suffix required by
    # torchvision's LSUN dataset was never applied to the class names.
    classes = [c + '_train' for c in classes]
    lsun_data = dset.LSUN(db_path=root, classes=classes,
                          transform=transforms.Compose(transformations))
    return lsun_data
def get_transform(opt):
    """Return the Compose pipeline for the mode named by opt.resize_or_crop."""
    pipeline = []
    # Geometric stage, selected by mode.
    if opt.resize_or_crop == 'resize_and_crop':
        square = [opt.loadSize, opt.loadSize]
        pipeline += [transforms.Scale(square, Image.BICUBIC),
                     transforms.RandomCrop(opt.fineSize)]
    elif opt.resize_or_crop == 'crop':
        pipeline += [transforms.RandomCrop(opt.fineSize)]
    elif opt.resize_or_crop == 'scale_width':
        pipeline += [transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize))]
    elif opt.resize_or_crop == 'scale_width_and_crop':
        pipeline += [transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)),
            transforms.RandomCrop(opt.fineSize)]
    # Augmentation stage: horizontal flip for training only.
    if opt.isTrain and not opt.no_flip:
        pipeline += [transforms.RandomHorizontalFlip()]
    # Output stage: tensor in [-1, 1].
    pipeline += [transforms.ToTensor(),
                 transforms.Normalize((0.5, 0.5, 0.5),
                                      (0.5, 0.5, 0.5))]
    return transforms.Compose(pipeline)
def get_transform(opt):
    """Construct the preprocessing transform dictated by opt.resize_or_crop."""
    ops = []
    choice = opt.resize_or_crop
    if choice == 'resize_and_crop':
        # Resize to a loadSize square, then take a random fineSize crop.
        ops.append(transforms.Scale([opt.loadSize, opt.loadSize], Image.BICUBIC))
        ops.append(transforms.RandomCrop(opt.fineSize))
    elif choice == 'crop':
        ops.append(transforms.RandomCrop(opt.fineSize))
    elif choice == 'scale_width':
        ops.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif choice == 'scale_width_and_crop':
        ops.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        ops.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        ops.append(transforms.RandomHorizontalFlip())
    # Final conversion: tensor with channels normalized into [-1, 1].
    ops += [transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5),
                                 (0.5, 0.5, 0.5))]
    return transforms.Compose(ops)
def transform(is_train=True, normalize=True):
    """
    Returns a transform object: Scale(256), then a random (train) or center
    (eval) 224 crop, train-only horizontal flip, ToTensor, and optional
    ImageNet normalization.
    """
    filters = [Scale(256)]
    filters.append(RandomCrop(224) if is_train else CenterCrop(224))
    if is_train:
        filters.append(RandomHorizontalFlip())
    filters.append(ToTensor())
    if normalize:
        filters.append(Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]))
    return Compose(filters)
def __init__(self, crop_size=128, y_offset=15, flip=False):
    """Store crop parameters and build the post-crop transform.

    When flip is True the pipeline starts with a random horizontal flip;
    either way it scales to 224, tensorizes, and applies ImageNet stats.
    """
    self.crop_size = crop_size
    self.y_offset = y_offset
    self.flip = flip
    steps = []
    if self.flip:
        steps.append(transforms.RandomHorizontalFlip())
    steps += [
        transforms.Scale(size=224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    self.post_transform = transforms.Compose(steps)
def transform_input(crop_size, upscale_factor):
    """LR of target image: downscale the crop by `upscale_factor`."""
    lr_size = crop_size // upscale_factor
    return Compose([Scale(lr_size)])
# def transform_target_batch(crop_size):
# def transform(image):
# patches = extract_subimages(image, crop_size, crop_size)
# patches = [ToTensor()(x) for x in patches]
# return stack(patches, 0)
# return transform
# def transform_input_batch(crop_size, upscale_factor):
# def transform(image):
# patches = extract_subimages(image, crop_size, crop_size)
# patches = [Compose([Scale(crop_size//upscale_factor), ToTensor()])(x) for x in patches]
# return stack(patches, 0)
# return transform
def load_data(resize):
    """Create train/val DataLoaders over the PlantVillage ImageFolder tree.

    Args:
        resize: iterable of candidate input sizes; max(resize) is used as
            the crop size for both splits.

    Returns:
        (train_loader, val_loader) tuple of shuffled DataLoaders.

    Note: relies on a module-level `batch_size` (not defined here).
    """
    crop = max(resize)  # hoisted: used four times below
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomSizedCrop(crop),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            # Higher scale-up for inception
            transforms.Scale(int(crop / 224 * 256)),
            transforms.CenterCrop(crop),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    data_dir = 'PlantVillage'
    dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
             for x in ['train', 'val']}
    dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=batch_size,
                                                   shuffle=True)
                    for x in ['train', 'val']}
    # Removed unused locals dset_sizes/dset_classes (computed, never used).
    return dset_loaders['train'], dset_loaders['val']
def __init__(self, env):
    """Wrap a CartPole env, keeping its unwrapped core and a screen resizer."""
    super(CartPoleWrapper, self).__init__()
    self.env = env.unwrapped
    # Pipeline: ndarray -> PIL image -> 40px bicubic resize -> tensor.
    self.resize = T.Compose([
        T.ToPILImage(),
        T.Scale(40, interpolation=Image.CUBIC),
        T.ToTensor(),
    ])
    self.screen_width = 600
    self.action_space = self.env.action_space
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Center-crop evaluation pipeline, optionally preceded by a scale step.

    Args:
        input_size: side length of the final center crop.
        scale_size: optional pre-crop scale size; skipped when None or equal
            to input_size.
        normalize: dict of mean/std passed to transforms.Normalize.
    """
    t_list = [
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    # Fix: with the default scale_size=None, `None != input_size` is truthy
    # and the original prepended transforms.Scale(None); guard explicitly.
    if scale_size is not None and scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Random-crop training pipeline, optionally preceded by a scale step.

    Args:
        input_size: side length of the random crop.
        scale_size: optional pre-crop scale size; skipped when None or equal
            to input_size.
        normalize: dict of mean/std passed to transforms.Normalize.

    Returns:
        A transforms.Compose pipeline.
    """
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    # Fix: guard against the None default reaching transforms.Scale
    # (mirrors the sibling scale_crop helper).
    if scale_size is not None and scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    # Bug fix: the original built the Compose but never returned it,
    # so callers received None.
    return transforms.Compose(t_list)
def CreateDataLoader(opt):
    """Build a shuffling DataLoader over paired color/sketch ImageFolder data.

    C/S branches scale to opt.imageSize (bicubic); the V branch takes a
    random-sized crop at a quarter of that size. All map pixels to [-1, 1].
    """
    random.seed(opt.manualSeed)
    # folder dataset
    half_range = ((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(*half_range),
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(*half_range),
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(*half_range),
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    """Create the shuffled paired-image DataLoader (color + sketch folders).

    The C and S pipelines bicubically scale to opt.imageSize; the V pipeline
    random-crops at opt.imageSize // 4. All normalize into [-1, 1].
    """
    random.seed(opt.manualSeed)
    # folder dataset

    def _to_unit_tensor(head):
        # Shared tail: tensorize and normalize each pipeline's head op.
        return transforms.Compose([
            head,
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

    CTrans = _to_unit_tensor(transforms.Scale(opt.imageSize, Image.BICUBIC))
    VTrans = _to_unit_tensor(RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC))
    STrans = _to_unit_tensor(transforms.Scale(opt.imageSize, Image.BICUBIC))

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    """Build the shuffled paired color/sketch DataLoader.

    Unlike the quarter-size variants, the V branch here random-crops at the
    full opt.imageSize. Every branch normalizes pixels into [-1, 1].
    """
    random.seed(opt.manualSeed)
    # folder dataset
    norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    """Build the shuffled paired color/sketch DataLoader with brightness jitter.

    The S branch applies a random linear jitter (x*r + 1 - r, r in [0.7, 1])
    before normalizing; C scales bicubically, V random-crops at a quarter size.
    """
    random.seed(opt.manualSeed)
    # folder dataset
    norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])

    def jitter(x):
        # Random linear fade toward white: identity when ran == 1.
        ran = random.uniform(0.7, 1)
        return x * ran + 1 - ran

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Lambda(jitter),
        norm,
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def get_loader(config):
    """Builds and returns Dataloader for MNIST and SVHN dataset."""
    # Shared preprocessing for all four splits.
    transform = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    svhn = datasets.SVHN(root=config.svhn_path, download=True, transform=transform, split='train')
    mnist = datasets.MNIST(root=config.mnist_path, download=True, transform=transform, train=True)
    svhn_test = datasets.SVHN(root=config.svhn_path, download=True, transform=transform, split='test')
    mnist_test = datasets.MNIST(root=config.mnist_path, download=True, transform=transform, train=False)

    def _loader(ds, shuffle):
        # Train splits shuffle; test splits keep deterministic order.
        return torch.utils.data.DataLoader(dataset=ds,
                                           batch_size=config.batch_size,
                                           shuffle=shuffle,
                                           num_workers=config.num_workers)

    return (_loader(svhn, True), _loader(mnist, True),
            _loader(svhn_test, False), _loader(mnist_test, False))