def linearToPolar(img, center=None,
                  final_radius=None,
                  initial_radius=None,
                  phase_width=None,
                  interpolation=cv2.INTER_AREA, maps=None,
                  borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts):
    '''
    map a 2d (x, y) Cartesian array to a polar (r, phi) array
    using cv2.remap
    '''
    if maps is None:
        mapY, mapX = linearToPolarMaps(img.shape[:2], center, final_radius,
                                       initial_radius, phase_width)
    else:
        mapY, mapX = maps
    o = {'interpolation': interpolation,
         'borderValue': borderValue,
         'borderMode': borderMode}
    o.update(opts)
    return cv2.remap(img, mapY, mapX, **o)
def polarToLinear(img, shape=None, center=None, maps=None,
                  interpolation=cv2.INTER_AREA,
                  borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts):
    '''
    map a 2d polar (r, phi) array to a Cartesian (x, y) array
    using cv2.remap
    '''
    if maps is None:
        mapY, mapX = polarToLinearMaps(img.shape[:2], shape, center)
    else:
        mapY, mapX = maps
    o = {'interpolation': interpolation,
         'borderValue': borderValue,
         'borderMode': borderMode}
    o.update(opts)
    return cv2.remap(img, mapY, mapX, **o)
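A minimal round-trip sketch, assuming the companion linearToPolarMaps / polarToLinearMaps helpers from the same module are importable and that shape is the Cartesian output shape:

import numpy as np

img = np.random.rand(256, 256).astype(np.float32)
polar = linearToPolar(img)                        # maps are built internally
cart = polarToLinear(polar, shape=img.shape[:2])  # map back to Cartesian coordinates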
def get_mag_avg(img):
    img = np.sqrt(img)
    kernels = get_kernels()
    mag = np.zeros(img.shape, dtype='float32')
    for kernel_filter in kernels:
        gx = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[1],
                          borderType=cv2.BORDER_REFLECT)
        gy = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[0],
                          borderType=cv2.BORDER_REFLECT)
        mag += cv2.magnitude(gx, gy)
    mag /= len(kernels)
    return mag
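get_kernels() is defined elsewhere in the source file; assuming it returns (gy, gx) derivative-kernel pairs, the helper would be called like this:

import numpy as np

img = np.abs(np.random.randn(128, 128)).astype(np.float32)  # non-negative, since np.sqrt is applied
mag = get_mag_avg(img)                                       # averaged gradient magnitude per pixel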
def create_dataset(opt, mode):
    convert = tnt.transform.compose([
        lambda x: x.astype(np.float32),
        T.Normalize([125.3, 123.0, 113.9], [63.0, 62.1, 66.7]),
        lambda x: x.transpose(2, 0, 1).astype(np.float32),
        torch.from_numpy,
    ])
    train_transform = tnt.transform.compose([
        T.RandomHorizontalFlip(),
        T.Pad(opt.randomcrop_pad, cv2.BORDER_REFLECT),
        T.RandomCrop(32),
        convert,
    ])
    ds = getattr(datasets, opt.dataset)(opt.data_root, train=mode, download=True)
    smode = 'train' if mode else 'test'
    ds = tnt.dataset.TensorDataset([
        getattr(ds, smode + '_data'),
        getattr(ds, smode + '_labels')])
    return ds.transform({0: train_transform if mode else convert})
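A hypothetical call; opt only needs the fields the function reads (dataset, data_root, randomcrop_pad):

from types import SimpleNamespace

opt = SimpleNamespace(dataset='CIFAR10', data_root='./data', randomcrop_pad=4)
train_ds = create_dataset(opt, mode=True)    # flip + reflect-pad + random crop + normalise
test_ds = create_dataset(opt, mode=False)    # normalise only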
# source: step2_train_mass_segmenter.py (project: kaggle_ndsb2017, author: juliandewit)
def random_translate_img(img, xy_range, border_mode="constant"):
    if random.random() > xy_range.chance:
        return img
    import cv2
    if not isinstance(img, list):
        img = [img]
    org_height, org_width = img[0].shape[:2]
    translate_x = random.randint(xy_range.x_min, xy_range.x_max)
    translate_y = random.randint(xy_range.y_min, xy_range.y_max)
    trans_matrix = numpy.float32([[1, 0, translate_x], [0, 1, translate_y]])
    border_const = cv2.BORDER_CONSTANT
    if border_mode == "reflect":
        border_const = cv2.BORDER_REFLECT
    res = []
    for img_inst in img:
        img_inst = cv2.warpAffine(img_inst, trans_matrix, (org_width, org_height),
                                  borderMode=border_const)
        res.append(img_inst)
    if len(res) == 1:
        res = res[0]
    xy_range.last_x = translate_x
    xy_range.last_y = translate_y
    return res
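xy_range is not defined in this snippet; a hypothetical container mirroring the attributes the function reads and writes (and a call using the snippet's random / numpy imports) could look like this:

import numpy as np


class XYRange:
    # hypothetical holder mirroring the attributes used above
    def __init__(self, x_min, x_max, y_min, y_max, chance=1.0):
        self.x_min, self.x_max = x_min, x_max
        self.y_min, self.y_max = y_min, y_max
        self.chance = chance
        self.last_x = self.last_y = 0


img = np.zeros((64, 64), dtype=np.uint8)
shifted = random_translate_img(img, XYRange(-5, 5, -5, 5), border_mode="reflect")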
def test_box_filter_reflect(self):
    I = np.array(range(1, 50)).reshape(7, 7).astype(np.float32)
    r = 2
    ret1 = cv.smooth.box_filter(I, r, normalize=True, border_type='reflect')
    ret2 = cv2.blur(I, (5, 5), borderType=cv2.BORDER_REFLECT)
    self.assertTrue(np.array_equal(ret1, ret2))
def random_rotate(image):
    cols = image.shape[1]
    rows = image.shape[0]
    mean_color = np.mean(image, axis=(0, 1))
    angle = random.uniform(0, 90)
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
    if random.randint(0, 1) == 1:
        dst = cv2.warpAffine(image, M, (cols, rows),
                             borderValue=mean_color, borderMode=cv2.BORDER_REFLECT)
    else:
        dst = cv2.warpAffine(image, M, (cols, rows), borderValue=mean_color)
    return dst
def rotate(image, angle, interpolation=cv2.INTER_CUBIC,
           borderMode=cv2.BORDER_REFLECT, borderValue=0):
    '''
    rotate :image: by :angle: [deg] around its center
    '''
    s0, s1 = image.shape
    # OpenCV expects the rotation center and output size as (x, y) = (col, row)
    image_center = (s1 - 1) / 2., (s0 - 1) / 2.
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, (s1, s0),
                            flags=interpolation, borderMode=borderMode,
                            borderValue=borderValue)
    return result
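The reflected border avoids the black corners that a constant border would produce, e.g.:

import cv2
import numpy as np

img = np.random.rand(100, 100).astype(np.float32)
r_reflect = rotate(img, 30)                               # default: mirrored borders
r_zero = rotate(img, 30, borderMode=cv2.BORDER_CONSTANT)  # zero-filled corners instead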
def fastFilter(arr, ksize=30, every=None, resize=True, fn='median',
               interpolation=cv2.INTER_LANCZOS4,
               smoothksize=0,
               borderMode=cv2.BORDER_REFLECT):
    '''
    fn -> one of ['nanmean', 'mean', 'nanmedian', 'median']
    A fast 2d filter for large kernel sizes that also works with NaNs.
    The computation speed is increased because only every n-th position
    within the filter kernel is evaluated.
    '''
    if every is None:
        every = max(ksize // 3, 1)
    else:
        assert ksize >= 3 * every
    s0, s1 = arr.shape[:2]
    ss0 = s0 // every
    every = s0 // ss0
    ss1 = s1 // every
    out = np.full((ss0 + 1, ss1 + 1), np.nan)
    c = {'median': _calcMedian,
         'nanmedian': _calcNanMedian,
         'nanmean': _calcNanMean,
         'mean': _calcMean,
         }[fn]
    ss0, ss1 = c(arr, out, ksize, every)
    out = out[:ss0, :ss1]
    if smoothksize:
        out = gaussian_filter(out, smoothksize)
    if not resize:
        return out
    return cv2.resize(out, arr.shape[:2][::-1],
                      interpolation=interpolation)
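The _calc* kernels and gaussian_filter (scipy.ndimage) are imported elsewhere in the module; a usage sketch on data with gaps:

import numpy as np

arr = np.random.rand(500, 500)
arr[::7, ::11] = np.nan                                # scatter some missing values
background = fastFilter(arr, ksize=60, fn='nanmedian')
residual = arr - background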
def frame(image, top=2, bottom=2, left=2, right=2,
          borderType=cv.BORDER_CONSTANT, color=[255, 0, 0]):
    '''
    add borders around :image:
    :param image: has to be in RGB color scheme. Use `convert_to_rgb`
        if it's in OpenCV's BGR scheme.
    :param color: array representing an RGB color.
    :param borderType: other options are:
        cv.BORDER_REFLECT,
        cv.BORDER_REFLECT_101,
        cv.BORDER_DEFAULT,
        cv.BORDER_REPLICATE,
        cv.BORDER_WRAP
    '''
    return cv.copyMakeBorder(image, top, bottom, left, right, borderType, value=color)
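A usage sketch; 'photo.jpg' is a placeholder path, and the BGR image is converted to RGB before framing (with a reflected border, the color value is ignored):

import cv2 as cv

bgr = cv.imread('photo.jpg')              # hypothetical input file
rgb = cv.cvtColor(bgr, cv.COLOR_BGR2RGB)  # frame() expects RGB
framed = frame(rgb, top=10, bottom=10, left=10, right=10,
               borderType=cv.BORDER_REFLECT)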
def create_dataset(opt, mode, fold=0):
    convert = tnt.transform.compose([
        lambda x: x.astype(np.float32),
        lambda x: x / 255.0,
        # cvtransforms.Normalize([125.3, 123.0, 113.9], [63.0, 62.1, 66.7]),
        lambda x: x.transpose(2, 0, 1).astype(np.float32),
        torch.from_numpy,
    ])
    train_transform = tnt.transform.compose([
        cvtransforms.RandomHorizontalFlip(),
        cvtransforms.Pad(opt.randomcrop_pad, cv2.BORDER_REFLECT),
        cvtransforms.RandomCrop(96),
        convert,
    ])
    smode = 'train' if mode else 'test'
    ds = getattr(datasets, opt.dataset)('.', split=smode, download=True)
    if mode:
        if fold > -1:
            # list(...) so the indices can be used for fancy indexing (Python 3)
            folds_idx = [list(map(int, v.split(' ')[:-1]))
                         for v in [line.replace('\n', '')
                                   for line in open('./stl10_binary/fold_indices.txt')]][fold]
            ds = tnt.dataset.TensorDataset([
                getattr(ds, 'data').transpose(0, 2, 3, 1)[folds_idx],
                getattr(ds, 'labels')[folds_idx].tolist()])
        else:
            ds = tnt.dataset.TensorDataset([
                getattr(ds, 'data').transpose(0, 2, 3, 1),
                getattr(ds, 'labels').tolist()])
    else:
        ds = tnt.dataset.TensorDataset([
            getattr(ds, 'data').transpose(0, 2, 3, 1),
            getattr(ds, 'labels').tolist()])
    return ds.transform({0: train_transform if mode else convert})
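A hypothetical call; fold indices are read from ./stl10_binary/fold_indices.txt, and opt only needs dataset and randomcrop_pad:

from types import SimpleNamespace

opt = SimpleNamespace(dataset='STL10', randomcrop_pad=4)
fold0_train = create_dataset(opt, mode=True, fold=0)   # one labelled fold
full_train = create_dataset(opt, mode=True, fold=-1)   # all labelled training images
test_set = create_dataset(opt, mode=False)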
def __call__(self, image, *args):
    size = self.size
    if self.type == 'constant':
        image = cv2.copyMakeBorder(image, size, size, size, size,
                                   cv2.BORDER_CONSTANT, value=self.constant_color)
    elif self.type == 'reflect':
        image = cv2.copyMakeBorder(image, size, size, size, size, cv2.BORDER_REFLECT)
    elif self.type == 'replicate':
        image = cv2.copyMakeBorder(image, size, size, size, size, cv2.BORDER_REPLICATE)
    if len(args):
        return (image, *args)
    else:
        return image
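The constructor of this transform is not shown; the padding itself is plain cv2.copyMakeBorder, which the three branches map onto:

import cv2
import numpy as np

img = np.arange(9, dtype=np.uint8).reshape(3, 3)
reflected = cv2.copyMakeBorder(img, 2, 2, 2, 2, cv2.BORDER_REFLECT)
replicated = cv2.copyMakeBorder(img, 2, 2, 2, 2, cv2.BORDER_REPLICATE)
constant = cv2.copyMakeBorder(img, 2, 2, 2, 2, cv2.BORDER_CONSTANT, value=0)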
def create_iterator(opt, mode):
    if opt.dataset.startswith('CIFAR'):
        convert = tnt.transform.compose([
            lambda x: x.astype(np.float32),
            T.Normalize([125.3, 123.0, 113.9], [63.0, 62.1, 66.7]),
            lambda x: x.transpose(2, 0, 1),
            torch.from_numpy,
        ])
        train_transform = tnt.transform.compose([
            T.RandomHorizontalFlip(),
            T.Pad(opt.randomcrop_pad, cv2.BORDER_REFLECT),
            T.RandomCrop(32),
            convert,
        ])
        ds = getattr(datasets, opt.dataset)(opt.dataroot, train=mode, download=True)
        smode = 'train' if mode else 'test'
        ds = tnt.dataset.TensorDataset([getattr(ds, smode + '_data'),
                                        getattr(ds, smode + '_labels')])
        ds = ds.transform({0: train_transform if mode else convert})
        return ds.parallel(batch_size=opt.batchSize, shuffle=mode,
                           num_workers=opt.nthread, pin_memory=True)
    elif opt.dataset == 'ImageNet':
        def cvload(path):
            img = cv2.imread(path, cv2.IMREAD_COLOR)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            return img

        convert = tnt.transform.compose([
            lambda x: x.astype(np.float32) / 255.0,
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            lambda x: x.transpose(2, 0, 1).astype(np.float32),
            torch.from_numpy,
        ])
        print("| setting up data loader...")
        if mode:
            traindir = os.path.join(opt.dataroot, 'train')
            ds = datasets.ImageFolder(traindir, tnt.transform.compose([
                T.RandomSizedCrop(224),
                T.RandomHorizontalFlip(),
                convert,
            ]), loader=cvload)
        else:
            valdir = os.path.join(opt.dataroot, 'val')
            ds = datasets.ImageFolder(valdir, tnt.transform.compose([
                T.Scale(256),
                T.CenterCrop(224),
                convert,
            ]), loader=cvload)
        return torch.utils.data.DataLoader(ds,
                                           batch_size=opt.batchSize, shuffle=mode,
                                           num_workers=opt.nthread, pin_memory=False)
    else:
        raise ValueError('dataset not understood')
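A hypothetical CIFAR call; the field names follow what the function reads from opt:

from types import SimpleNamespace

opt = SimpleNamespace(dataset='CIFAR10', dataroot='./data',
                      randomcrop_pad=4, batchSize=128, nthread=4)
train_loader = create_iterator(opt, mode=True)
test_loader = create_iterator(opt, mode=False)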
def get_orb_keypoints(bd, image_min, image_max):
    """
    Computes the ORB key points

    Args:
        bd (2d array)
        image_min (int or float)
        image_max (int or float)
    """
    # We want odd patch sizes.
    # if parameter_object.scales[-1] % 2 == 0:
    #     patch_size = parameter_object.scales[-1] - 1
    if bd.dtype != 'uint8':
        bd = np.uint8(rescale_intensity(bd,
                                        in_range=(image_min,
                                                  image_max),
                                        out_range=(0, 255)))
    patch_size = 31
    patch_size_d = patch_size * 3
    # Initiate ORB detector
    orb = cv2.ORB_create(nfeatures=int(.25 * (bd.shape[0] * bd.shape[1])),
                         edgeThreshold=patch_size,
                         scaleFactor=1.2,
                         nlevels=8,
                         patchSize=patch_size,
                         WTA_K=4,
                         scoreType=cv2.ORB_FAST_SCORE)
    # Add padding because ORB ignores edges.
    bd = cv2.copyMakeBorder(bd, patch_size_d, patch_size_d, patch_size_d,
                            patch_size_d, cv2.BORDER_REFLECT)
    # Compute ORB keypoints
    key_points = orb.detectAndCompute(bd, None)[0]
    # img = cv2.drawKeypoints(np.uint8(ch_bd), key_points, np.uint8(ch_bd).copy())
    return fill_key_points(np.float32(bd), key_points)[patch_size_d:-patch_size_d,
                                                       patch_size_d:-patch_size_d]
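rescale_intensity (skimage.exposure) and fill_key_points come from the surrounding module; a sketch on a synthetic float band, using its own min/max as the stretch range:

import numpy as np

band = np.random.rand(256, 256).astype(np.float32)
# image_min / image_max define the intensity stretch applied before the uint8 conversion
kp_image = get_orb_keypoints(band, band.min(), band.max())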
# source: main_small_sample_class_normalized.py (project: scalingscattering, author: edouardoyallon)
def create_dataset(opt, mode):
    convert = tnt.transform.compose([
        lambda x: x.astype(np.float32),
        lambda x: x / 255.0,
        # cvtransforms.Normalize([125.3, 123.0, 113.9], [63.0, 62.1, 66.7]),
        lambda x: x.transpose(2, 0, 1).astype(np.float32),
        torch.from_numpy,
    ])
    train_transform = tnt.transform.compose([
        cvtransforms.RandomHorizontalFlip(),
        cvtransforms.Pad(opt.randomcrop_pad, cv2.BORDER_REFLECT),
        cvtransforms.RandomCrop(32),
        convert,
    ])
    ds = getattr(datasets, opt.dataset)('.', train=mode, download=True)
    smode = 'train' if mode else 'test'
    if mode:
        from numpy.random import RandomState
        prng = RandomState(opt.seed)
        assert opt.sampleSize % 10 == 0
        # integer division so the slice index stays an int (Python 3)
        random_permute = prng.permutation(np.arange(0, 5000))[0:opt.sampleSize // 10]
        labels = np.array(getattr(ds, 'train_labels'))
        data = getattr(ds, 'train_data')
        classes = np.unique(labels)
        inds_all = np.array([], dtype='int32')
        for cl in classes:
            inds = np.where(np.array(labels) == cl)[0][random_permute]
            inds_all = np.r_[inds, inds_all]
        ds = tnt.dataset.TensorDataset([
            data[inds_all, :].transpose(0, 2, 3, 1),
            labels[inds_all].tolist()])
    else:
        ds = tnt.dataset.TensorDataset([
            getattr(ds, smode + '_data').transpose(0, 2, 3, 1),
            getattr(ds, smode + '_labels')])
    return ds.transform({0: train_transform if mode else convert})
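A hypothetical call drawing 100 images per class from CIFAR-10; opt carries the fields read above:

from types import SimpleNamespace

opt = SimpleNamespace(dataset='CIFAR10', randomcrop_pad=4,
                      seed=0, sampleSize=1000)   # 1000 / 10 classes = 100 per class
small_train = create_dataset(opt, mode=True)
test_set = create_dataset(opt, mode=False)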