def build_random_variables(self, **kwargs):
    # All this is done just once per batch (i.e. until `clear_random_variables` is called)
    np.random.seed()
    imshape = kwargs.get('imshape')
    # Build and scale random fields
    random_field_x = np.random.uniform(-1, 1, imshape) * self.alpha
    random_field_y = np.random.uniform(-1, 1, imshape) * self.alpha
    # Smooth random field (this has to be done just once per reset)
    sdx = gaussian_filter(random_field_x, self.sigma, mode='reflect')
    sdy = gaussian_filter(random_field_y, self.sigma, mode='reflect')
    # Make meshgrid
    x, y = np.meshgrid(np.arange(imshape[1]), np.arange(imshape[0]))
    # Make inversion coefficient
    _inverter = 1. if not self.invert else -1.
    # Distort meshgrid indices (invert if required)
    flow_y, flow_x = (y + _inverter * sdy).reshape(-1, 1), (x + _inverter * sdx).reshape(-1, 1)
    # Set random states
    self.set_random_variable('flow_x', flow_x)
    self.set_random_variable('flow_y', flow_y)
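The stored flow fields are presumably consumed elsewhere in the class via scipy.ndimage.map_coordinates, as the elastic_transform variants further down in this collection do. A minimal self-contained sketch of the same idea, with illustrative names:

import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates

def elastic_warp(image, alpha=34.0, sigma=4.0):
    # Build, scale and smooth random displacement fields
    imshape = image.shape
    sdx = gaussian_filter(np.random.uniform(-1, 1, imshape) * alpha, sigma, mode='reflect')
    sdy = gaussian_filter(np.random.uniform(-1, 1, imshape) * alpha, sigma, mode='reflect')
    # Distort the meshgrid and sample the image at the displaced coordinates
    x, y = np.meshgrid(np.arange(imshape[1]), np.arange(imshape[0]))
    flow_y, flow_x = (y + sdy).reshape(-1, 1), (x + sdx).reshape(-1, 1)
    return map_coordinates(image, (flow_y, flow_x), order=1).reshape(imshape)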
def demo(stem):
    flist = getimgfiles(stem)
    ext = flist[0].suffix
    for i in range(len(flist)-1):
        fn1 = f'{stem}.{i}{ext}'
        im1 = imread(fn1, flatten=True).astype(float)  # flatten=True is rgb2gray
        # im1 = gaussian_filter(im1, FILTER)
        fn2 = f'{stem}.{i+1}{ext}'
        im2 = imread(fn2, flatten=True).astype(float)
        # im2 = gaussian_filter(im2, FILTER)
        U, V = HornSchunck(im1, im2, 1., 100)
        compareGraphs(U, V, im2)
    return U, V
def demo(stem, kernel=5, Nfilter=7):
    flist = getimgfiles(stem)
    ext = flist[0].suffix
    #%% priming read
    im1 = imread(f'{stem}.0{ext}', flatten=True)
    Y, X = im1.shape
    #%% evaluate the first frame's POI
    POI = getPOI(X, Y, kernel)
    #%% get the weights
    W = gaussianWeight(kernel)
    #%% loop over all images in directory
    for i in range(1, len(flist)):
        im2 = imread(f'{stem}.{i}{ext}', flatten=True)
        im2 = gaussian_filter(im2, Nfilter)
        V = LucasKanade(im1, im2, POI, W, kernel)
        compareGraphsLK(im1, im2, POI, V)
        im1 = im2
def compute_colseps_conv(binary, scale=1.0):
    """Find column separators by convolution and
    thresholding."""
    h, w = binary.shape
    # find vertical whitespace by thresholding
    smoothed = gaussian_filter(1.0*binary, (scale, scale*0.5))
    smoothed = uniform_filter(smoothed, (5.0*scale, 1))
    thresh = (smoothed < amax(smoothed)*0.1)
    DSAVE("1thresh", thresh)
    # find column edges by filtering
    grad = gaussian_filter(1.0*binary, (scale, scale*0.5), order=(0, 1))
    grad = uniform_filter(grad, (10.0*scale, 1))
    # grad = abs(grad)  # use this for finding both edges
    grad = (grad > 0.5*amax(grad))
    DSAVE("2grad", grad)
    # combine edges and whitespace
    seps = minimum(thresh, maximum_filter(grad, (int(scale), int(5*scale))))
    seps = maximum_filter(seps, (int(2*scale), 1))
    DSAVE("3seps", seps)
    # select only the biggest column separators
    seps = morph.select_regions(seps, sl.dim0, min=args['csminheight']*scale, nbest=args['maxcolseps'])
    DSAVE("4seps", seps)
    return seps
def compute_gradmaps(binary, scale):
    # use gradient filtering to find baselines
    boxmap = psegutils.compute_boxmap(binary, scale)
    cleaned = boxmap*binary
    DSAVE("cleaned", cleaned)
    if args['usegause']:
        # this uses Gaussians
        grad = gaussian_filter(1.0*cleaned, (args['vscale']*0.3*scale,
                                             args['hscale']*6*scale), order=(1, 0))
    else:
        # this uses non-Gaussian oriented filters
        grad = gaussian_filter(1.0*cleaned, (max(4, args['vscale']*0.3*scale),
                                             args['hscale']*scale), order=(1, 0))
        grad = uniform_filter(grad, (args['vscale'], args['hscale']*6*scale))
    bottom = ocrolib.norm_max((grad < 0)*(-grad))
    top = ocrolib.norm_max((grad > 0)*grad)
    return bottom, top, boxmap
def measure(self, line):
    h, w = line.shape
    smoothed = filters.gaussian_filter(line, (h*0.5, h*self.smoothness), mode='constant')
    smoothed += 0.001*filters.uniform_filter(smoothed, (h*0.5, w), mode='constant')
    self.shape = (h, w)
    a = argmax(smoothed, axis=0)
    a = filters.gaussian_filter(a, h*self.extra)
    self.center = array(a, 'i')
    deltas = abs(arange(h)[:, newaxis] - self.center[newaxis, :])
    self.mad = mean(deltas[line != 0])
    self.r = int(1 + self.range*self.mad)
    if self.debug:
        figure("center")
        imshow(line, cmap=cm.gray)
        plot(self.center)
        ginput(1, 1000)
def elastic_transform(image, alpha, sigma, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_.
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    """
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape[1:]  # (H, W); the leading axis is the channel axis
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    # return map_coordinates(image, indices, order=1).reshape(shape)
    res = np.zeros_like(image)
    for i in range(image.shape[0]):  # apply the same warp to every channel
        res[i] = map_coordinates(image[i], indices, order=1).reshape(shape)
    return res
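A usage sketch, under the assumption that `image` carries a leading channel axis, as `shape = image.shape[1:]` implies:

import numpy as np

rng = np.random.RandomState(0)
batch = rng.rand(3, 64, 64).astype(np.float32)  # (channels, H, W)
warped = elastic_transform(batch, alpha=34.0, sigma=4.0, random_state=rng)
assert warped.shape == batch.shape  # every channel is warped by the same field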
def level_curves(fname, npoints=200, smoothing=10, level=0.5):
    "Loads regularly sampled curves from a .PNG image."
    # Find the contour lines
    img = misc.imread(fname, flatten=True)  # Grayscale
    img = (img.T[:, ::-1]) / 255.
    img = gaussian_filter(img, smoothing, mode='nearest')
    lines = find_contours(img, level)
    # Compute the sampling ratio for every contour line
    lengths = np.array([arclength(line) for line in lines])
    points_per_line = np.ceil(npoints * lengths / np.sum(lengths))
    # Interpolate accordingly
    points = []
    connec = []
    index_offset = 0
    for ppl, line in zip(points_per_line, lines):
        (p, c) = resample(line, ppl)
        points.append(p)
        connec.append(c + index_offset)
        index_offset += len(p)
    size = np.maximum(img.shape[0], img.shape[1])
    points = np.vstack(points) / size
    connec = np.vstack(connec)
    return Curve(points, connec)
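`arclength`, `resample` and `Curve` are project helpers that are not part of this snippet; for orientation, `arclength` presumably does something like this (a hypothetical reconstruction):

import numpy as np

def arclength(line):
    # sum of segment lengths of a polyline given as an (N, 2) array
    return np.sum(np.sqrt(np.sum(np.diff(line, axis=0) ** 2, axis=1)))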
# Pyplot Output =================================================================================
def detect_peaks(hist, count=2):
    hist_copy = hist
    peaks = len(argrelextrema(hist_copy, np.greater, mode="wrap")[0])
    sigma = log1p(peaks)
    print(peaks, sigma)
    while peaks > count:
        new_hist = gaussian_filter(hist_copy, sigma=sigma)
        peaks = len(argrelextrema(new_hist, np.greater, mode="wrap")[0])
        if peaks < count:
            # smoothed too much; discard this pass and retry with a smaller sigma
            peaks = count + 1
            sigma = sigma * 0.5
            continue
        hist_copy = new_hist
        sigma = log1p(peaks)
        print(peaks, sigma)
    return argrelextrema(hist_copy, np.greater, mode="wrap")[0]
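A usage sketch with a synthetic circular histogram (e.g. a hue histogram, which matches the mode="wrap" boundary handling):

import numpy as np

bins = np.arange(180)
hist = (np.exp(-0.5 * ((bins - 40) / 8.0) ** 2)     # first bump
        + np.exp(-0.5 * ((bins - 120) / 8.0) ** 2)  # second bump
        + 0.05 * np.random.rand(180))               # noise creating spurious extrema
peaks = detect_peaks(hist, count=2)
print(peaks)  # indices of the two maxima that survive the smoothing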
def standardDeviation2d(img, ksize=5, blurred=None):
    '''
    calculate the spatially resolved standard deviation
    for a given 2d array
    ksize -> kernel size
    blurred (optional) -> image already Gaussian-filtered with the same ksize;
                          setting this parameter reduces processing time
    '''
    if not isinstance(ksize, (list, tuple)):  # was `if ksize not in (list, tuple)`, which is always True
        ksize = (ksize, ksize)
    if blurred is None:
        blurred = gaussian_filter(img, ksize)
    else:
        assert blurred.shape == img.shape
    std = np.empty_like(img)
    _calc(img, ksize[0], ksize[1], blurred, std)
    return std
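The `_calc` kernel is external to this snippet. If all that is needed is a spatially resolved standard deviation, the same quantity can be sketched with two filtered moments, using var = E[x^2] - (E[x])^2:

import numpy as np
from scipy.ndimage import gaussian_filter

def local_std(img, sigma=5):
    img = img.astype(float)
    mean = gaussian_filter(img, sigma)
    mean_sq = gaussian_filter(img * img, sigma)
    var = np.clip(mean_sq - mean * mean, 0, None)  # clip tiny negative rounding errors
    return np.sqrt(var)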
def gaussian_filter(self, sigma_x=0.0, sigma_y=0.0):
    '''
    Applies a Gaussian filter to the seismic velocity field to mimic
    the loss of spatial resolution introduced in tomographic imaging
    '''
    from scipy.ndimage.filters import gaussian_filter
    # filter absolute perturbations
    dvp_filtered = gaussian_filter(self.dvp_abs, sigma=[sigma_x, sigma_y])
    dvs_filtered = gaussian_filter(self.dvs_abs, sigma=[sigma_x, sigma_y])
    drho_filtered = gaussian_filter(self.drho_abs, sigma=[sigma_x, sigma_y])
    self.dvp_abs = dvp_filtered
    self.dvs_abs = dvs_filtered
    self.drho_abs = drho_filtered
    # filter relative perturbations
    dvp_filtered = gaussian_filter(self.dvp_rel, sigma=[sigma_x, sigma_y])
    dvs_filtered = gaussian_filter(self.dvs_rel, sigma=[sigma_x, sigma_y])
    drho_filtered = gaussian_filter(self.drho_rel, sigma=[sigma_x, sigma_y])
    self.dvp_rel = dvp_filtered
    self.dvs_rel = dvs_filtered
    self.drho_rel = drho_filtered
def _apply(self, a, epsilons=1000):
    image = a.original_image
    min_, max_ = a.bounds()
    axis = a.channel_axis(batch=False)
    hw = [image.shape[i] for i in range(image.ndim) if i != axis]
    h, w = hw
    size = max(h, w)
    if not isinstance(epsilons, Iterable):
        epsilons = np.linspace(0, 1, num=epsilons + 1)[1:]
    for epsilon in epsilons:
        # epsilon = 1 will correspond to
        # sigma = size = max(width, height)
        sigmas = [epsilon * size] * 3
        sigmas[axis] = 0  # do not blur across the channel axis
        blurred = gaussian_filter(image, sigmas)
        blurred = np.clip(blurred, min_, max_)
        _, is_adversarial = a.predictions(blurred)
        if is_adversarial:
            return
def run_edges(image):
    '''This function finds and colors all edges in the given image.'''
    # Convert image to gray
    if len(image.shape) > 2:
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        grayimage = image
    # blur so the gradient operation is less noisy;
    # uses a Gaussian filter with sigma = 2
    grayimage = gaussian_filter(grayimage, 2).astype(float)
    # Filter with x and y Sobel filters
    dx = convolve2d(grayimage, sobel_filter_x())
    dy = convolve2d(grayimage, sobel_filter_y())
    # Convert to orientation and magnitude images
    theta = transform_xy_theta(dx, dy)
    mag = transform_xy_mag(dx, dy)
    outimg = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)
    # Fill with corresponding color.
    for r in range(outimg.shape[0]):
        for c in range(outimg.shape[1]):
            outimg[r, c, :] = get_color(theta[r, c], mag[r, c])
    return outimg
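`sobel_filter_x` and `sobel_filter_y` are not shown in the snippet; the standard 3x3 Sobel kernels they presumably return would look like this (a hypothetical reconstruction):

import numpy as np

def sobel_filter_x():
    return np.array([[-1, 0, 1],
                     [-2, 0, 2],
                     [-1, 0, 1]], dtype=float)

def sobel_filter_y():
    return np.array([[-1, -2, -1],
                     [ 0,  0,  0],
                     [ 1,  2,  1]], dtype=float)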
def gauss_degrade(image, margin=1.0, change=None, noise=0.02, minmargin=0.5, inner=1.0):
    if image.ndim == 3:
        image = mean(image, axis=2)
    m = mean([amin(image), amax(image)])
    image = 1.0*(image > m)  # float, so the noise added below is not truncated
    if margin < minmargin:
        return 1.0*image
    pixels = sum(image)
    if change is not None:
        npixels = int((1.0+change)*pixels)
    else:
        edt = distance_transform_edt(image == 0)
        npixels = sum(edt <= (margin+1e-4))
    r = int(max(1, 2*margin+0.5))
    ri = int(margin+0.5-inner)
    # cast the boolean morphology results before subtracting
    # (modern numpy disallows `-` on boolean arrays)
    if ri <= 0:
        mask = 1*binary_dilation(image, iterations=r) - image
    else:
        mask = 1*binary_dilation(image, iterations=r) - 1*binary_erosion(image, iterations=ri)
    image += mask*randn(*image.shape)*noise*min(1.0, margin**2)
    smoothed = gaussian_filter(1.0*image, margin)
    frac = max(0.0, min(1.0, npixels*1.0/prod(image.shape)))
    threshold = mquantiles(smoothed, prob=[1.0-frac])[0]
    result = (smoothed > threshold)
    return 1.0*result
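A usage sketch; the snippet leans on star imports in its original module, so the assumed imports are spelled out here:

import numpy as np
from numpy import mean, amin, amax, sum, prod
from numpy.random import randn
from scipy.ndimage import gaussian_filter, distance_transform_edt
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.stats.mstats import mquantiles

page = 1.0 * (np.random.rand(64, 64) > 0.7)  # stand-in for a binarized glyph image
degraded = gauss_degrade(page, margin=1.5, noise=0.05)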
def get_smoothed_white(self, npix=2, save=True, show=False, **kwargs):
    """Gets a smoothed version (Gaussian of sigma=npix)
    of the white image. If save is True, it writes a file
    to disk called `smoothed_white.fits`.
    **kwargs are passed down to scipy.ndimage.gaussian_filter()
    """
    hdulist = self.hdulist_white
    im = self.white_data
    if npix > 0:
        smooth_im = ndimage.gaussian_filter(im, sigma=npix, **kwargs)
    else:
        smooth_im = im
    if save:
        hdulist[1].data = smooth_im
        prihdr = hdulist[0].header
        comment = 'Spatially smoothed with a Gaussian kernel of sigma={} spaxels (by MuseCube)'.format(npix)
        # print(comment)
        prihdr['history'] = comment
        hdulist.writeto('smoothed_white.fits', clobber=True)  # `clobber` became `overwrite` in astropy >= 2.0
    if show:
        fig = aplpy.FITSFigure('smoothed_white.fits', figure=plt.figure())
        fig.show_grayscale(vmin=self.vmin, vmax=self.vmax)
    return smooth_im
def compute_gradmaps(binary, scale, usegauss, vscale, hscale, debug=False):
    # use gradient filtering to find baselines
    boxmap = psegutils.compute_boxmap(binary, scale)
    cleaned = boxmap*binary
    if debug:
        debug_show(cleaned, "cleaned")
    if usegauss:
        # this uses Gaussians
        grad = gaussian_filter(1.0*cleaned, (vscale*0.3*scale,
                                             hscale*6*scale),
                               order=(1, 0))
    else:
        # this uses non-Gaussian oriented filters
        grad = gaussian_filter(1.0*cleaned, (max(4, vscale*0.3*scale),
                                             hscale*scale), order=(1, 0))
        grad = uniform_filter(grad, (vscale, hscale*6*scale))
    if debug:
        debug_show(grad, "compute_gradmaps grad")
    bottom = ocrolib.norm_max((grad < 0)*(-grad))
    top = ocrolib.norm_max((grad > 0)*grad)
    if debug:
        debug_show(bottom, "compute_gradmaps bottom")
        debug_show(top, "compute_gradmaps top")
    return bottom, top, boxmap
# augmentation.py, from the ultrasound-nerve-segmentation project by EdwardTyantov
def elastic_transform(image, mask, alpha, sigma, alpha_affine=None, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    # the same displacement field is applied to the image and its mask
    res_x = map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
    res_y = map_coordinates(mask, indices, order=1, mode='reflect').reshape(shape)
    return res_x, res_y
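A usage sketch with a synthetic image/mask pair (note that `alpha_affine` is accepted but unused in this variant):

import numpy as np

img = np.random.rand(96, 96).astype(np.float32)
mask = (img > 0.5).astype(np.float32)
img_t, mask_t = elastic_transform(img, mask, alpha=100.0, sigma=10.0,
                                  random_state=np.random.RandomState(0))
assert img_t.shape == mask_t.shape == img.shape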
def generate_dog(img, nb_octaves, nb_per_octave=4):
    """Generate the difference of Gaussians of an image.
    Args:
        img            The input image
        nb_octaves     Number of octaves (groups of images with similar smoothing/sigmas)
        nb_per_octave  Number of images in one octave (with increasing smoothing/sigmas)
    Returns:
        List of (difference image, sigma value)
    """
    spaces = []
    sigma_start = 1.6
    k_start = math.sqrt(2)
    for i in range(nb_octaves):
        sigma = sigma_start * (2 ** i)
        last_gauss = None
        for j in range(nb_per_octave+1):
            k = k_start ** (j+1)
            gauss = filters.gaussian_filter(img, k*sigma)
            if last_gauss is not None:
                diff = gauss - last_gauss
                spaces.append((diff, k*sigma))
            last_gauss = gauss
    return spaces
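A usage sketch, assuming `filters` is `scipy.ndimage.filters` and `math` is imported in the original module:

import numpy as np

img = np.random.rand(128, 128)
spaces = generate_dog(img, nb_octaves=3)
for diff, sigma in spaces:
    print(diff.shape, round(sigma, 2))  # nb_per_octave differences per octave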
def transform2(image, input_height, input_width,
               resize_height=64, resize_width=64, crop=True, blur=3):
    if crop:
        cropped_image = center_crop(
            image, input_height, input_width,
            resize_height, resize_width)
    else:
        cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
    image = np.array(cropped_image)
    # blurring: filter each channel separately, then restack
    r = filters.gaussian_filter(image[:, :, 0], blur)
    g = filters.gaussian_filter(image[:, :, 1], blur)
    b = filters.gaussian_filter(image[:, :, 2], blur)
    image_blurred = np.dstack((r, g, b))
    return [image/127.5 - 1., image_blurred/127.5 - 1.0]
def TF_elastic_deform(img, alpha=1.0, sigma=1.0):
    """Elastic deformation of images as described in Simard 2003"""
    assert len(img.shape) == 3
    h, w, nc = img.shape
    if nc != 1:
        raise NotImplementedError("Multi-channel not implemented.")
    # Generate uniformly random displacement vectors, then convolve with gaussian kernel
    # and finally multiply by a magnitude coefficient alpha
    dx = alpha * gaussian_filter(
        (np.random.random((h, w)) * 2 - 1), sigma, mode="constant", cval=0
    )
    dy = alpha * gaussian_filter(
        (np.random.random((h, w)) * 2 - 1), sigma, mode="constant", cval=0
    )
    # Map image to the deformation mesh
    x, y = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')
    indices = np.reshape(x+dx, (-1, 1)), np.reshape(y+dy, (-1, 1))
    return map_coordinates(img.reshape((h, w)), indices, order=1).reshape(h, w, nc)
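A usage sketch; the function expects a single-channel array in HWC layout:

import numpy as np

img = np.random.rand(28, 28, 1).astype(np.float32)
out = TF_elastic_deform(img, alpha=10.0, sigma=3.0)
assert out.shape == (28, 28, 1)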
def generateGaussianKernel(size):
    kernel = np.zeros((size, size))
    kernel[size // 2, size // 2] = 1.
    # filtering a unit impulse yields the Gaussian kernel itself
    gauss = fi.gaussian_filter(kernel, size // 2 // 3)
    gauss[gauss < gauss[0, size // 2]] = 0.
    return gauss
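Usage note: because the tails are zeroed by the threshold, the kernel no longer sums to one; re-normalize if it is to be used for smoothing (`fi` is presumably `scipy.ndimage.filters`):

import numpy as np
import scipy.ndimage.filters as fi

kernel = generateGaussianKernel(15)
print(kernel.shape)     # (15, 15)
kernel /= kernel.sum()  # restore unit mass after thresholding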
def preprocess(inputfile, outputfile, order=0, df=None, input_key=None, output_key=None):
    img = nib.load(inputfile)
    data = img.get_data()
    affine = img.affine
    zoom = img.header.get_zooms()[:3]
    data, affine = reslice(data, affine, zoom, (1., 1., 1.), order)
    data = np.squeeze(data)
    data = np.pad(data, [(0, 256 - len_) for len_ in data.shape], "constant")
    if order == 0:
        if df is not None:
            tmp = np.zeros_like(data)
            for target, source in zip(df[output_key], df[input_key]):
                tmp[np.where(data == source)] = target
            data = tmp
        data = np.int32(data)
        assert data.ndim == 3, data.ndim
    else:
        data_sub = data - gaussian_filter(data, sigma=1)
        img = sitk.GetImageFromArray(np.copy(data_sub))
        img = sitk.AdaptiveHistogramEqualization(img)
        data_clahe = sitk.GetArrayFromImage(img)[:, :, :, None]
        data = np.concatenate((data_clahe, data[:, :, :, None]), 3)
        data = (data - np.mean(data, (0, 1, 2))) / np.std(data, (0, 1, 2))
        assert data.ndim == 4, data.ndim
        assert np.allclose(np.mean(data, (0, 1, 2)), 0.), np.mean(data, (0, 1, 2))
        assert np.allclose(np.std(data, (0, 1, 2)), 1.), np.std(data, (0, 1, 2))
        data = np.float32(data)
    img = nib.Nifti1Image(data, affine)
    nib.save(img, outputfile)
def compute_colseps_mconv(binary, scale=1.0):
    """Find column separators using a combination of morphological
    operations and convolution."""
    h, w = binary.shape
    smoothed = gaussian_filter(1.0*binary, (scale, scale*0.5))
    smoothed = uniform_filter(smoothed, (5.0*scale, 1))
    thresh = (smoothed < amax(smoothed)*0.1)
    DSAVE("1thresh", thresh)
    blocks = morph.rb_closing(binary, (int(4*scale), int(4*scale)))
    DSAVE("2blocks", blocks)
    seps = minimum(blocks, thresh)
    seps = morph.select_regions(seps, sl.dim0, min=args['csminheight']*scale, nbest=args['maxcolseps'])
    DSAVE("3seps", seps)
    blocks = morph.r_dilation(blocks, (5, 5))
    DSAVE("4blocks", blocks)
    seps = maximum(seps, 1-blocks)
    DSAVE("5combo", seps)
    return seps
def errors(self, range=10000, smooth=0):
    result = self.error_log[-range:]
    if smooth > 0:
        result = filters.gaussian_filter(result, smooth, mode='mirror')
    return result
def cerrors(self, range=10000, smooth=0):
    result = [e*1.0/max(1, n) for e, n in self.cerror_log[-range:]]
    if smooth > 0:
        result = filters.gaussian_filter(result, smooth, mode='mirror')
    return result
def gauss_distort(images, maxdelta=2.0, sigma=10.0):
    n, m = images[0].shape
    deltas = randn(2, n, m)
    deltas = gaussian_filter(deltas, (0, sigma, sigma))
    deltas /= max(amax(deltas), -amin(deltas))
    deltas *= maxdelta
    xy = transpose(array(meshgrid(range(n), range(m))), axes=[0, 2, 1])
    # print(xy.shape, deltas.shape)
    deltas += xy
    return [map_coordinates(image, deltas, order=1) for image in images]
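A usage sketch; like the other ocropus-style snippets here, this one relies on star imports, so the assumed names are spelled out:

import numpy as np
from numpy import transpose, array, meshgrid, amax, amin
from numpy.random import randn
from scipy.ndimage import gaussian_filter, map_coordinates

imgs = [np.random.rand(32, 32) for _ in range(4)]
warped = gauss_distort(imgs, maxdelta=2.0, sigma=10.0)
assert warped[0].shape == (32, 32)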