import numpy as np


def medianThreshold(img, threshold=0.1, size=3, condition='>', copy=True):
    '''
    Set every pixel value of the given [img] to the median-filtered one
    of a given kernel [size] in case the relative [threshold] is exceeded.
    condition = '>' OR '<'
    '''
    from scipy.ndimage import median_filter
    indices = None
    if threshold > 0:
        # float copy of the median-filtered image (np.asfarray was removed in NumPy 2.0)
        blur = np.asarray(median_filter(img, size=size), dtype=float)
        with np.errstate(divide='ignore', invalid='ignore', over='ignore'):
            if condition == '>':
                indices = abs((img - blur) / blur) > threshold
            else:
                indices = abs((img - blur) / blur) < threshold

        if copy:
            img = img.copy()
        img[indices] = blur[indices]
    return img, indices
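A minimal usage sketch for medianThreshold above: it only assumes numpy is available as np (as the function body already requires) and replaces an artificial hot pixel with its local median.

import numpy as np

noisy = np.random.rand(64, 64)
noisy[10, 10] = 50.0                      # isolated outlier
cleaned, replaced = medianThreshold(noisy, threshold=0.2, size=3, condition='>')
print(int(replaced.sum()), "pixels replaced by their local median")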
Python median_filter() example source code
def prepare_n_mnist(filename, is_filter, num_spikes, step_factor=1):
    """Creates images from the specified n mnist recording
    filename: path to the recording
    is_filter: True if median filtering should be applied to the constructed image
    num_spikes: number of unique spikes per image
    step_factor: proportional amount to shift before generating the next image
                 1 would result in no overlapping events between images
                 0.6 would result in the next image overlapping with 40% of the previous image
    returns: list of images, where each image is a 2d numpy array (height, width)
    """
    td = ev.read_dataset(filename)
    # td.show_td(100)
    td.data = stabilize(td)
    td.data = td.extract_roi([3, 3], [28, 28], True)
    images = make_td_images(td, num_spikes, step_factor)

    if is_filter:
        images = ndimage.median_filter(images, 3)

    # for image in images:
    #     cv2.imshow('img', image)
    #     cv2.waitKey(70)
    return images
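A hedged usage sketch for prepare_n_mnist: the recording path below is hypothetical, and the call relies on the project's own helpers (ev.read_dataset, stabilize, make_td_images) being importable as in the module above.

# hypothetical N-MNIST recording path; adjust to your local dataset layout
images = prepare_n_mnist('Train/0/00002.bin', is_filter=True,
                         num_spikes=150, step_factor=0.6)
print(len(images), 'images of shape', images[0].shape)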
def prepare_n_mnist_continuous(filename, is_filter, is_normalize=False):
    """Creates image with pixel values indicating probability of a spike
    filename: path to the recording
    is_filter: True if median filtering should be applied to the constructed image
    is_normalize: If True, the probabilities will be normalized to make the image more obvious
    returns: image (2d numpy array (height, width))
    """
    td = ev.read_dataset(filename)
    # td.show_td(100)
    td.data = stabilize(td)
    td.data = td.extract_roi([0, 0], [28, 28], True)
    # td.data = apply_tracking1(td)
    # td.data = apply_tracking2(td)
    # td.data = apply_tracking3(td)
    # td.data = td.extract_roi([3, 3], [28, 28], True)
    image = make_td_probability_image(td, 9, is_normalize)

    if is_filter:
        image = ndimage.median_filter(image, 3)

    # cv2.imshow('img', image)
    # cv2.waitKey(1)
    return image
def median_filter(piv, size=2):
    """Computes a median filter on u and v"""
    # mf is assumed to be scipy.ndimage.median_filter and disk to be
    # skimage.morphology.disk, imported elsewhere in this module
    piv.u = mf(piv.u, footprint=disk(size))
    piv.v = mf(piv.v, footprint=disk(size))
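A small sketch exercising the wrapper above; the mf/disk aliases it relies on are spelled out explicitly here (an assumption: scipy.ndimage.median_filter and skimage.morphology.disk), and any object with 2D u and v fields stands in for a PIV result.

import numpy as np
from types import SimpleNamespace
from scipy.ndimage import median_filter as mf
from skimage.morphology import disk

piv = SimpleNamespace(u=np.random.rand(32, 32), v=np.random.rand(32, 32))
median_filter(piv, size=2)   # filters piv.u and piv.v in place with a disk footprint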
def subtract_background_median(z, footprint=19, implementation='scipy'):
    """Remove background using a median filter.

    Parameters
    ----------
    z : np.array
        Pattern/image from which the background is subtracted.
    footprint : int
        Size of the window that is convolved with the array to determine
        the median. Should be large enough that it is about 3x as big as the
        size of the peaks.
    implementation : str
        One of 'scipy', 'skimage'. Skimage is much faster, but it messes with
        the data format. The scipy implementation is safer, but slower.

    Returns
    -------
    Pattern with background subtracted as np.array
    """
    if implementation == 'scipy':
        bg_subtracted = z - ndi.median_filter(z, size=footprint)
    elif implementation == 'skimage':
        selem = morphology.square(footprint)
        # skimage only accepts input image as uint16
        bg_subtracted = z - filters.median(z.astype(np.uint16), selem).astype(z.dtype)
    else:
        raise ValueError("Unknown implementation `{}`".format(implementation))

    return np.maximum(bg_subtracted, 0)
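A quick sketch of the 'scipy' path of subtract_background_median on a synthetic pattern (a slowly varying ramp plus two sharp peaks); it assumes the module-level ndi/np imports the function already uses.

import numpy as np
import scipy.ndimage as ndi

yy, xx = np.mgrid[0:128, 0:128]
pattern = 0.01 * (xx + yy)                # slowly varying background
pattern[40, 40] = pattern[90, 70] = 5.0   # sharp peaks to keep
peaks_only = subtract_background_median(pattern, footprint=19,
                                        implementation='scipy')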
def run(self, ips, snap, img, para=None):
    nimg.median_filter(snap, para['size'], output=img)
def medianFilter(data, window, interpolate=True):
    '''
    A median filter.
    If interpolate is True, data will be interpolated before smoothing.
    Otherwise, all available data within the window will be used.
    @param data: Input data
    @param window: Size of filter window
    @param interpolate: Interpolate data before smoothing
    @return Smoothed data
    '''
    if interpolate:
        data = interpNaN(data)
        result = pd.Series(median_filter(data, size=window), index=data.index)
    else:
        result = data.copy()
        for index, value in data.items():   # Series.iteritems was removed in pandas 2.0
            if not pd.isnull(value):
                result.loc[index] = np.nanmedian(data[np.logical_and(data.index > index - window / 2,
                                                                     data.index < index + window / 2)])
    return result
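A minimal sketch of the non-interpolating branch of medianFilter (chosen because the interpNaN helper is not shown here), on a short pandas Series with a numeric index.

import numpy as np
import pandas as pd

s = pd.Series([1.0, 1.1, 9.0, 1.2, np.nan, 1.3], index=range(6))
smoothed = medianFilter(s, window=3, interpolate=False)
print(smoothed.round(2).tolist())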
def denoise_mat(img, i):
    return ndimage.median_filter(img, i)
def show_active_img_and_save_denoise(name, img, path):
    mat = img.astype(float)   # np.float was removed in NumPy 1.24+
    mat = -mat + 1
    mat = mat * 255.0
    mat[mat < 0] = 0
    mat[mat > 255] = 255
    mat = mat.astype(np.uint8)
    mat = ndimage.median_filter(mat, 1)
    cv2.imshow(name, mat)
    cv2.imwrite(path, mat)
    return
def show_active_img_and_save_denoise_filter(name, img, path):
    mat = img.astype(float)
    mat[mat < 0.18] = 0
    mat = -mat + 1
    mat = mat * 255.0
    mat[mat < 0] = 0
    mat[mat > 255] = 255
    mat = mat.astype(np.uint8)
    mat = ndimage.median_filter(mat, 1)
    cv2.imshow(name, mat)
    cv2.imwrite(path, mat)
    return
def show_active_img_and_save_denoise_filter2(name, img, path):
    mat = img.astype(float)
    mat[mat < 0.1] = 0
    mat = -mat + 1
    mat = mat * 255.0
    mat[mat < 0] = 0
    mat[mat > 255] = 255
    mat = mat.astype(np.uint8)
    mat = ndimage.median_filter(mat, 1)
    cv2.imshow(name, mat)
    cv2.imwrite(path, mat)
    return
def median_denoise(img, sigma=1):
    # despite its name, `sigma` is passed to median_filter as the kernel size
    return ndimage.median_filter(img, sigma)
def _icfmedian(i, stack, weight=None, cfwidth=None):
    ufilt = 3  # set this to help with extreme over/under corrections
    return ndi.median_filter(
        ndi.uniform_filter(stack, (ufilt, 1)), (cfwidth, 1))
def filter_median(input_image_raster, filter_size):
    '''
    Median filtering of raster
    '''
    return ndimage.median_filter(input_image_raster, filter_size)
Source: perturb_images.py, project emotion-detection-in-images, author davidjeffwen
def denoise(img):
    # median_filter requires an integer window size; the original passed 2.5,
    # which is not valid, so it is rounded down to 2 here
    return ndimage.median_filter(img, 2)
def populateMissingData(self, approach="Smooth", ilog=None):
    '''
    This function is used to interpolate missing data in the image.
    '''
    if approach == 'Smooth':
        # first run a median filter over the array, then smooth the result.
        # xmin,xmax,ymin,ymax,zmin,zmax = self.getRange()
        mask = np.array(self.flags, dtype=bool)   # np.bool was removed in NumPy 1.24+
        z = self.getZImage().asMatrix2D()
        median = nd.median_filter(z, size=(15, 15))

        mask = mask.flatten()
        z = z.flatten()
        median = median.flatten()
        z[mask == False] = median[mask == False]

        if ilog is not None:
            ilog.log(pv.Image(median.reshape(self.width, self.height)), label="Median")
            ilog.log(pv.Image(z.reshape(self.width, self.height)), label="ZMedian")

        mask = mask.flatten()
        z = z.flatten()
        median = median.flatten()

        for i in range(5):
            tmp = z.copy()
            smooth = nd.gaussian_filter(z.reshape(self.width, self.height), 2.0).flatten()
            z[mask == False] = smooth[mask == False]
            print("Iteration:", i, (z - tmp).max(), (z - tmp).min())

            ilog.log(pv.Image(z.reshape(self.width, self.height)), label="ZSmooth%02d" % i)
            ilog.log(pv.Image((z - tmp).reshape(self.width, self.height)), label="ZSmooth%02d" % i)

    if approach == 'RBF':
        mask = np.array(self.flags, dtype=bool)
        mask = mask.flatten()

        x = np.arange(self.width).reshape(self.width, 1)
        x = x * np.ones((1, self.height))
        x = x.flatten()

        y = np.arange(self.height).reshape(1, self.height)
        y = y * np.ones((self.width, 1))
        y = y.flatten()

        z = self.z.copy()
        z = z.flatten()

        print("Coords:")
        print(len(mask))
        print(len(x[mask]))
        print(len(y[mask]))
        print(len(z[mask]))

        # this produces an error. Probably has too much data
        it.Rbf(x[mask], y[mask], z[mask])
        pass
def fix_badpix_vip(img, bpm, box=5):
    '''
    Corrects the bad pixels marked in the bad pixel mask.

    The bad pixels are replaced by the median of the adjacent pixels
    in a box of the provided box size. This function is very fast but
    works best with isolated (sparse) pixels or very small clusters.

    Copied and adapted from the Vortex Image Processing package,
    https://github.com/vortex-exoplanet/VIP, in which the function is
    called fix_badpix_isolated.

    This version improves on the VIP one by replacing the bad pixels
    with NaNs in the image before applying the median_filter. This
    ensures that adjacent bad pixels are not taken into account when
    calculating the median.

    Parameters
    ----------
    img : array_like
        Input 2D image
    bpm : array_like
        Input bad pixel map. Good pixels have a value of 0, bad pixels
        a value of 1.
    box : odd int, optional
        The size (box x box) of the box of adjacent pixels used for the
        median filter. Default value is 5.

    Return
    ------
    img_clean : array_like
        Cleaned image
    '''

    if not img.ndim == 2:
        raise ValueError('Main input is not a 2D array')

    if not bpm.ndim == 2:
        raise ValueError('Bad pixel map input is not a 2D array')

    if box % 2 == 0:
        raise ValueError('Box size of the median blur kernel must be an odd integer')

    bpm = bpm.astype('bool')

    bp = np.where(bpm)
    img_clean = img.copy()
    img_clean[bp] = np.nan

    smoothed = ndimage.median_filter(img_clean, box, mode='mirror')
    img_clean[bp] = smoothed[bp]

    # replace uncorrected bad pixels with their original value
    mask = ~np.isfinite(img_clean)
    img_clean[mask] = img[mask]

    return img_clean
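A short sketch for fix_badpix_vip on synthetic data: two pixels are flagged in the bad pixel map and replaced by the median of their (NaN-masked) neighbourhood.

import numpy as np

img = np.random.normal(100.0, 1.0, (64, 64))
bpm = np.zeros(img.shape, dtype=int)
bpm[10, 10] = bpm[30, 31] = 1      # flag two bad pixels
img[10, 10] = img[30, 31] = 1e6    # simulate hot pixels
img_clean = fix_badpix_vip(img, bpm, box=5)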
def threshold(image, *, sigma=0., radius=0, offset=0.,
              method='sauvola', smooth_method='Gaussian'):
    """Use scikit-image filters to "intelligently" threshold an image.

    Parameters
    ----------
    image : array, shape (M, N, ...[, 3])
        Input image, conformant with scikit-image data type
        specification [1]_.
    sigma : float, optional
        If positive, use Gaussian filtering to smooth the image before
        thresholding.
    radius : int, optional
        If given, use local median thresholding instead of global.
    offset : float, optional
        If given, reduce the threshold by this amount. Higher values
        result in fewer pixels above the threshold.
    method : {'sauvola', 'niblack', 'median'}
        Which method to use for thresholding. Sauvola is 100x faster, but
        median might be more accurate.
    smooth_method : {'Gaussian', 'TV', 'NL'}
        Which method to use for smoothing. Choose from Gaussian smoothing,
        total variation denoising, and non-local means denoising.

    Returns
    -------
    thresholded : image of bool, same shape as `image`
        The thresholded image.

    References
    ----------
    .. [1] http://scikit-image.org/docs/dev/user_guide/data_types.html
    """
    if sigma > 0:
        if smooth_method.lower() == 'gaussian':
            image = filters.gaussian(image, sigma=sigma)
        elif smooth_method.lower() == 'tv':
            image = restoration.denoise_tv_bregman(image, weight=sigma)
        elif smooth_method.lower() == 'nl':
            image = restoration.denoise_nl_means(image,
                                                 patch_size=round(2 * sigma))
    if radius == 0:
        t = filters.threshold_otsu(image) + offset
    else:
        if method == 'median':
            footprint = hyperball(image.ndim, radius=radius)
            t = ndi.median_filter(image, footprint=footprint) + offset
        elif method == 'sauvola':
            w = 2 * radius + 1
            t = threshold_sauvola(image, window_size=w, k=offset)
        elif method == 'niblack':
            w = 2 * radius + 1
            t = threshold_niblack(image, window_size=w, k=offset)
        else:
            raise ValueError('Unknown method %s. Valid methods are median, '
                             'niblack, and sauvola.' % method)
    thresholded = image > t
    return thresholded
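A usage sketch for threshold on scikit-image's sample camera image; it assumes the module-level names the function relies on (filters, restoration, ndi, hyperball, threshold_sauvola, threshold_niblack) are importable as in the module above.

from skimage import data

cam = data.camera()
binary_global = threshold(cam, sigma=1.0)                    # Gaussian smoothing + Otsu
binary_local = threshold(cam, radius=15, method='sauvola')   # local Sauvola threshold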