def convert_new(fname, target_size):
    print('Processing image: %s' % fname)
    img = Image.open(fname)
    blurred = img.filter(ImageFilter.BLUR)
    ba = np.array(blurred)
    ba_gray = rgb2gray(ba)
    val = filters.threshold_otsu(ba_gray)
    # foreground = (ba_gray > val).astype(np.uint8)
    foreground = closing(ba_gray > val, square(3))
    # kernel = morphology.rectangle(5, 5)
    # foreground = morphology.binary_dilation(foreground, kernel)
    labels = measure.label(foreground)
    properties = measure.regionprops(labels)
    properties = sorted(properties, key=lambda p: p.area, reverse=True)
    # draw_top_regions(properties, 3)
    # return ba
    bbox = properties[0].bbox
    # regionprops gives (min_row, min_col, max_row, max_col); PIL.Image.crop
    # expects (left, upper, right, lower), so reorder the coordinates
    bbox = (bbox[1], bbox[0], bbox[3], bbox[2])
    cropped = img.crop(bbox)
    resized = cropped.resize([target_size, target_size])
    return np.array(resized)
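# The snippets on this page are shown without their module headers; a plausible
# set of imports for the crop-and-resize helpers above and below (an assumption,
# not part of the original source) would be roughly:
import numpy as np
from PIL import Image, ImageFilter
from skimage import filters, measure, morphology
from skimage.color import rgb2gray
from skimage.morphology import closing, square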
def convert_new_regions(fname, target_size):
    print('Processing image: %s' % fname)
    img = Image.open(fname)
    blurred = img.filter(ImageFilter.BLUR)
    ba = np.array(blurred)
    ba_gray = rgb2gray(ba)
    val = filters.threshold_otsu(ba_gray)
    # foreground = (ba_gray > val).astype(np.uint8)
    foreground = closing(ba_gray > val, square(3))
    # kernel = morphology.rectangle(5, 5)
    # foreground = morphology.binary_dilation(foreground, kernel)
    labels = measure.label(foreground)
    properties = measure.regionprops(labels)
    properties = sorted(properties, key=lambda p: p.area, reverse=True)
    draw_top_regions(properties, 3)
    return ba
def convert(fname, target_size):
    # print('Processing image: %s' % fname)
    img = Image.open(fname)
    blurred = img.filter(ImageFilter.BLUR)
    ba = np.array(blurred)
    ba_gray = rgb2gray(ba)
    val = filters.threshold_otsu(ba_gray)
    # foreground = (ba_gray > val).astype(np.uint8)
    foreground = closing(ba_gray > val, square(3))
    # kernel = morphology.rectangle(5, 5)
    # foreground = morphology.binary_dilation(foreground, kernel)
    labels = measure.label(foreground)
    properties = measure.regionprops(labels)
    properties = sorted(properties, key=lambda p: p.area, reverse=True)
    # draw_top_regions(properties, 3)
    # return ba
    bbox = properties[0].bbox
    bbox = (bbox[1], bbox[0], bbox[3], bbox[2])
    cropped = img.crop(bbox)
    resized = cropped.resize([target_size, target_size])
    return resized
def convert(fname, target_size):
    img = Image.open(fname)
    blurred = img.filter(ImageFilter.BLUR)
    ba = np.array(blurred)
    h, w, _ = ba.shape

    if w > 1.2 * h:
        # estimate the background level from thin strips at the left and
        # right edges, then threshold slightly above it
        left_max = ba[:, : w // 32, :].max(axis=(0, 1)).astype(int)
        right_max = ba[:, - w // 32:, :].max(axis=(0, 1)).astype(int)
        max_bg = np.maximum(left_max, right_max)

        foreground = (ba > max_bg + 10).astype(np.uint8)
        bbox = Image.fromarray(foreground).getbbox()

        if bbox is None:
            print('bbox none for {} (???)'.format(fname))
        else:
            left, upper, right, lower = bbox
            # if we selected less than 80% of the original
            # height, just crop the square
            if right - left < 0.8 * h or lower - upper < 0.8 * h:
                print('bbox too small for {}'.format(fname))
                bbox = None
    else:
        bbox = None

    if bbox is None:
        bbox = square_bbox(img, fname)

    cropped = img.crop(bbox)
    resized = cropped.resize([target_size, target_size])
    return np.array(resized)
def square_bbox(img, fname):
    print("square bbox conversion done for image: %s" % fname)
    w, h = img.size
    left = max((w - h) // 2, 0)
    upper = 0
    right = min(w - (w - h) // 2, w)
    lower = h
    return (left, upper, right, lower)
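# A minimal usage sketch for the crop helpers above; the file name and target
# size are illustrative assumptions, not values from the source:
if __name__ == '__main__':
    patch = convert('example.jpeg', 512)   # crop away the dark border, then resize
    print(patch.shape)                      # (512, 512, 3) for an RGB input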
def subtract_background_median(z, footprint=19, implementation='scipy'):
    """Remove background using a median filter.

    Parameters
    ----------
    z : np.array
        Pattern from which the background is subtracted.
    footprint : int
        Size of the window that is convolved with the array to determine
        the median. Should be large enough that it is about 3x as big as
        the size of the peaks.
    implementation : str
        One of 'scipy', 'skimage'. Skimage is much faster, but it messes
        with the data format. The scipy implementation is safer, but slower.

    Returns
    -------
    Pattern with background subtracted as np.array
    """
    if implementation == 'scipy':
        bg_subtracted = z - ndi.median_filter(z, size=footprint)
    elif implementation == 'skimage':
        selem = morphology.square(footprint)
        # skimage only accepts input image as uint16
        bg_subtracted = z - filters.median(z.astype(np.uint16), selem).astype(z.dtype)
    else:
        raise ValueError("Unknown implementation `{}`".format(implementation))

    return np.maximum(bg_subtracted, 0)
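# A quick, self-contained check of both code paths above; the synthetic test
# pattern and footprint are illustrative assumptions, not from the source:
import numpy as np
from scipy import ndimage as ndi
from skimage import filters, morphology

z = np.random.poisson(5, size=(256, 256)).astype(np.float64)
bg_scipy = subtract_background_median(z, footprint=19, implementation='scipy')
bg_skimage = subtract_background_median(z, footprint=19, implementation='skimage')
assert bg_scipy.min() >= 0 and bg_skimage.min() >= 0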
def _coarsenImage(image, f):
    '''
    seems to be a more precise (but slower)
    way to down-scale an image
    '''
    from skimage.morphology import square
    from skimage.filters import rank
    from skimage.transform._warps import rescale
    selem = square(f)
    arri = rank.mean(image, selem=selem)
    return rescale(arri, 1 / f, order=0)
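# Illustrative call (the array and factor are assumptions, not from the source):
# coarsen a uint8 image by a factor of 4 via a local mean over 4x4 squares.
img = np.random.randint(0, 256, (256, 256), dtype=np.uint8)
small = _coarsenImage(img, 4)   # rescaled to shape (64, 64)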
def positionToIntensityUncertaintyForPxGroup(image, std, y0, y1, x0, x1):
    '''
    like positionToIntensityUncertainty,
    but calculates the average uncertainty for an area [y0:y1, x0:x1]
    '''
    fy, fx = y1 - y0, x1 - x0
    if fy != fx:
        raise Exception('averaged area needs to be square ATM')
    image = _coarsenImage(image, fx)
    k = _kSizeFromStd(std)
    y0 = int(round(y0 / fy))
    x0 = int(round(x0 / fx))
    arr = image[y0 - k:y0 + k, x0 - k:x0 + k]
    U = positionToIntensityUncertainty(arr, std / fx, std / fx)
    return U[k:-k, k:-k]
def whiteout_ramp(image, linecoords):
    # Dilation enlarges the bright segments so they can be cut out of the original image
    imagesection = image[linecoords.object]
    count = 0
    for i in morph.dilation(linecoords.object_matrix, morph.square(10)):
        whitevalue = measurements.find_objects(i == linecoords.object_value + 1)
        if whitevalue:
            whitevalue = whitevalue[0][0]
            imagesection[count, whitevalue.start:whitevalue.stop] = 255
        count += 1
    return 0
def __call__(self, img_small):
    m = morphology.square(self.square_size)
    img_th = morphology.black_tophat(img_small, m)
    img_sob = abs(filters.sobel_v(img_th))
    img_closed = morphology.closing(img_sob, m)
    threshold = filters.threshold_otsu(img_closed)
    return img_closed > threshold
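# The __call__ above belongs to a class that is not shown; a hypothetical
# stand-in with only the assumed `square_size` attribute, just to illustrate
# how the vertical-edge mask might be produced:
from skimage import data, transform

class _EdgeMaskStub:
    def __init__(self, square_size=5):
        self.square_size = square_size

_EdgeMaskStub.__call__ = __call__   # attach the method defined above

mask_fn = _EdgeMaskStub(square_size=5)
img_small = transform.resize(data.camera(), (128, 128))
mask = mask_fn(img_small)           # boolean mask of regions rich in vertical edges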