def v_fx(screen):
dims = screen.get_size()
im1 = pygame.image.tostring(screen,'RGB')
im = Image.frombytes('RGB',(dims),im1)
im1 = im.filter(ImageFilter.BLUR)
im1.save('test.png','PNG')
return pygame.image.load('test.png')
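As a side note, the round trip through 'test.png' can be avoided; a minimal in-memory variant (an assumption, not part of the original snippet) converts the blurred PIL image straight back into a pygame surface:

import pygame
from PIL import Image, ImageFilter

def v_fx_inmemory(screen):
    dims = screen.get_size()
    raw = pygame.image.tostring(screen, 'RGB')
    blurred = Image.frombytes('RGB', dims, raw).filter(ImageFilter.BLUR)
    # pygame.image.fromstring rebuilds a surface from raw RGB bytes
    return pygame.image.fromstring(blurred.tobytes(), dims, 'RGB')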
def convert_new(fname, target_size):
print('Processing image: %s' % fname)
img = Image.open(fname)
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
ba_gray = rgb2gray(ba)
val = filters.threshold_otsu(ba_gray)
# foreground = (ba_gray > val).astype(np.uint8)
foreground = closing(ba_gray > val, square(3))
# kernel = morphology.rectangle(5, 5)
# foreground = morphology.binary_dilation(foreground, kernel)
labels = measure.label(foreground)
properties = measure.regionprops(labels)
properties = sorted(properties, key=lambda p: p.area, reverse=True)
# draw_top_regions(properties, 3)
# return ba
bbox = properties[0].bbox
bbox = (bbox[1], bbox[0], bbox[3], bbox[2])
cropped = img.crop(bbox)
resized = cropped.resize([target_size, target_size])
return np.array(resized)
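The converters in this group call scikit-image helpers whose imports sit outside the snippets; a sketch of the imports they appear to assume:

import numpy as np
from PIL import Image, ImageFilter
from skimage import filters, measure
from skimage.color import rgb2gray
from skimage.morphology import closing, square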
def convert_new_regions(fname, target_size):
print('Processing image: %s' % fname)
img = Image.open(fname)
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
ba_gray = rgb2gray(ba)
val = filters.threshold_otsu(ba_gray)
# foreground = (ba_gray > val).astype(np.uint8)
foreground = closing(ba_gray > val, square(3))
# kernel = morphology.rectangle(5, 5)
# foreground = morphology.binary_dilation(foreground, kernel)
labels = measure.label(foreground)
properties = measure.regionprops(labels)
properties = sorted(properties, key=lambda p: p.area, reverse=True)
draw_top_regions(properties, 3)
return ba
def convert(fname, target_size):
# print('Processing image: %s' % fname)
img = Image.open(fname)
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
ba_gray = rgb2gray(ba)
val = filters.threshold_otsu(ba_gray)
# foreground = (ba_gray > val).astype(np.uint8)
foreground = closing(ba_gray > val, square(3))
# kernel = morphology.rectangle(5, 5)
# foreground = morphology.binary_dilation(foreground, kernel)
labels = measure.label(foreground)
properties = measure.regionprops(labels)
properties = sorted(properties, key=lambda p: p.area, reverse=True)
# draw_top_regions(properties, 3)
# return ba
bbox = properties[0].bbox
bbox = (bbox[1], bbox[0], bbox[3], bbox[2])
cropped = img.crop(bbox)
resized = cropped.resize([target_size, target_size])
return resized
def createPic():
width = 240
height = 60
im = Image.new('RGB', (width, height), (255, 255, 255))
    # create the Font object:
font = ImageFont.truetype('arial.ttf', 35)
    # create the Draw object:
draw = ImageDraw.Draw(im)
    # fill every pixel with a random colour:
for x in range(width):
for y in range(height):
draw.point((x, y), fill=randColor())
    # draw four random characters:
for t in range(4):
draw.text((60 * t + 10, 10), randWord(), font=font, fill=randColor2())
    # blur the result:
im = im.filter(ImageFilter.BLUR)
im.show()
im.save('code.jpg', 'jpeg')
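createPic relies on three colour/text helpers that are not shown; a plausible sketch of them (an assumption modelled on the common CAPTCHA tutorial this mirrors):

import random
import string

def randColor():
    # light random colour for the background noise
    return (random.randint(64, 255), random.randint(64, 255), random.randint(64, 255))

def randColor2():
    # darker random colour for the text
    return (random.randint(32, 127), random.randint(32, 127), random.randint(32, 127))

def randWord():
    # one random upper-case letter
    return random.choice(string.ascii_uppercase)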
def classfiy_aHash(image1,image2,size=(8,8),exact=25):
    ''' 'image1' and 'image2' are PIL Image objects.
    You can build one with 'Image.open(path)'.
    'size' is the size both images are resized to before they are compared by the algorithm;
    it defaults to 8 x 8.
    'exact' is the upper bound on the Hamming distance between 'image1' and 'image2';
    it defaults to 25. The smaller 'exact' is, the stricter the comparison.
    The function returns True when 'image1' and 'image2' are similar.
'''
image1 = image1.resize(size).convert('L').filter(ImageFilter.BLUR)
image1 = ImageOps.equalize(image1)
code1 = getCode(image1, size)
image2 = image2.resize(size).convert('L').filter(ImageFilter.BLUR)
image2 = ImageOps.equalize(image2)
code2 = getCode(image2, size)
assert len(code1) == len(code2),"error"
return compCode(code1, code2)<=exact
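A short usage sketch for the hash comparison above, assuming getCode and compCode are defined alongside it in the same module:

from PIL import Image

if classfiy_aHash(Image.open('a.jpg'), Image.open('b.jpg')):
    print('the two images look similar')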
def classify_ahash(cls, image1, image2, size=(8, 8), exact=25):
""" 'image1' and 'image2' is a Image Object.
You can build it by 'Image.open(path)'.
'Size' is parameter what the image will resize to it and then image will be compared by the algorithm.
It's 8 * 8 when it default.
'exact' is parameter for limiting the Hamming code between 'image1' and 'image2',it's 25 when it default.
The result become strict when the exact become less.
This function return the true when the 'image1' and 'image2' are similar.
"""
image1 = image1.resize(size).convert('L').filter(ImageFilter.BLUR)
image1 = ImageOps.equalize(image1)
code1 = cls.get_code(image1, size)
image2 = image2.resize(size).convert('L').filter(ImageFilter.BLUR)
image2 = ImageOps.equalize(image2)
code2 = cls.get_code(image2, size)
assert len(code1) == len(code2), "error"
    # compare against 'exact' so the function returns True/False as documented
    return cls.compare_code(code1, code2) <= exact
def apply_effects(image, effects):
"""method to apply effects to original image from list of effects
"""
for effect in effects:
gray = ImageOps.grayscale(image)
        # dictionary with all the available effects
all_effects = {
'BLUR': image.filter(ImageFilter.BLUR),
'CONTOUR': image.filter(ImageFilter.CONTOUR),
'EMBOSS': image.filter(ImageFilter.EMBOSS),
'SMOOTH': image.filter(ImageFilter.SMOOTH),
'HULK': ImageOps.colorize(gray, (0, 0, 0, 0), '#00ff00'),
'FLIP': ImageOps.flip(image),
'MIRROR': ImageOps.mirror(image),
'INVERT': ImageOps.invert(image),
'SOLARIZE': ImageOps.solarize(image),
'GREYSCALE': ImageOps.grayscale(image),
}
        phedited = all_effects[effect]
        image = phedited
    # return the accumulated image so an empty effects list yields the original
    return image
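A brief usage sketch for apply_effects; the effect names are the keys of the dictionary above:

from PIL import Image

photo = Image.open('photo.jpg')
edited = apply_effects(photo, ['BLUR', 'MIRROR'])
edited.save('photo_edited.jpg')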
def convert(fname, crop_size):
img = Image.open(fname)
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
h, w, _ = ba.shape
if w > 1.2 * h:
left_max = ba[:, : w // 32, :].max(axis=(0, 1)).astype(int)
right_max = ba[:, - w // 32:, :].max(axis=(0, 1)).astype(int)
max_bg = np.maximum(left_max, right_max)
foreground = (ba > max_bg + 10).astype(np.uint8)
bbox = Image.fromarray(foreground).getbbox()
if bbox is None:
print('bbox none for {} (???)'.format(fname))
else:
left, upper, right, lower = bbox
# if we selected less than 80% of the original
# height, just crop the square
if right - left < 0.8 * h or lower - upper < 0.8 * h:
print('bbox too small for {}'.format(fname))
bbox = None
else:
bbox = None
if bbox is None:
bbox = square_bbox(img)
cropped = img.crop(bbox)
resized = cropped.resize([crop_size, crop_size])
return resized
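square_bbox is used as a fallback but is not included in these snippets; a plausible minimal version (purely an assumption; the later variants also take the filename, presumably for logging) that crops the largest centred square:

def square_bbox(img, fname=None):
    # hypothetical helper: bounding box of the largest centred square
    w, h = img.size
    side = min(w, h)
    left = (w - side) // 2
    upper = (h - side) // 2
    return (left, upper, left + side, upper + side)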
def base_usage():
    im = Image.open("data/original.jpg")
    im2 = Image.open("data/original.jpg")
    print(im)
    im = im.convert('L')
    print(im)
    image_data = im.getdata()
    print(image_data)
    # im.show()
    (w, h) = im.size
    print(w, h)
    im.thumbnail((w // 2, h // 2))
    im.save("data/small.jpg", 'jpeg')
    # filter() returns a new image; assign it, otherwise the saved copy is not blurred
    im2 = im2.filter(ImageFilter.BLUR)
    im2.save("data/blur3.jpg", 'jpeg')
def blur(arr, tf):
img = to_PIL(arr, tf)
if tf: return img.filter(ImageFilter.BLUR)
return to_theano(img.filter(ImageFilter.BLUR))
def convert(fname, target_size):
img = Image.open(fname)
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
h, w, _ = ba.shape
if w > 1.2 * h:
left_max = ba[:, : w // 32, :].max(axis=(0, 1)).astype(int)
right_max = ba[:, - w // 32:, :].max(axis=(0, 1)).astype(int)
max_bg = np.maximum(left_max, right_max)
foreground = (ba > max_bg + 10).astype(np.uint8)
bbox = Image.fromarray(foreground).getbbox()
if bbox is None:
print('bbox none for {} (???)'.format(fname))
else:
left, upper, right, lower = bbox
# if we selected less than 80% of the original
# height, just crop the square
if right - left < 0.8 * h or lower - upper < 0.8 * h:
print('bbox too small for {}'.format(fname))
bbox = None
else:
bbox = None
if bbox is None:
bbox = square_bbox(img, fname)
cropped = img.crop(bbox)
resized = cropped.resize([target_size, target_size])
return np.array(resized)
def crop_image(fname,target_size):
print('Processing image: %s' % fname)
#otsu thresholding
img = Image.open(fname)
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
gray_image = cv2.cvtColor(ba, cv2.COLOR_BGR2GRAY)
retval2, threshold2 = cv2.threshold(gray_image, 125, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#storing white pixel in each row and column in two arrays
#these arrays are later used to find boundaries for cropping image
row_white_pixel_count=np.count_nonzero(threshold2,axis=1)
col_white_pixel_count=np.count_nonzero(threshold2,axis=0)
#find x,y,w,h for cropping image
y=find_boundary(row_white_pixel_count,col_white_pixel_count.size)
h=find_boundary_reverse(row_white_pixel_count,col_white_pixel_count.size)
x=find_boundary(col_white_pixel_count,row_white_pixel_count.size)
w=find_boundary_reverse(col_white_pixel_count,row_white_pixel_count.size)
crop_array = ba[y:h, x:w]
#resize the image
crop_img=Image.fromarray(crop_array)
resized = crop_img.resize([target_size, target_size])
    # uncomment the line below to see histograms of white pixels per row and per column
    # subplots(threshold2, row_white_pixel_count, col_white_pixel_count, crop_img)
return resized
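find_boundary, find_boundary_reverse and subplots are defined elsewhere; a plausible sketch of the two boundary helpers (an assumption: the boundary is taken as the first or last index whose white-pixel count clears a small fraction of the opposite dimension):

def find_boundary(white_pixel_count, other_dim, frac=0.02):
    # hypothetical: first row/column index with a non-trivial amount of foreground
    threshold = frac * other_dim
    for i, count in enumerate(white_pixel_count):
        if count > threshold:
            return i
    return 0

def find_boundary_reverse(white_pixel_count, other_dim, frac=0.02):
    # hypothetical: last row/column index with a non-trivial amount of foreground
    threshold = frac * other_dim
    for i in range(len(white_pixel_count) - 1, -1, -1):
        if white_pixel_count[i] > threshold:
            return i
    return len(white_pixel_count) - 1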
def blurImage(self):
"""Floute l'image à l'aide de PIL puis affiche le résultat."""
self.image = self.image.filter(ImageFilter.BLUR)
self.displayImage()
# Source: process_image.py, from the project inception-face-shape-classifier (author: adonistio)
def blur_img(imdir,outdir):
im = Image.open(imdir)
out_filename = outdir
    # Image.save() returns None, so there is nothing useful to assign here
    im.filter(ImageFilter.BLUR).save(out_filename, 'JPEG', quality=100)
def labelfn(c, w, h):
if c not in df.index:
return None
x = Image.from_text_bounded(df.name[c], (w, h), 18, papply(arial, bold=True), padding=4, max_width=w, align="center", fg="black")
y = x.replace_color("black", "white", ignore_alpha=True).filter(ImageFilter.BLUR)
return y.place(x)
def test_draw_on_image_with_filters(self,
_draw_content_mock,
_save,
filter_mock):
filters = (ImageFilter.BLUR, ImageFilter.GaussianBlur(2))
with create_test_image():
filter_mock.return_value = PIL_Image.open('test.png')
self.img.draw_on_image(
image_path=os.path.abspath('test.png'),
image_filters=filters)
self.assertTrue(filter_mock.called)
self.assertTrue(_draw_content_mock.called)
def convert(fname, target_size=512):
img = Image.open(fname).convert('RGB')
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
h, w, _ = ba.shape
if w > 1.2 * h:
left_max = ba[:, : w // 32, :].max(axis=(0, 1)).astype(int)
right_max = ba[:, - w // 32:, :].max(axis=(0, 1)).astype(int)
max_bg = np.maximum(left_max, right_max)
foreground = (ba > max_bg + 10).astype(np.uint8)
bbox = Image.fromarray(foreground).getbbox()
if bbox is None:
print('bbox none for {} (???)'.format(fname))
else:
left, upper, right, lower = bbox
# if we selected less than 80% of the original
# height, just crop the square
if right - left < 0.8 * h or lower - upper < 0.8 * h:
print('bbox too small for {}'.format(fname))
bbox = None
else:
bbox = None
if bbox is None:
bbox = square_bbox(img, fname)
cropped = img.crop(bbox)
resized = cropped.resize([target_size, target_size])
return resized
def convert(image_fname, label_fname, target_size):
img = Image.open(image_fname)
label = Image.open(label_fname)
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
h, w, _ = ba.shape
if w > 1.2 * h:
left_max = ba[:, : w // 32, :].max(axis=(0, 1)).astype(int)
right_max = ba[:, - w // 32:, :].max(axis=(0, 1)).astype(int)
max_bg = np.maximum(left_max, right_max)
foreground = (ba > max_bg + 10).astype(np.uint8)
bbox = Image.fromarray(foreground).getbbox()
if bbox is None:
print('bbox none for {} (???)'.format(image_fname))
else:
left, upper, right, lower = bbox
# if we selected less than 80% of the original
# height, just crop the square
if right - left < 0.8 * h or lower - upper < 0.8 * h:
print('bbox too small for {}'.format(image_fname))
bbox = None
else:
bbox = None
if bbox is None:
bbox = square_bbox(img, image_fname)
cropped_img = img.crop(bbox)
cropped_label = label.crop(bbox)
resized_img = cropped_img.resize([target_size, target_size])
resized_label = cropped_label.resize([target_size, target_size])
return resized_img, resized_label
def make_blur_img(file_name, path_dir):
    file_path = path_dir + '/' + file_name
    img = Image.open(file_path)
    # str.translate(None, '.png') is Python 2 only (and would delete those
    # characters anywhere in the name); strip the extension explicitly instead
    base = file_name[:-4] if file_name.endswith('.png') else file_name
    img.filter(ImageFilter.BLUR).save(path_dir + '/' + base + '_blur.png', "PNG")
    return
def call(self, img):
if img is None: raise ValueError('img is None')
im_n = img.copy()
gauss_blur_low, gauss_blur_high = 0, self.gauss_blur
blur_low, blur_high = gauss_blur_high, gauss_blur_high + self.blur
smooth_low, smooth_high = blur_high, blur_high + self.smooth
smooth_more_low, smooth_more_high = smooth_high, smooth_high + self.smooth_more
rank_low, rank_high = smooth_more_high, smooth_more_high + self.rank_filter
r = random()
if gauss_blur_low <= r <= gauss_blur_high:
im_n = im_n.filter(ImageFilter.GaussianBlur(1))
elif blur_low < r <= blur_high:
im_n = im_n.filter(ImageFilter.BLUR)
elif smooth_low < r <= smooth_high:
im_n = im_n.filter(ImageFilter.SMOOTH)
elif smooth_more_low < r <= smooth_more_high:
im_n = im_n.filter(ImageFilter.SMOOTH_MORE)
elif rank_low < r <= rank_high:
im_n = im_n.filter(ImageFilter.RankFilter(size=3, rank=7))
else:
pass
return im_n
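call() reads several probability attributes that are set elsewhere; a minimal wrapper sketch (the class name and constructor defaults are assumptions) showing how those attributes partition the [0, 1] draw into bands, with call() above attached as the class's method:

from random import random
from PIL import ImageFilter

class RandomPILFilter:
    def __init__(self, gauss_blur=0.1, blur=0.1, smooth=0.1,
                 smooth_more=0.1, rank_filter=0.1):
        # each value is the probability mass given to one PIL filter;
        # call() stacks them into consecutive intervals of [0, 1]
        self.gauss_blur = gauss_blur
        self.blur = blur
        self.smooth = smooth
        self.smooth_more = smooth_more
        self.rank_filter = rank_filter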
def test_filter(path, image):
with Img(fp=path(image['sub'])) as src, TemporaryFile() as tf:
if 'mode' in image:
src.convert(image['mode'])
src.filter(ImageFilter.BLUR)
src.save(fp=tf)
with Img(fp=tf) as dest:
assert (dest.width, dest.height, dest.frame_count) == (
src.width, src.height, src.frame_count)
def generate_verify_image(font_path):
"""
    Generate a verification-code (CAPTCHA) image.
    :param font_path: path to the font file used to draw the characters
    :return: the random four-character string and the base64-encoded image data URI
"""
width = 60 * 4
height = 60
image = Image.new('RGB', (width, height), (255, 255, 255))
    # create the Font object:
font = ImageFont.truetype(font_path, 36)
    # create the Draw object:
draw = ImageDraw.Draw(image)
    # fill every pixel with a random background colour:
for x in range(width):
for y in range(height):
draw.point((x, y), fill=rand_background_color())
    # draw the four characters:
rand_str = rand_char()
rand_str += rand_char()
rand_str += rand_char()
rand_str += rand_char()
for t in range(4):
draw.text((60 * t + 10, 10), rand_str[t], font=font, fill=rand_text_color())
    # blur:
image = image.filter(ImageFilter.BLUR)
file_name = './static/img/' + str(time.time())
file_name += '.jpg'
image.save(file_name, 'jpeg')
f = open(file_name, 'rb')
str_image = b'data:image/jpeg;base64,'
str_image += base64.b64encode(f.read())
f.close()
os.remove(file_name)
return rand_str, bytes.decode(str_image)
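A brief usage sketch; the font path here is an assumption, and the function expects a writable ./static/img/ directory to exist:

code, data_uri = generate_verify_image('./static/fonts/arial.ttf')
# 'code' is the four-character answer; 'data_uri' can be placed directly
# in an <img src="..."> attribute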
def classify_DCT(image1,image2,size=(32,32),part_size=(8,8)):
""" 'image1' and 'image2' is a Image Object.
You can build it by 'Image.open(path)'.
'Size' is parameter what the image will resize to it and then image will be compared by the pHash.
It's 32 * 32 when it default.
'part_size' is a size of a part of the matrix after Discrete Cosine Transform,which need to next steps.
It's 8 * 8 when it default.
The function will return the hamming code,less is correct.
"""
assert size[0]==size[1],"size error"
assert part_size[0]==part_size[1],"part_size error"
image1 = image1.resize(size).convert('L').filter(ImageFilter.BLUR)
image1 = ImageOps.equalize(image1)
matrix = get_matrix(image1)
DCT_matrix = DCT(matrix)
List = sub_matrix_to_list(DCT_matrix, part_size)
middle = get_middle(List)
code1 = get_code(List, middle)
image2 = image2.resize(size).convert('L').filter(ImageFilter.BLUR)
image2 = ImageOps.equalize(image2)
matrix = get_matrix(image2)
DCT_matrix = DCT(matrix)
List = sub_matrix_to_list(DCT_matrix, part_size)
middle = get_middle(List)
code2 = get_code(List, middle)
return comp_code(code1, code2)
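A usage sketch for the pHash comparison, assuming get_matrix, DCT, sub_matrix_to_list, get_middle, get_code and comp_code are defined in the same module:

from PIL import Image

distance = classify_DCT(Image.open('a.jpg'), Image.open('b.jpg'))
print('hamming distance:', distance)  # smaller means more similar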
def recognize(self):
    # grab the drawing from the input canvas, blur it, and convert it to greyscale
img = self.input_canvas.getImage().filter(ImageFilter.BLUR).convert('L')
img.thumbnail((28, 28), getattr(Image, 'ANTIALIAS'))
img = img.point(lambda x: 255 - x)
input = np.asarray(img).ravel()
result = self.nn.test([input / 255.0], np.zeros(10))[0]
num = max(enumerate(result), key=lambda x: x[1])[0]
self.result_label.configure(text = str(num))
print(num, result)
def make( numbers, width = 400, height = 200):
strs = ''.join(random.sample('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', numbers))
im = Image.new( 'RGB', (width, height ), (255,255,255))
draw = ImageDraw.Draw(im)
font = ImageFont.truetype('verdana.ttf',width//numbers)
font_width , font_height = font.getsize(strs)
strs_len = len(strs)
x = (width - font_width) // 2
y = (height - font_height ) //2
total_dex = 0
for i in strs:
draw.text((x,y), i, random_col(), font)
temp = random.randint(-23,23)
total_dex += temp
im = im.rotate(temp)
draw = ImageDraw.Draw(im)
x += font_width/strs_len
im = im.rotate(-total_dex)
draw = ImageDraw.Draw(im)
draw.line(
[(random.randint(0,width//numbers),
random.randint(0,height//numbers)
),
(random.randint(width//numbers*(numbers-1),width),
random.randint(height//numbers*(numbers-1),height)
)],
fill = random_col(),
width = numbers+1)
draw.line(
[(random.randint(0,width//numbers),
random.randint(height//numbers*(numbers-1),height)
),
(random.randint(width//(numbers-1)*(numbers-2),width),
random.randint(0,height//(numbers-1))
)],
fill = random_col(),
width = numbers+1)
draw.line(
[(random.randint(width//4*3,width),
random.randint(height//4*3,height)
),
(random.randint(width//3*2,width),
random.randint(0,height//3)
)],
fill = random_col(),
width = numbers + 1)
for x in range(width):
for y in range(height):
col = im.getpixel((x,y))
if col == (255,255,255) or col == (0,0,0):
draw.point((x,y), fill = random_col())
im = im.filter(ImageFilter.BLUR)
im.save('out.jpg')
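A one-line usage sketch for the CAPTCHA generator above; it assumes verdana.ttf is resolvable by Pillow and that random_col is defined in the same module:

make(4)  # writes a 400x200 four-character CAPTCHA to out.jpg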