def velocity_ocr(image, coords, f1app):
    # crop and convert image to greyscale
    img = Image.fromarray(image).crop(coords).convert('L')
    img = img.resize([img.width*2, img.height*2])
    if f1app:
        # filters for video from the f1 app
        img = ImageEnhance.Brightness(img).enhance(3.0)
        img = ImageEnhance.Contrast(img).enhance(2.0)
    else:
        # filters for onboard video graphic
        img = ImageEnhance.Brightness(img).enhance(0.1)
        img = ImageEnhance.Contrast(img).enhance(2.0)
        img = ImageEnhance.Contrast(img).enhance(4.0)
        img = ImageEnhance.Brightness(img).enhance(0.2)
        img = ImageEnhance.Contrast(img).enhance(16.0)
    try:
        # vel = pytesseract.image_to_string(img, config='digits')
        vel = pytesseract.image_to_string(img)
    except UnicodeDecodeError:
        vel = -1
    return vel

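# Example usage (illustrative sketch): read one video frame with OpenCV and OCR
# the speed readout. The file name and crop box are placeholders; the real
# coordinates depend on where the velocity graphic sits in your footage.
import cv2

cap = cv2.VideoCapture("race_onboard.mp4")  # hypothetical input file
ok, frame = cap.read()
if ok:
    speed_text = velocity_ocr(frame, coords=(1050, 640, 1180, 690), f1app=False)
    print(speed_text)
cap.release()
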
def account_by_qr(qr_file):
    qr = qrtools.QR()
    qr.decode(qr_file)
    # Try to increase contrast if not recognized
    if ('xrb_' not in qr.data):
        image = Image.open(qr_file)
        contrast = ImageEnhance.Contrast(image)
        image = contrast.enhance(7)
        image.save('{0}'.format(qr_file.replace('.jpg', '_.jpg')), 'JPEG')
        qr2 = qrtools.QR()
        qr2.decode('{0}'.format(qr_file.replace('.jpg', '_.jpg')))
        #print(qr2.data)
        qr = qr2
    returning = qr.data.replace('xrb:', '').replace('raiblocks://', '').replace('raiblocks:', '').split('?')
    # parsing amount
    if (len(returning) > 1):
        if ('amount=' in returning[1]):
            returning[1] = returning[1].replace('amount=', '')
            # don't use empty
            if (len(returning[1]) == 0):
                returning.pop()
        else:
            returning.pop()
    return returning

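# Example usage (illustrative sketch): decode a RaiBlocks/Nano account and an
# optional amount from a QR photo. "payment_qr.jpg" is a placeholder path.
parts = account_by_qr("payment_qr.jpg")
account = parts[0]
amount = parts[1] if len(parts) > 1 else None
print(account, amount)
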
def maskFace(self, frame_image, face):
    img1 = cv2.imread(self.__class__.mask_path, cv2.IMREAD_UNCHANGED)
    elements = cv2.imread(self.__class__.mask_elements_path, cv2.IMREAD_UNCHANGED)
    h, status = cv2.findHomography(self.average_points, np.array(self.getFacePoints(face)))
    mask = self.getTransPIL(cv2.warpPerspective(img1, h, (frame_image.width, frame_image.height)))
    mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width, frame_image.height)))
    enhancer = ImageEnhance.Color(frame_image)
    enhanced = enhancer.enhance(0.1)
    enhancer = ImageEnhance.Brightness(enhanced)
    enhanced = enhancer.enhance(1.2)
    enhancer = ImageEnhance.Contrast(enhanced)
    enhanced = enhancer.enhance(1.2)
    frame_image.paste(enhanced, (0, 0), mask)
    frame_image.paste(mask_elements, (0, 0), mask_elements)

def change_contrast(image, contrast=1.0):
    """
    Change contrast of image.

    >>> image = np.eye(3, dtype='uint8') * 255
    >>> change_contrast(image, 0.5)
    array([[170,  42,  42],
           [ 42, 170,  42],
           [ 42,  42, 170]], dtype=uint8)

    See
    http://pillow.readthedocs.io/en/3.1.x/reference/ImageEnhance.html#PIL.ImageEnhance.Contrast

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float contrast: Contrast [0, 1]
    :return: Image with changed contrast
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    return enhance(image, ie.Contrast, contrast)

def contrast_transform(img, contrast_min=0.8, contrast_max=1.2):
    """Transform input image contrast

    Transform the input image contrast by a factor drawn from a uniform
    distribution with `contrast_min` and `contrast_max` as params

    Args:
        img: `ndarray`, input image
        contrast_min: float, minimum contrast for transformation
        contrast_max: float, maximum contrast for transformation

    Returns:
        `ndarray`, contrast enhanced image
    """
    if isinstance(img, (np.ndarray)):
        img = Image.fromarray(img)
    contrast_param = np.random.uniform(contrast_min, contrast_max)
    t_img = ImageEnhance.Contrast(img).enhance(contrast_param)
    return np.array(t_img)

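# Example usage (illustrative sketch): apply a random contrast jitter to a
# uint8 image loaded from disk. The file name is a placeholder.
import numpy as np
from PIL import Image

arr = np.array(Image.open("sample.jpg"))
jittered = contrast_transform(arr, contrast_min=0.7, contrast_max=1.3)
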
def produce(txt, img, ver=5, err_crt=qrcode.constants.ERROR_CORRECT_H, bri=1.0, cont=1.0,
            colourful=False, rgba=(0, 0, 0, 255), pixelate=False):
    """Produce QR code

    :txt: QR text
    :img: Image path / Image object
    :ver: QR version
    :err_crt: QR error correction level
    :bri: Brightness enhance
    :cont: Contrast enhance
    :colourful: If colourful mode
    :rgba: color to replace black
    :pixelate: pixelate
    :returns: list of produced images
    """
    if type(img) is Image.Image:
        pass
    elif type(img) is str:
        img = Image.open(img)
    else:
        return []
    frames = [produce_impl(txt, frame.copy(), ver, err_crt, bri, cont, colourful, rgba, pixelate)
              for frame in ImageSequence.Iterator(img)]
    return frames

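# Example usage (illustrative sketch): embed a URL into an (optionally
# animated) background image. produce() iterates over every frame, so a GIF
# yields one QR-styled frame per input frame; "background.gif" is a
# placeholder path.
frames = produce("https://example.com", "background.gif", ver=5, colourful=True)
if frames:
    frames[0].save("qr_art.gif", save_all=True, append_images=frames[1:])
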
def getJitteredImgs(self, img, num, maxRot=(-5.0, 5.0), maxTranslate=(-2.0, 2.0), maxScale=(-0.1, 0.1), augmentColor=False):
    """
    Take img and jitter it
    :return: a list of all jittered images
    """
    cx = img.size[0] / 2
    cy = img.size[1] / 2
    tMats = self.getJitteredParams(center=(cx, cy), num=num, maxRot=maxRot, maxTranslate=maxTranslate,
                                   maxScale=maxScale)
    imgs = []
    for i in range(len(tMats)):
        t = tMats[i]
        imgT = self.transformImg(img, t)
        if augmentColor:
            # jitter colors
            color = ImageEnhance.Color(imgT)
            imgT = color.enhance(self.rng.uniform(0.7, 1))
            # jitter contrast
            contr = ImageEnhance.Contrast(imgT)
            imgT = contr.enhance(self.rng.uniform(0.7, 1))
            # jitter brightness
            bright = ImageEnhance.Brightness(imgT)
            imgT = bright.enhance(self.rng.uniform(0.7, 1))
            # add noise
            im = numpy.asarray(imgT).astype('int') + numpy.rint(self.rng.normal(0, 4, numpy.asarray(imgT).shape)).astype('int')
            im = numpy.clip(im, 0, 255).astype('uint8')
            imgT = Image.fromarray(im)
        # add image
        imgs.append(imgT)
    return imgs, tMats

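# Example usage (illustrative sketch): generate 10 jittered copies of a PIL
# image with colour augmentation enabled. "aug" is a placeholder for whatever
# augmentation object defines getJitteredImgs (it also needs self.rng,
# getJitteredParams and transformImg, which are not shown in this snippet).
from PIL import Image

img = Image.open("hand_crop.png")  # placeholder path
imgs, transforms = aug.getJitteredImgs(img, num=10, augmentColor=True)
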
def do_contrast(self):
    """usage: contrast <image:pic1>

    Enhance contrast in the top image.
    """
    from PIL import ImageEnhance
    factor = float(self.do_pop())
    image = self.do_pop()
    enhancer = ImageEnhance.Contrast(image)
    self.push(enhancer.enhance(factor))

def random_contrast_shift(arr, tf):
    img = to_PIL(arr, tf)
    if tf:
        return ImageEnhance.Contrast(img).enhance(np.random.uniform(0.8, 1.2))
    return to_theano(ImageEnhance.Contrast(img).enhance(np.random.uniform(0.8, 1.2)))

def ocr_appendcode():
    # load the captcha image, convert to greyscale and remove noise
    image = Image.open("./data/token.jpg")
    image = image.convert("L")
    remove_noise.clearNoise(image, 127, 2, 1)
    # OCR after a contrast boost
    enhancer = ImageEnhance.Contrast(image)
    image_enhancer = enhancer.enhance(4)
    text = pytesser.image_to_string(image_enhancer)
    return filter(str.isdigit, text)

def contrast_enhance(self, factor, new_path=None, is_show=False):
    if self.img is None:
        img = Image.open(self.path)
    else:
        img = self.img
    img = ImageEnhance.Contrast(img).enhance(factor)
    if new_path is not None:
        img.save(new_path)
    if is_show:
        img.show(title='contrast')
    return img

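# Example usage (illustrative sketch): boost contrast by a factor of 1.8 and
# write the result to a new file. The wrapper object ("proc") and both paths
# are placeholders for whatever class this method belongs to.
enhanced = proc.contrast_enhance(1.8, new_path="photo_contrast.jpg")
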
def contrast(func_config):
    def f(image_config):
        # sample an enhancement factor uniformly from [min_factor, max_factor]
        # (the original also computed it with random.random() first, but that
        # value was immediately overwritten, so the dead assignment is dropped)
        factor = misc.uniform_sample_from_interval(func_config.min_factor, func_config.max_factor)
        image_config.image = ImageEnhance.Contrast(image_config.image).enhance(factor)
    return f

def make_contrast_chg(file_name, path_dir):
    file_path = path_dir + '/' + file_name
    img = Image.open('%s' % file_path)
    # strip the extension with splitext; translate(None, '.png') would delete
    # those characters anywhere in the name
    base_name = os.path.splitext(file_name)[0]
    ImageEnhance.Contrast(img).enhance(0.8).save(path_dir + '/' + '%02s_ctrt' % base_name + '.png', "PNG")
    return

def inkwell(image):
    # copy = grayscale(image)
    # enhancer = ImageEnhance.Contrast(copy)
    # enhancer.enhance(1.1)
    # enhancer = ImageEnhance.Brightness(copy)
    # enhancer.enhance(1.1)
    return image

def nss(image):
    size = image.size
    nss = Image.new('RGBA', size, (243, 106, 188, 77))
    # enhance() returns a new image, so keep the result
    enhancer = ImageEnhance.Contrast(nss)
    nss = enhancer.enhance(1.1)
    enhancer = ImageEnhance.Brightness(nss)
    nss = enhancer.enhance(1.1)
    enhancer = ImageEnhance.Color(nss)
    nss = enhancer.enhance(1.3)
    copy = image.copy()
    copy.paste(nss, (0, 0), nss)
    return copy

def brannan(image):
    size = image.size
    brannan = Image.new('RGBA', size, (161, 44, 199, 79))
    # enhance() returns a new image, so keep the result
    enhancer = ImageEnhance.Contrast(brannan)
    brannan = enhancer.enhance(1.4)
    copy = sepia(image)
    copy.paste(brannan, (0, 0), brannan)
    return copy

def clarendon(image):
    size = image.size
    clarendon = Image.new('RGBA', size, (127, 187, 227, 51))
    # enhance() returns a new image, so keep the result
    enhancer = ImageEnhance.Contrast(clarendon)
    clarendon = enhancer.enhance(1.20)
    enhancer = ImageEnhance.Color(clarendon)
    clarendon = enhancer.enhance(1.35)
    copy = image.copy()
    copy.paste(clarendon, (0, 0), clarendon)
    return copy

def call(self, img):
    if img is None:
        raise ValueError('img is None')
    im_n = img.copy()
    r = random()
    contrast_low, contrast_high = 0, self.contrast
    brightness_low, brightness_high = contrast_high, contrast_high + self.brightness
    sharpness_low, sharpness_high = brightness_high, brightness_high + self.sharpness
    color_low, color_high = sharpness_high, sharpness_high + self.color
    if contrast_low <= r < contrast_high:
        factor_contrast = randint(5, 10) / 10
        enhancer = ImageEnhance.Contrast(im_n)
        im_n = enhancer.enhance(factor_contrast)
    elif brightness_low <= r < brightness_high:
        factor_brightness = randint(5, 15) / 10
        enhancer = ImageEnhance.Brightness(im_n)
        im_n = enhancer.enhance(factor_brightness)
    elif sharpness_low <= r < sharpness_high:
        factor_sharpen = randint(0, 20) / 10
        enhancer = ImageEnhance.Sharpness(im_n)
        im_n = enhancer.enhance(factor_sharpen)
    elif color_low <= r < color_high:
        factor_color = randint(0, 20) / 10
        enhancer = ImageEnhance.Color(im_n)
        im_n = enhancer.enhance(factor_color)
    else:
        pass
    return im_n

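# call() above picks an enhancement by stacking the weights self.contrast,
# self.brightness, self.sharpness and self.color into adjacent sub-intervals
# of [0, 1); a single uniform draw r lands in at most one band, so each call
# applies exactly one enhancement (or none if the weights sum to less than 1).
# Below is a minimal standalone sketch of that band-selection idea; the
# weights, factor ranges and image path are illustrative, not taken from the
# original class.
from random import random, uniform
from PIL import Image, ImageEnhance

def random_enhance(img, contrast=0.25, brightness=0.25, sharpness=0.25, color=0.25):
    r = random()
    if r < contrast:
        return ImageEnhance.Contrast(img).enhance(uniform(0.5, 1.0))
    if r < contrast + brightness:
        return ImageEnhance.Brightness(img).enhance(uniform(0.5, 1.5))
    if r < contrast + brightness + sharpness:
        return ImageEnhance.Sharpness(img).enhance(uniform(0.0, 2.0))
    if r < contrast + brightness + sharpness + color:
        return ImageEnhance.Color(img).enhance(uniform(0.0, 2.0))
    return img

out = random_enhance(Image.open("sample.jpg"))  # placeholder path
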
def cutimage(url, day):
    with urllib.request.urlopen(url) as response:
        r = response.read()
    menu_img = Image.open(BytesIO(r))
    WIDTH = 370
    HEIGHT = 123
    X = 110
    Y = 67
    Y = Y + (HEIGHT * day)
    dailybox = (X, Y, X + WIDTH, Y + HEIGHT)
    menu = menu_img.crop(dailybox)
    new_im = Image.new('L', (WIDTH, HEIGHT))
    new_im.paste(menu, (0, 0))
    enhancer = ImageEnhance.Contrast(new_im)
    new_im = enhancer.enhance(0.85)
    new_im = new_im.point(lambda i: i > 90 and 255)
    new_im = new_im.resize((WIDTH, floor(HEIGHT * 0.56)), Image.BOX)
    new_im = new_im.convert('1')
    f = BytesIO()
    new_im.save(f, format="png", optimize=True, compress_level=9, bits=4)
    return create_img(f)

def img_preparation(img):
    img = img.convert('L')
    sharpness = ImageEnhance.Contrast(img)
    sharp_img = sharpness.enhance(2.0)
    two_value_out = sharp_img.point(table, '1')
    return two_value_out

def randomColor(image):
    """
    Apply random colour jitter to an image.
    :param image: PIL Image
    :return: colour-jittered PIL Image
    """
    random_factor = np.random.randint(0, 31) / 10.  # random factor
    color_image = ImageEnhance.Color(image).enhance(random_factor)  # adjust saturation
    random_factor = np.random.randint(10, 21) / 10.  # random factor
    brightness_image = ImageEnhance.Brightness(color_image).enhance(random_factor)  # adjust brightness
    random_factor = np.random.randint(10, 21) / 10.  # random factor
    contrast_image = ImageEnhance.Contrast(brightness_image).enhance(random_factor)  # adjust contrast
    random_factor = np.random.randint(0, 31) / 10.  # random factor
    return ImageEnhance.Sharpness(contrast_image).enhance(random_factor)  # adjust sharpness

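# Example usage (illustrative sketch): apply the combined colour/brightness/
# contrast/sharpness jitter to one image. The paths are placeholders.
from PIL import Image

augmented = randomColor(Image.open("sample.jpg"))
augmented.save("sample_jittered.jpg")
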
def prepare_image(image_fd):
    """Returns a PIL image from the given file descriptor. The image is
    resized to make dominant-colour extraction and the k-means computation
    cheaper. The contrast of the image is also enhanced to help find more
    saturated colors.
    """
    image = Image.open(image_fd)
    image.thumbnail(THUMBNAIL_SIZE)
    contrast = ImageEnhance.Contrast(image)
    image_high_contrast = contrast.enhance(CONTRAST)
    return image_high_contrast

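# Example usage (illustrative sketch): THUMBNAIL_SIZE and CONTRAST are
# module-level constants in the original project; the values below are
# assumptions, as is the file name.
THUMBNAIL_SIZE = (200, 200)
CONTRAST = 1.5

with open("poster.jpg", "rb") as fd:
    small_high_contrast = prepare_image(fd)
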
def code(image):
    im = Image.open('D:\\Users\\lujunjie\\Desktop\\guoshui\\code.jpg')
    imgry = im.resize((256, 256), Image.BILINEAR).convert('L')  # resize and convert to greyscale
    sharpness = ImageEnhance.Contrast(imgry)  # boost contrast
    sharp_img = sharpness.enhance(4)
    sharp_img.save("D:\\Users\\lujunjie\\Desktop\\guoshui\\image_code.jpg")
    code = image_file_to_string('D:\\Users\\lujunjie\\Desktop\\guoshui\\image_code.jpg')
    return code

def tesseract(self, img):
    # keep the data in a uniquely named temporary file
    fileName = "tmp_" + int(time.time() + random.randint(1, 99999)).__str__() + ".jpeg"
    while os.path.exists(fileName):
        fileName = "tmp_" + int(time.time() + random.randint(1, 99999)).__str__() + ".jpeg"
    self.tmp_file = fileName
    with open(self.tmp_file, "wb") as oFd:  # binary mode: img holds raw JPEG bytes
        oFd.write(img)
    # resolve noise
    try:
        im = Image.open(self.tmp_file)
        enhancer = ImageEnhance.Color(im)
        im = enhancer.enhance(0.0)
        enhancer = ImageEnhance.Contrast(im)
        im = enhancer.enhance(3.0)
        enhancer = ImageEnhance.Brightness(im)
        im = enhancer.enhance(10.0)
        enhancer = ImageEnhance.Contrast(im)
        im = enhancer.enhance(20.0)
        enhancer = ImageEnhance.Sharpness(im)
        im = enhancer.enhance(0.0)
        im.save(self.tmp_file)
    except Exception as e:
        pass
    else:
        pass
    # use tesseract
    imgCode = os.popen("tesseract -l eng -psm 8 {} stdout 2>/dev/null".format(self.tmp_file)).readline()[0:-1]
    log.debug("Guess Ratio:{}/{}={}%".format(self.guess_hit + 1, self.guess_total,
                                             (self.guess_hit + 1) * 100 / self.guess_total))
    os.remove(self.tmp_file)
    return imgCode

def get_image(image_path, height, width):
    """
    Load an image, convert it to greyscale and reshape it into a numpy.ndarray.
    image_path: string; height: image height; width: image width
    return: numpy.ndarray tensor
    """
    im = Image.open(image_path).convert('L')
    #im = ImageEnhance.Contrast(im).enhance(23)
    b = reshape(im, height, width)
    return b

def im_contrast(img_path, save_name, ratio):
    img = Image.open(img_path)
    img = ImageEnhance.Contrast(img)
    img = img.enhance(ratio)
    img.save(save_name, quality=100)

def TF_enhance_contrast(x, p=1.0, target=None):
    assert len(x.shape) == 3
    h, w, nc = x.shape
    enhancer = ImageEnhance.Contrast(np_to_pil(x))
    return pil_to_np(enhancer.enhance(p))

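# np_to_pil / pil_to_np are helpers from the original project and are not
# shown above; below is a minimal sketch of plausible uint8 implementations,
# plus an illustrative call with a placeholder file name.
import numpy as np
from PIL import Image

def np_to_pil(x):
    return Image.fromarray(x.astype(np.uint8))

def pil_to_np(img):
    return np.array(img)

boosted = TF_enhance_contrast(np.array(Image.open("sample.jpg")), p=1.5)
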
def infer(self, source_obj, embedding_ids, model_dir, save_dir, progress_file):
    source_provider = InjectDataProvider(source_obj, None)
    with open(progress_file, 'a') as f:
        f.write("Start")
    if isinstance(embedding_ids, int) or len(embedding_ids) == 1:
        embedding_id = embedding_ids if isinstance(embedding_ids, int) else embedding_ids[0]
        source_iter = source_provider.get_single_embedding_iter(self.batch_size, embedding_id)
    else:
        source_iter = source_provider.get_random_embedding_iter(self.batch_size, embedding_ids)
    tf.global_variables_initializer().run()
    saver = tf.train.Saver(var_list=self.retrieve_generator_vars())
    self.restore_model(saver, model_dir)

    def save_imgs(imgs, count):
        p = os.path.join(save_dir, "inferred_%04d.png" % count)
        save_concat_images(imgs, img_path=p)
        # print("generated images saved at %s" % p)

    def save_sample(imgs, code):
        p = os.path.join(save_dir, "inferred_%s.png" % code)
        save_concat_images(imgs, img_path=p)
        # print("generated images saved at %s" % p)

    count = 0
    batch_buffer = list()
    for labels, codes, source_imgs in source_iter:
        fake_imgs = self.generate_fake_samples(source_imgs, labels)[0]
        for i in range(len(fake_imgs)):
            # Denormalize image
            gray_img = np.uint8(fake_imgs[i][:, :, 0] * 127.5 + 127.5)
            pil_img = Image.fromarray(gray_img, 'L')
            # Apply bilateralFilter
            cv_img = np.array(pil_img)
            cv_img = bilateralFilter(cv_img, 5, 10, 10)
            pil_img = Image.fromarray(cv_img)
            # Increase contrast
            enhancer = ImageEnhance.Contrast(pil_img)
            en_img = enhancer.enhance(1.5)
            # Normalize image back to [-1, 1] (assign the array, not a PIL Image)
            fake_imgs[i][:, :, 0] = np.array(en_img) / 127.5 - 1.
            # save_sample(fake_imgs[i], codes[i])
        merged_fake_images = merge(scale_back(fake_imgs), [self.batch_size, 1])
        batch_buffer.append(merged_fake_images)
        if len(batch_buffer) == 1:
            save_sample(batch_buffer, codes[0])
            batch_buffer = list()
        count += 1
    if batch_buffer:
        # last batch
        save_imgs(batch_buffer, count)
    with open(progress_file, 'a') as f:
        f.write("Done")

def crop_image_uniform(src_dir, dst_dir):
    f = open("399-uniform.txt", "r")
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    for page in range(1, 4):
        img = Image.open(src_dir + "/" + str(page) + "-uniform.png").convert('L')
        width, height = img.size
        cell_width = width / float(cols)
        cell_height = height / float(rows)
        header_offset = height / float(rows) * header_ratio
        width_margin = cell_width * 0.10
        height_margin = cell_height * 0.10
        for j in range(0, rows):
            for i in range(0, cols):
                left = i * cell_width
                upper = j * cell_height + header_offset
                right = left + cell_width
                lower = (j + 1) * cell_height
                center_x = (left + right) / 2
                center_y = (upper + lower) / 2
                crop_width = right - left - 2 * width_margin
                crop_height = lower - upper - 2 * height_margin
                size = 0
                if crop_width > crop_height:
                    size = crop_height / 2
                else:
                    size = crop_width / 2
                left = center_x - size
                right = center_x + size
                upper = center_y - size
                lower = center_y + size
                code = f.readline()
                if not code:
                    break
                else:
                    name = dst_dir + "/uni" + code.strip() + ".png"
                    cropped_image = img.crop((left, upper, right, lower))
                    cropped_image = cropped_image.resize((128, 128), Image.LANCZOS)
                    # Increase contrast
                    enhancer = ImageEnhance.Contrast(cropped_image)
                    cropped_image = enhancer.enhance(1.5)
                    opencv_image = np.array(cropped_image)
                    opencv_image = bilateralFilter(opencv_image, 9, 30, 30)
                    cropped_image = Image.fromarray(opencv_image)
                    cropped_image.save(name)
        print("Processed uniform page " + str(page))

def ModifyImg(self, img_name):
    global val_img
    val_img += 1
    if val_img <= 1:
        print('Checking whether the output directory exists ----------')
        if os.path.isdir(Modif):
            pass
        else:
            mkdir = os.makedirs(Modif)
            print('Output directory did not exist and has been created ----------')
            print('Modified images will be saved to -> ' + Modif)
    else:
        pass
    img = Image.open(img_name)
    img = img.filter(ImageFilter.MedianFilter())
    enhancer = ImageEnhance.Contrast(img)
    img = enhancer.enhance(2)
    img = img.convert('1')
    width, height = img.size
    data = []
    for i in range(height):
        tmp = []
        for j in range(width):
            if img.getpixel((j, i)) == 255:
                tmp.append(1)
            else:
                tmp.append(0)
        data.append(tmp)
    img2 = Image.new("P", img.size, 255)
    for y in range(height):
        for a in range(len(data[y])):
            o = y + 1
            t = y + 2
            #s = y + 3
            z = a + 1
            x = a + 2
            try:
                if data[o][a] == 0 and data[t][a] == 0 and data[y][z] == 0 and data[y][x] == 0:  # and data[s][a] == 0
                    img2.putpixel((a, y), 1)
                    img2.save(Modif + str(val_img) + '.png')
            except:
                pass
    img2_path = Modif + str(val_img) + '.png'
    image = Image.open(img2_path)
    image = image.convert("L")
    self.clearNoise(image, 53, 4, 8)
    image.save(img2_path)
    image.show()
    self.ImgCutting(img2_path)