def subtract_background(self):
    # compare_ssim comes from scikit-image (skimage.measure.compare_ssim)
    fgbg = cv2.createBackgroundSubtractorMOG2()
    prev = self.frames[0]
    fgmask = fgbg.apply(prev)
    # loop variable renamed from "next" so it no longer shadows the builtin
    for frame in self.frames[1:]:
        prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        similarity_metric = compare_ssim(prev_gray, frame_gray)
        print('prev/next similarity measure = %f' % similarity_metric)
        if similarity_metric < self.transition_threshold:
            # low similarity marks a scene transition: keep a denoised
            # foreground snapshot (see denoise_foreground below)
            fgmask = fgbg.apply(frame)
            fgdn = denoise_foreground(frame, fgmask)
            self.transitions.append((1, fgdn))
        else:
            fgmask = fgbg.apply(frame)
            self.transitions.append((0, None))
        prev = frame.copy()
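#
# denoise_foreground() is referenced above but not included in this listing.
# A minimal sketch of one plausible implementation, assuming it masks the
# frame with a morphologically cleaned foreground mask (the kernel size and
# operations are illustrative assumptions, not the original code):
#
def denoise_foreground(frame, fgmask):
    # hypothetical helper: suppress speckle noise in the MOG2 mask, then
    # apply it so only foreground pixels survive
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    cleaned = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    cleaned = cv2.medianBlur(cleaned, 5)
    return cv2.bitwise_and(frame, frame, mask=cleaned)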
def find_bib(image):
    height, width, depth = image.shape  # numpy shape order is (rows, cols, channels)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #gray = cv2.equalizeHist(gray)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    debug_output("find_bib_blurred", blurred)
    #binary = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blockSize=25, C=0)
    ret, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #ret,binary = cv2.threshold(blurred, 170, 255, cv2.THRESH_BINARY)
    debug_output("find_bib_binary", binary)
    threshold_contours, hierarchy = find_contours(binary)
    debug_output("find_bib_threshold", binary)
    edges = cv2.Canny(gray, 175, 200, 3)
    edge_contours, hierarchy = find_contours(edges)
    debug_output("find_bib_edges", edges)
    contours = threshold_contours + edge_contours
    debug_output_contours("find_bib_threshold_contours", image, contours)
    rectangles = get_rectangles(contours)
    debug_output_contours("find_bib_rectangles", image, rectangles)
    potential_bibs = [rect for rect in rectangles if is_potential_bib(rect, width * height)]
    debug_output_contours("find_bib_potential_bibs", image, potential_bibs)
    ideal_aspect_ratio = 1.0
    potential_bibs = sorted(potential_bibs, key=lambda bib: abs(aspect_ratio(bib) - ideal_aspect_ratio))
    return potential_bibs[0] if potential_bibs else np.array([[(0, 0)], [(0, 0)], [(0, 0)], [(0, 0)]])
#
# Checks that the size and aspect ratio of the contour are appropriate for a bib.
#
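# The body of is_potential_bib() (and the aspect_ratio() helper used above)
# is missing from this listing. A minimal sketch consistent with the comment;
# the area and ratio thresholds here are illustrative assumptions, not the
# original values:
def aspect_ratio(rect):
    # hypothetical helper: width/height of the contour's bounding box
    x, y, w, h = cv2.boundingRect(rect)
    return float(w) / h

def is_potential_bib(rect, image_area):
    # hypothetical check: the rectangle should cover a sane fraction of the
    # image and be roughly as wide as it is tall
    x, y, w, h = cv2.boundingRect(rect)
    area_fraction = (w * h) / float(image_area)
    return 0.002 < area_fraction < 0.5 and 0.3 < aspect_ratio(rect) < 3.0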
def scrub(cls, image):
    """
    Apply Stroke-Width Transform to image.
    :param image: BGR image (numpy array) to transform
    :return: numpy array representing result of transform, or None if no text found
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    canny, sobelx, sobely, theta = cls._create_derivative(gray)
    swt = cls._swt(theta, canny, sobelx, sobely)
    shapes = cls._connect_components(swt)
    swts, heights, widths, topleft_pts, images = cls._find_letters(swt, shapes)
    if len(swts) == 0:
        # didn't find any text, probably a bad face
        return None
    word_images = cls._find_words(swts, heights, widths, topleft_pts, images)
    final_mask = np.zeros(swt.shape)
    for word in word_images:
        final_mask += word
    return final_mask
def predict(url):
    global model
    # Read image (skimage returns RGB; convert to BGR for OpenCV)
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)
    # Use Otsu's threshold to build a mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)
    features = describe(image, mask)
    state = le.inverse_transform(model.predict([features]))[0]
    return {'type': state}
def read_captured_circles(self):
    img = cv2.cvtColor(self.query, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 7)
    cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 30,
                               param1=50, param2=30, minRadius=20, maxRadius=50)
    if circles is None:
        return
    circles = np.uint16(np.around(circles))
    for i in circles[0, :]:
        if i[1] < 400:
            continue
        self.circlePoints.append((i[0], i[1]))
    if self._debug:
        self.draw_circles(circles, cimg)
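#
# draw_circles() is referenced above but not defined in this listing. A
# minimal sketch, assuming it only renders the detected circles for visual
# debugging (hypothetical implementation):
#
def draw_circles(self, circles, cimg):
    for i in circles[0, :]:
        # outline in green, center dot in red
        cv2.circle(cimg, (int(i[0]), int(i[1])), int(i[2]), (0, 255, 0), 2)
        cv2.circle(cimg, (int(i[0]), int(i[1])), 2, (0, 0, 255), 3)
    cv2.imshow('captured circles', cimg)
    cv2.waitKey(0)
    cv2.destroyAllWindows()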
def test_initial_pass_through_compare(self):
    original = cv2.imread(os.path.join(self.provider.assets, "start_screen.png"))
    against = self.provider.get_img_from_screen_shot()
    wrong = cv2.imread(os.path.join(self.provider.assets, "battle.png"))
    # convert the images to grayscale
    original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
    against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
    wrong = mask_image([127], [255], cv2.cvtColor(wrong, cv2.COLOR_BGR2GRAY), True)
    # compare the screenshot against the expected and the wrong screen
    (score, diff) = compare_ssim(original, against, full=True)
    diff = (diff * 255).astype("uint8")
    self.assertTrue(score > .90, 'If this is less than .90 the initial compare of the app will fail')
    (score, nothing) = compare_ssim(original, wrong, full=True)
    self.assertTrue(score < .90)
    if self.__debug_pictures__:
        # threshold the difference image, followed by finding contours to
        # obtain the regions of the two input images that differ
        thresh = cv2.threshold(diff, 0, 255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        # findContours returns (contours, hierarchy) on OpenCV 2/4; on
        # OpenCV 3 the contours are the second element of the tuple
        cnts = cnts[0]
        # loop over the contours
        for c in cnts:
            # compute the bounding box of the contour and then draw the
            # bounding box on both input images to represent where the two
            # images differ
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.rectangle(against, (x, y), (x + w, y + h), (0, 0, 255), 2)
        # show the output images
        diffs = ("Original", original), ("Modified", against), ("Diff", diff), ("Thresh", thresh)
        images = ("Original", original), ("Against", against), ("Wrong", wrong)
        self.setup_compare_images(diffs)
        self.setup_compare_images(images)
def predict():
    response = requests.get(slide_captcha_url)
    base64_image = response.json()['data']['dataUrl']
    base64_image_without_head = base64_image.replace('data:image/png;base64,', '')
    bytes_io = BytesIO(base64.b64decode(base64_image_without_head))
    img = np.array(Image.open(bytes_io).convert('RGB'))
    img_blur = cv2.GaussianBlur(img, (3, 3), 0)
    # PIL delivers RGB, so convert from RGB (the original used COLOR_BGR2GRAY,
    # which swaps the channel weights)
    img_gray = cv2.cvtColor(img_blur, cv2.COLOR_RGB2GRAY)
    img_canny = cv2.Canny(img_gray, 100, 200)
    operator = get_operator('shape.png')
    (x, y), _ = best_match(img_canny, operator)
    x = x + bias
    print('the position of x is', x)
    buffer = mark(img, x, y)
    return {'value': x, 'image': base64.b64encode(buffer.getbuffer()).decode()}
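#
# get_operator() and best_match() are not defined in this listing. A minimal
# sketch, assuming best_match() is plain template matching of the slider
# shape's edge map against the captcha's edge map (names, signatures and the
# matching approach are assumptions):
#
def get_operator(path):
    # hypothetical: load the slider shape and use its edges as the template
    shape = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    return cv2.Canny(shape, 100, 200)

def best_match(edges, operator):
    # hypothetical: slide the template over the edge image and return the
    # top-left corner of the best match plus its score
    result = cv2.matchTemplate(edges, operator, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(result)
    return max_loc, max_val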
def __init__(self, img):
    # making two copies of the same image
    original_img = np.array(img)
    new_img = np.array(img)
    # resizing keeping the aspect ratio constant
    a_ratio = new_img.shape[0] / new_img.shape[1]
    #new_row = int(new_img.shape[0])
    new_row = 128
    new_colm = int(new_row / a_ratio)
    new_img = cv2.resize(new_img, (new_colm, new_row), interpolation=cv2.INTER_AREA)
    original_img = cv2.resize(original_img, (new_colm, new_row), interpolation=cv2.INTER_AREA)
    # convert new_img to grayscale
    new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)
    self.original_img = original_img
    self.new_img = new_img
def load(self, filename, analyze_only):
    # Load image, then do various conversions and thresholding.
    self.img_orig = cv2.imread(filename, cv2.IMREAD_COLOR)
    if self.img_orig is None:
        raise CompilerException("File '{}' not found".format(filename))
    self.img_grey = cv2.cvtColor(self.img_orig, cv2.COLOR_BGR2GRAY)
    _, self.img_contour = cv2.threshold(self.img_grey, 250, 255, cv2.THRESH_BINARY_INV)
    _, self.img_text = cv2.threshold(self.img_grey, 150, 255, cv2.THRESH_BINARY)
    self.root_node = None
    self.contours = self.find_contours()
    self.contour_lines, self.contour_nodes = self.categorize_contours()
    self.build_graph()
    self.build_parse_tree()
    self.parse_nodes()
    if not analyze_only:
        self.python_ast = self.root_node.to_python_ast()
def open_img(self, name, color='RGB'):
    """ Open an image
    Args:
        name  : Name of the sample
        color : Color Mode (RGB/BGR/GRAY)
    """
    if name[-1] in self.letter:
        name = name[:-1]
    img = cv2.imread(os.path.join(self.img_dir, name))
    if color == 'RGB':
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img
    elif color == 'BGR':
        return img
    elif color == 'GRAY':
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return img  # the original fell through here without returning
    else:
        print('Color mode supported: RGB/BGR/GRAY. If you need another mode do it yourself :p')
# Source: facegroup.py, project: Automatic_Group_Photography_Enhancement, author: Yuliang-Zou
def getFaceData(img):
    # Create the haar cascade
    faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    # Read the image
    image = cv2.imread(img)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE  # cv2.cv.CV_HAAR_SCALE_IMAGE in OpenCV 2
    )
    facedata = None  # guard against no detections (the original raised NameError)
    for (x, y, w, h) in faces:
        facedata = image[y:y+h, x:x+w]
    return facedata  # crop of the last detected face, or None if none found
def add_corners(self, i_frame, subpixel_criteria, frame_folder_path=None,
                save_image=False, save_checkerboard_overlay=False):
    grey_frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
    cv2.cornerSubPix(grey_frame, self.current_image_points, (11, 11), (-1, -1), subpixel_criteria)
    if save_image:
        png_path = (os.path.join(frame_folder_path,
                                 "{0:s}{1:04d}{2:s}".format(self.name, i_frame, ".png")))
        cv2.imwrite(png_path, self.frame)
    if save_checkerboard_overlay:
        png_path = (os.path.join(frame_folder_path,
                                 "checkerboard_{0:s}{1:04d}{2:s}".format(self.name, i_frame, ".png")))
        overlay = self.frame.copy()
        cv2.drawChessboardCorners(overlay, self.current_board_dims, self.current_image_points, True)
        cv2.imwrite(png_path, overlay)
    self.usable_frames[i_frame] = len(self.image_points)
    self.image_points.append(self.current_image_points)
def load_images(folder_path):
    # note: this snippet uses the "cv" alias, i.e. "import cv2 as cv"
    os.chdir(folder_path)
    image_files = glob.glob('*.JPG')
    print('Found %s images' % len(image_files))
    if len(image_files) == 0:
        return
    images = []
    print('Loading images ', end='')
    for file in image_files:
        image = cv.imread(file)
        gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        images.append((gray_image, image))
        print('.', end='')
        sys.stdout.flush()
    print('')
    return images
def scanForFace():
    while 1:
        # Scan back and forth hunting for a face: turn the servo one click
        # right (reversing direction when we max out) while grabbing frames
        # with cap.read().
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.3,
            minNeighbors=5
        )
        for (x, y, w, h) in faces:
            foundFace = True
            aimToFace()
def aimToFace():
    while 1:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.3,
            minNeighbors=5
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
            # empirical fit: face width in pixels -> distance
            distance = 146.645 * math.exp(-7.207e-3 * w)
            # print(distance)
            if x < (halfScreen - 1.5 * w):
                # click servo right
                print("Pan Right")
            elif x > (halfScreen + 1.5 * w):
                # click servo left
                print("Pan Left")
            else:
                targetConfirmed = confirmTarget()
                if targetConfirmed:
                    Launch(distance)
                else:
                    break
# Source: Modules.py, project: apparent-age-gender-classification, author: danielyou0230
def facial_landmark_detection(image, detector, predictor, file):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img_size = gray.shape
    landmark_faces = detector(gray, 1)
    faces = list()
    area = 0
    face_idx = 0
    bItr = False
    # keep the largest detected face (loop variable renamed so it no longer
    # shadows the detector's result list)
    for (idx, landmark_face) in enumerate(landmark_faces):
        shape = predictor(gray, landmark_face)
        shape = shape_to_np(shape)
        (x, y, w, h) = rect_to_bb(landmark_face, img_size, file)
        if (w * h) > area:
            area = w * h
            faces = [x, y, w, h]
            bItr = True
        #cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        #cv2.putText(image, "Face #{}".format(idx + 1), (x - 10, y - 10), \
        #            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        #for (x, y) in shape:
        #    cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
    return bItr, faces
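#
# shape_to_np() and rect_to_bb() are not defined in this listing. They appear
# to mirror the well-known imutils.face_utils helpers, except this
# rect_to_bb() also receives the image size (presumably to clamp the box) and
# a file name (presumably for logging). A sketch under those assumptions:
#
def shape_to_np(shape, dtype="int"):
    # convert dlib's landmark result to an (N, 2) numpy array of (x, y)
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)
    for i in range(shape.num_parts):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords

def rect_to_bb(rect, img_size, file):
    # convert a dlib rectangle to (x, y, w, h), clamped to the image bounds
    x = max(rect.left(), 0)
    y = max(rect.top(), 0)
    w = min(rect.right(), img_size[1]) - x
    h = min(rect.bottom(), img_size[0]) - y
    return (x, y, w, h)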
# Source: Modules.py, project: apparent-age-gender-classification, author: danielyou0230
def debug_face_classifier(file):
    face_cascade = cv2.CascadeClassifier(xml_face_classifier)
    image = cv2.imread(file)
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect on the grayscale image (the original passed the color image and
    # left `gray` unused)
    faces = face_cascade.detectMultiScale(gray, 1.07, 3)
    print(faces)
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
        #roi_gray = gray[y:y+h, x:x+w]
        #roi_color = image[y:y+h, x:x+w]
    cv2.imshow('Image', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def image(self):
    img = cv2.imread(self.image_path)
    img = imutils.resize(img, width=min(800, img.shape[1]))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    fullbody = self.HogDescriptor(gray)
    for (x, y, w, h) in fullbody:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
    faces = self.haar_facedetection(gray)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = self.haar_eyedetection(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        smile = self.haar_smilecascade(roi_gray)
        for (sx, sy, sw, sh) in smile:
            cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (0, 255, 0), 2)
    img = self.dlib_function(img)
    cv2.imshow('img', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def overlay_img(self):
    """Overlay the transparent, transformed image of the arc onto our CV image"""
    # overlay the arc on the image
    rows, cols, channels = self.transformed.shape
    roi = self.cv_image[0:rows, 0:cols]
    # change arc_image to grayscale
    arc2gray = cv2.cvtColor(self.transformed, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(arc2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # black out area of arc in ROI
    img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    img2_fg = cv2.bitwise_and(self.transformed, self.transformed, mask=mask)
    # put arc on ROI and modify the main image
    dst = cv2.add(img1_bg, img2_fg)
    self.cv_image[0:rows, 0:cols] = dst
def display_video_stream(self):
    r, frame = self.capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = self.faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(40, 40),
        flags=cv2.CASCADE_SCALE_IMAGE  # cv2.cv.CV_HAAR_SCALE_IMAGE in OpenCV 2
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # cv2.cv.CV_BGR2RGB in OpenCV 2
    frame = cv2.flip(frame, 1)
    image = QImage(frame, frame.shape[1], frame.shape[0],
                   frame.strides[0], QImage.Format_RGB888)
    self.imageLabel.setPixmap(QPixmap.fromImage(image))