def gl_display(self):
    for grid_points in self.img_points:
        calib_bounds = cv2.convexHull(grid_points)[:, 0]  # drop the extra nesting level that OpenCV adds
        draw_polyline(calib_bounds, 1, RGBA(0., 0., 1., .5), line_type=gl.GL_LINE_LOOP)
    if self._window:
        self.gl_display_in_window()
    if self.show_undistortion:
        gl.glPushMatrix()
        make_coord_system_norm_based()
        draw_gl_texture(self.undist_img)
        gl.glPopMatrix()
def build_correspondance(self, visible_markers, camera_calibration, min_marker_perimeter, min_id_confidence):
    """
    - use all visible markers
    - fit a convex quadrangle around them
    - use the quadrangle verts to establish a perspective transform
    - map all markers into surface space
    - build up a list of found markers and their uv coords
    """
    all_verts = [m['verts'] for m in visible_markers if m['perimeter'] >= min_marker_perimeter]
    if not all_verts:
        return
    all_verts = np.array(all_verts, dtype=np.float32)
    all_verts.shape = (-1, 1, 2)  # [vert,vert,vert,vert,vert...] with vert = [[r,c]]
    # all_verts_undistorted_normalized is centered on the image center, flipped in y, range [-1,1]
    all_verts_undistorted_normalized = cv2.undistortPoints(all_verts, camera_calibration['camera_matrix'], camera_calibration['dist_coefs'] * self.use_distortion)
    hull = cv2.convexHull(all_verts_undistorted_normalized, clockwise=False)
    # simplify until we have exactly 4 verts
    if hull.shape[0] > 4:
        new_hull = cv2.approxPolyDP(hull, epsilon=1, closed=True)
        if new_hull.shape[0] >= 4:
            hull = new_hull
    if hull.shape[0] > 4:
        curvature = abs(GetAnglesPolyline(hull, closed=True))
        most_acute_4_threshold = sorted(curvature)[3]
        hull = hull[curvature <= most_acute_4_threshold]
    # all_verts_undistorted_normalized space is flipped in y,
    # so we need to change the order of the hull vertices
    hull = hull[[1, 0, 3, 2], :, :]
    # now we need to roll the hull verts until we have the right orientation:
    # all_verts_undistorted_normalized space has its origin at the image center.
    # adding 1 to the coordinates puts the origin at the top left.
    distance_to_top_left = np.sqrt((hull[:, :, 0] + 1) ** 2 + (hull[:, :, 1] + 1) ** 2)
    bot_left_idx = np.argmin(distance_to_top_left) + 1
    hull = np.roll(hull, -bot_left_idx, axis=0)
    # based on these 4 verts we calculate the transformation into a (0,0)..(1,1) square space
    m_from_undistored_norm_space = m_verts_from_screen(hull)
    self.detected = True
    # map the marker vertices into surface space (one can think of these as texture coordinates u,v)
    marker_uv_coords = cv2.perspectiveTransform(all_verts_undistorted_normalized, m_from_undistored_norm_space)
    marker_uv_coords.shape = (-1, 4, 1, 2)  # [marker,marker...] with marker = [ [[r,c]],[[r,c]] ]
    # build up a dict of discovered markers, each with a history of uv coordinates
    for m, uv in zip(visible_markers, marker_uv_coords):
        try:
            self.markers[m['id']].add_uv_coords(uv)
        except KeyError:
            self.markers[m['id']] = Support_Marker(m['id'])
            self.markers[m['id']].add_uv_coords(uv)
    # average the number of collected uv correspondences across detected markers
    self.build_up_status = sum([len(m.collected_uv_coords) for m in self.markers.values()]) / float(len(self.markers))
    if self.build_up_status >= self.required_build_up:
        self.finalize_correnspondance()
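The docstring above describes the core idea: the four hull vertices define a perspective transform onto a unit square, and every marker vertex is mapped through that transform to obtain its uv coordinates. m_verts_from_screen() is project-specific; the snippet below is a minimal, self-contained sketch of the same mapping built directly with OpenCV, using a hypothetical quadrangle and marker points (all values are illustrative).

import cv2
import numpy as np

# hypothetical quadrangle (hull corners) in image coordinates
quad = np.array([[0., 0.], [100., 10.], [110., 120.], [-5., 115.]], dtype=np.float32)
unit_square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]], dtype=np.float32)

# transform mapping the quadrangle onto the (0,0)..(1,1) square
m = cv2.getPerspectiveTransform(quad, unit_square)

# map arbitrary marker vertices into surface (uv) space; perspectiveTransform expects shape (N, 1, 2)
marker_verts = np.array([[[50., 60.]], [[10., 20.]]], dtype=np.float32)
uv = cv2.perspectiveTransform(marker_verts, m)
print(uv)  # points inside the quadrangle land roughly within [0, 1] x [0, 1]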
def get_defects(self, cnt, drawing):
    defects = None
    hull = cv2.convexHull(cnt)
    cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 0)
    cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 0)
    hull = cv2.convexHull(cnt, returnPoints=False)  # For finding defects
    if hull.size > 2:
        defects = cv2.convexityDefects(cnt, hull)
    return defects
# Gesture Recognition
def mask_from_points(size, points):
    mask = np.zeros(size, np.uint8)
    cv2.fillConvexPoly(mask, cv2.convexHull(points), 255)
    return mask
def do_touch(self):
    width, height = 1080, 1920
    screen = self.device.screenshot_cv2()
    h, w = screen.shape[:2]
    img = cv2.resize(screen, (w//2, h//2))  # cv2.resize requires integer sizes
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 80, 200)
    _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_OTSU)
    contours, _ = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=len, reverse=True)
    rects = []
    for cnt in contours:
        hull = cv2.convexHull(cnt)
        hull_area = cv2.contourArea(hull)
        x, y, w, h = cv2.boundingRect(cnt)
        rect_area = float(w*h)
        if w < 20 or h < 20 or rect_area < 100:
            continue
        if hull_area/rect_area < 0.50:
            continue
        rects.append((x, y, x+w, y+h))
        cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
    if not rects:
        x, y = randint(1, width), randint(1, height)
    else:
        x1, y1, x2, y2 = choice(rects)
        x, y = randint(x1, x2), randint(y1, y2)
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    x, y = self.device.screen2touch(x*2, y*2)
    self.device.touch(x, y)
    cv2.imshow('img', img)
    cv2.waitKey(1)
# From faceswap.py in the Automatic_Group_Photography_Enhancement project (author: Yuliang-Zou)
def draw_convex_hull(im, points, color):
    points = cv2.convexHull(points)
    cv2.fillConvexPoly(im, points, color=color)
def get_convex_hull(contour):
    return cv2.convexHull(contour)
def _find_hull_defects(self, segment):
    # use cv2.findContours to find all the contours in the segmented image
    contours, hierarchy = cv2.findContours(segment, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # assume the largest contour is the one of interest
    max_contour = max(contours, key=cv2.contourArea)
    epsilon = 0.01 * cv2.arcLength(max_contour, True)
    max_contour = cv2.approxPolyDP(max_contour, epsilon, True)
    # determine convex hull & convexity defects of the hull
    hull = cv2.convexHull(max_contour, returnPoints=False)
    defects = cv2.convexityDefects(max_contour, hull)
    return (max_contour, defects)
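cv2.convexityDefects returns an (N, 1, 4) integer array; each row holds the contour indices of the defect's start point, end point and farthest point, plus the depth encoded as a fixed-point value (1/256 of a pixel). A hedged sketch of how the tuple returned above might be consumed downstream, e.g. to count deep defects such as the gaps between fingers (the helper name and threshold are illustrative assumptions):

def count_deep_defects(contour, defects, min_depth=20.0):
    # count convexity defects deeper than min_depth pixels
    if defects is None:
        return 0
    count = 0
    for i in range(defects.shape[0]):
        start_idx, end_idx, far_idx, depth_fixed = defects[i, 0]
        depth = depth_fixed / 256.0  # convert fixed-point depth to pixels
        if depth > min_depth:
            count += 1
    return count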
def fill(img, points):
    filler = cv2.convexHull(points)
    cv2.fillConvexPoly(img, filler, 255)
    return img
def get_landmarks(self, img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    rects = self.detector(img, 0)
    if len(rects) > 1:
        print('TooManyFaces')
    if len(rects) == 0:
        raise ValueError('Error: NoFaces!!')
    landmarks = np.matrix([[p.x, p.y]
                           for p in self.predictor(img, rects[0]).parts()])
    for group in self.MOUTH_POINTS:
        hull = cv2.convexHull(landmarks[group])
    return hull
def get_face_mask(self, img, landmarks):
    for group in self.OVERLAY_POINTS:
        hull = cv2.convexHull(landmarks[group])
        cv2.fillConvexPoly(img, hull, 0)
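The two methods above rely on self.detector, self.predictor and on landmark index groups such as MOUTH_POINTS / OVERLAY_POINTS that are defined elsewhere in the class. Below is a minimal sketch, assuming dlib's 68-point landmark model, of how such attributes are commonly initialised; the class name, model path and index ranges are illustrative assumptions, not taken from this project.

import dlib

class FaceMasker(object):
    # index groups into the 68-point dlib landmark set (assumed mouth indices)
    MOUTH_POINTS = [list(range(48, 61))]
    OVERLAY_POINTS = [list(range(48, 61))]

    def __init__(self, predictor_path='shape_predictor_68_face_landmarks.dat'):
        # the model file must be downloaded separately; the path here is a placeholder
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(predictor_path)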
def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,
                      min_height, max_height, solidity, max_vertex_count, min_vertex_count,
                      min_ratio, max_ratio):
    """Filters out contours that do not meet certain criteria.
    Args:
        input_contours: Contours as a list of numpy.ndarray.
        min_area: The minimum area of a contour that will be kept.
        min_perimeter: The minimum perimeter of a contour that will be kept.
        min_width: Minimum width of a contour.
        max_width: Maximum width of a contour.
        min_height: Minimum height of a contour.
        max_height: Maximum height of a contour.
        solidity: The minimum and maximum solidity of a contour.
        min_vertex_count: Minimum vertex count of a contour.
        max_vertex_count: Maximum vertex count of a contour.
        min_ratio: Minimum ratio of width to height.
        max_ratio: Maximum ratio of width to height.
    Returns:
        Contours as a list of numpy.ndarray.
    """
    output = []
    for contour in input_contours:
        x, y, w, h = cv2.boundingRect(contour)
        if (w < min_width or w > max_width):
            continue
        if (h < min_height or h > max_height):
            continue
        area = cv2.contourArea(contour)
        if (area < min_area):
            continue
        if (cv2.arcLength(contour, True) < min_perimeter):
            continue
        hull = cv2.convexHull(contour)
        solid = 100 * area / cv2.contourArea(hull)
        if (solid < solidity[0] or solid > solidity[1]):
            continue
        if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):
            continue
        ratio = float(w) / h
        if (ratio < min_ratio or ratio > max_ratio):
            continue
        output.append(contour)
    return output
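A short usage sketch for the filter above, assuming the function is defined at module level; the image path and all threshold values are illustrative, not taken from the original pipeline.

import cv2

img = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input image
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
# [-2] picks the contour list regardless of the OpenCV version's return format
contours = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

kept = __filter_contours(contours,
                         min_area=100.0, min_perimeter=0.0,
                         min_width=10.0, max_width=1000.0,
                         min_height=10.0, max_height=1000.0,
                         solidity=[0.0, 100.0],
                         max_vertex_count=1000000, min_vertex_count=0,
                         min_ratio=0.0, max_ratio=1000.0)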
def draw_convex_hull(im, points, color):
    points = cv2.convexHull(points)
    cv2.fillConvexPoly(im, points, color=color)
    print(points)
def get_patches(segment_arr):
    ret = []
    im = segment_arr.astype(np.uint8)
    contours = cv2.findContours(im, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    hulls = [cv2.convexHull(cont) for cont in contours[1]]  # OpenCV 3.x returns (image, contours, hierarchy), so contours are at index [1]
    for contour_idx in xrange(len(hulls)):
        cimg = np.zeros_like(im)
        cv2.drawContours(cimg, hulls, contour_idx, color=255, thickness=-1)
        pts = np.array(np.where(cimg == 255)).T
        ret.append(pts)
    return ret
def get_patches(segment_arr):
    ret = []
    im = segment_arr.astype(np.uint8) * 255
    contours = cv2.findContours(im, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    hulls = [cv2.convexHull(cont) for cont in contours[0]]
    for contour_idx in xrange(len(hulls)):
        cimg = np.zeros_like(im)
        cv2.drawContours(cimg, hulls, contour_idx, color=255, thickness=-1)
        pts = np.array(np.where(cimg == 255)).T
        ret.append(pts)
    return ret
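The two get_patches variants differ only in which element of the cv2.findContours result they index: OpenCV 3.x returns (image, contours, hierarchy), while 2.4 and 4.x return (contours, hierarchy). A small version-agnostic wrapper (the name is an assumption) avoids hard-coding either index:

def find_contours_compat(img, mode, method):
    # the contour list is always the second-to-last element of the returned tuple
    result = cv2.findContours(img, mode, method)
    return result[-2]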
def contourImg(image):
    # Find contours in the image
    _, contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    image.fill(0)
    # Filter out some contours
    i = 0
    found = False
    cnt = []
    boundingRects = []
    while i < len(contours):
        if cv2.contourArea(contours[i]) > 200:
            approx = cv2.convexHull(contours[i])
            x, y, w, h = cv2.boundingRect(contours[i])
            aspect = w / h
            coverage = w * h / cv2.contourArea(contours[i])
            if abs(aspect - .4) < .1 and coverage > 0.85:
                boundingRects.append([x, y, w, h])
        i += 1
    i = 0
    while i < len(boundingRects):
        j = i + 1
        while j < len(boundingRects):
            if (abs(boundingRects[i][1] - boundingRects[j][1]) < 20 and
                    abs(((boundingRects[i][0] - boundingRects[j][0]) / boundingRects[i][1]) - 1.65) < .1):
                return (createRectCnt(boundingRects[i]), createRectCnt(boundingRects[j]))
            j += 1
        i += 1
    return -1
# Define color range