Python cv2.convexHull() usage examples (source code)
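As context for the snippets below, here is a minimal sketch of the basic call (the point array is made up for illustration): cv2.convexHull takes a contour or point set and returns either the hull vertices or, with returnPoints=False, indices into the input, which is the form cv2.convexityDefects expects.

import cv2
import numpy as np

# Toy point set, int32 and shaped (N, 1, 2) like an OpenCV contour.
pts = np.array([[10, 10], [100, 20], [90, 120], [20, 100], [50, 60]],
               dtype=np.int32).reshape(-1, 1, 2)

hull_points = cv2.convexHull(pts)                       # hull vertices as points
hull_indices = cv2.convexHull(pts, returnPoints=False)  # indices into pts, used by cv2.convexityDefects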
def extract_corners(self, image):
"""
Find the 4 corners of a binary image
:param image: binary image
:return: 4 main vertices or None
"""
cnts, _ = cv2.findContours(image.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2:]
cnt = cnts[0]
    _, _, w, h = cv2.boundingRect(cnt)
epsilon = min(h, w) * 0.5
vertices = cv2.approxPolyDP(cnt, epsilon, True)
vertices = cv2.convexHull(vertices, clockwise=True)
vertices = self.correct_vertices(vertices)
return vertices
def get_rectangles(contours):
rectangles = []
for contour in contours:
epsilon = 0.04*cv2.arcLength(contour,True)
hull = cv2.convexHull(contour)
approx = cv2.approxPolyDP(hull,epsilon,True)
if (len(approx) == 4 and cv2.isContourConvex(approx)):
rectangles.append(approx)
return rectangles
def find_contours(self, img):
thresh_img = self.threshold(img)
    _, contours, _ = cv2.findContours(thresh_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x return signature
result = []
for cnt in contours:
approx = cv2.approxPolyDP(cnt, 0.01*cv2.arcLength(cnt, True), True)
if self.draw_approx:
cv2.drawContours(self.out, [approx], -1, self.BLUE, 2, lineType=8)
if len(approx) > 3 and len(approx) < 15:
_, _, w, h = cv2.boundingRect(approx)
if h > self.min_height and w > self.min_width:
hull = cv2.convexHull(cnt)
approx2 = cv2.approxPolyDP(hull,0.01*cv2.arcLength(hull,True),True)
if self.draw_approx2:
cv2.drawContours(self.out, [approx2], -1, self.GREEN, 2, lineType=8)
result.append(approx2)
return result
def drawlines(img, points):
filler = cv2.convexHull(points)
    cv2.polylines(img, [filler], True, (0, 0, 0), thickness=2)  # polylines expects a list of point arrays
return img
def _get_tip_position(array, contour, verbose = False):
approx_contour = cv2.approxPolyDP(contour, 0.08 * cv2.arcLength(contour, True), True)
convex_points = cv2.convexHull(approx_contour, returnPoints = True)
cx, cy = 999, 999
for point in convex_points:
cur_cx, cur_cy = point[0][0], point[0][1]
if verbose:
cv2.circle(array, (cur_cx, cur_cy), 4, _COLOR_GREEN,4)
if (cur_cy < cy):
cx, cy = cur_cx, cur_cy
(screen_x, screen_y) = pyautogui.size()
height, width, _ = array.shape
    x = _round_int(float(cx) / width * (screen_x + 1))
    y = _round_int(float(cy) / height * (screen_y + 1))
return (array, (x, y))
def get_corners_from_contours(contours, corner_amount=4):
"""
Finds four corners from a list of points on the goal
epsilon - the minimum side length of the polygon generated by the corners
Parameters:
:param: `contours` - a numpy array of points (opencv contour) of the
points to get corners from
:param: `corner_amount` - the number of corners to find
"""
coefficient = .05
while True:
# print(contours)
epsilon = coefficient * cv2.arcLength(contours, True)
# epsilon =
# print("epsilon:", epsilon)
poly_approx = cv2.approxPolyDP(contours, epsilon, True)
hull = cv2.convexHull(poly_approx)
if len(hull) == corner_amount:
return hull
else:
if len(hull) > corner_amount:
coefficient += .01
else:
coefficient -= .01
def extract_corners(self, image):
"""
Find the 4 corners of a binary image
:param image: binary image
:return: 4 main vertices or None
"""
cnts, _ = cv2.findContours(image.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2:]
cnt = cnts[0]
    _, _, w, h = cv2.boundingRect(cnt)
epsilon = min(h, w) * 0.5
o_vertices = cv2.approxPolyDP(cnt, epsilon, True)
vertices = cv2.convexHull(o_vertices, clockwise=True)
vertices = self.correct_vertices(vertices)
if self.debug:
temp = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
cv2.drawContours(temp, cnts, -1, (0, 255, 0), 10)
cv2.drawContours(temp, o_vertices, -1, (255, 0, 0), 30)
cv2.drawContours(temp, vertices, -1, (0, 0, 255), 20)
self.save2image(temp)
return vertices
Source: data_preprocessing_autoencoder.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def visualize(frame, coordinates_list, alpha = 0.80, color=[255, 255, 255]):
"""
Args:
1. frame: OpenCV's image which has to be visualized.
2. coordinates_list: List of coordinates which will be visualized in the given `frame`
    3. alpha, color: blending weight and fill color used for the overlay.
A convex hull will be shown for each element in the `coordinates_list`
"""
layer = frame.copy()
output = frame.copy()
for coordinates in coordinates_list:
c_hull = cv2.convexHull(coordinates)
cv2.drawContours(layer, [c_hull], -1, color, -1)
cv2.addWeighted(layer, alpha, output, 1 - alpha, 0, output)
cv2.imshow("Output", output)
def get_corners(contour):
"""
Given a contour that should have a rectangular convex hull, produce a sorted list of corners for the bounding rectangle
:param contour:
:return:
"""
hull = cv2.convexHull(contour)
hull_poly = cv2.approxPolyDP(hull, 0.05 * cv2.arcLength(hull, True), True)
return sort_corners(hull_poly)
def fill(img, points):
filler = cv2.convexHull(points)
cv2.fillConvexPoly(img, filler, 255)
return img
def blendImages(src, dst, mask, featherAmount=0.2):
    # indices of the non-black pixels of the mask
maskIndices = np.where(mask != 0)
    # the same indices, but stacked into one matrix where each row is one pixel (x, y)
maskPts = np.hstack((maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis]))
faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0)
featherAmount = featherAmount * np.max(faceSize)
hull = cv2.convexHull(maskPts)
dists = np.zeros(maskPts.shape[0])
for i in range(maskPts.shape[0]):
dists[i] = cv2.pointPolygonTest(hull, (maskPts[i, 0], maskPts[i, 1]), True)
weights = np.clip(dists / featherAmount, 0, 1)
composedImg = np.copy(dst)
composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src[maskIndices[0], maskIndices[1]] + (1 - weights[:, np.newaxis]) * dst[maskIndices[0], maskIndices[1]]
return composedImg
# note: here src is the image the color is taken from
def camera_gesture_trigger():
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
ret,thresh1 = cv2.threshold(blur,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    max_area = 0
    ci = -1
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if area > max_area:
            max_area = area
            ci = i
    if ci < 0:  # no contour found, nothing to analyse
        return 0
    cnt = contours[ci]
hull = cv2.convexHull(cnt)
moments = cv2.moments(cnt)
cnt = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
hull = cv2.convexHull(cnt,returnPoints = False)
defects = cv2.convexityDefects(cnt,hull)
if defects is not None:
if defects.shape[0] >= 5:
return 1
return 0
def warpFace3D(im, oldMesh, pose, newMesh, accurate=False, fitter=None):
oldVerts2d = projectMeshTo2D(oldMesh, pose, im)
newVerts2d = projectMeshTo2D(newMesh, pose, im)
if not accurate and fitter is not None:
ALL_FACE_MESH_VERTS = fitter.landmarks_2_vert_indices[ALL_FACE_LANDMARKS]
ALL_FACE_MESH_VERTS = np.delete(ALL_FACE_MESH_VERTS, np.where(ALL_FACE_MESH_VERTS == -1)).tolist()
oldConvexHullIndexs = cv2.convexHull(oldVerts2d.astype(np.float32), returnPoints=False)
warpPointIndexs = oldConvexHullIndexs.flatten().tolist() + ALL_FACE_MESH_VERTS
oldVerts2d = oldVerts2d[warpPointIndexs]
newVerts2d = newVerts2d[warpPointIndexs]
warpedIm = warpFace(im, oldVerts2d, newVerts2d)
return warpedIm
def decomposePose(mesh, pose, im):
modelview = np.matrix(pose.get_modelview())
proj = np.matrix(pose.get_projection())
viewport = np.array([0, im.shape[0], im.shape[1], -im.shape[0]])
modelview = modelview.tolist()
projection = proj.tolist()
viewport = viewport.tolist()
ALL_FACE_MESH_VERTS = BFM_FACEFITTING.landmarks_2_vert_indices[ALL_FACE_LANDMARKS]
ALL_FACE_MESH_VERTS = np.delete(ALL_FACE_MESH_VERTS, np.where(ALL_FACE_MESH_VERTS == -1)).tolist()
verts2d = projectMeshTo2D(mesh, pose, im)
convexHullIndexs = cv2.convexHull(verts2d.astype(np.float32), returnPoints=False)
warpPointIndexs = convexHullIndexs.flatten().tolist() + ALL_FACE_MESH_VERTS
indexs = warpPointIndexs
return modelview, projection, viewport, indexs
def drawConvexHull(img, contours):
cnt = contours[0]
mask = np.zeros(img.shape, np.uint8)
hull = cv2.convexHull(cnt,returnPoints = False)
    defects = cv2.convexityDefects(cnt, hull)
    if defects is not None:  # a fully convex contour produces no defects
        for i in range(defects.shape[0]):
            s, e, f, d = defects[i, 0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])
            cv2.line(mask, start, end, [255, 255, 255], 5)
            cv2.circle(mask, far, 5, [255, 255, 255], -1)
(x,y),radius = cv2.minEnclosingCircle(cnt)
center = (int(x),int(y))
radius = int(radius)
cv2.circle(mask,center,radius,(255,255,255),-1)
return mask
def calculateFingers(res,drawing): # -> finished bool, cnt: finger count
# convexity defect
hull = cv2.convexHull(res, returnPoints=False)
if len(hull) > 3:
defects = cv2.convexityDefects(res, hull)
        if defects is not None:  # avoid crashing when the contour has no defects
cnt = 0
for i in range(defects.shape[0]): # calculate the angle
s, e, f, d = defects[i][0]
start = tuple(res[s][0])
end = tuple(res[e][0])
far = tuple(res[f][0])
a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) # cosine theorem
if angle <= math.pi / 2: # angle less than 90 degree, treat as fingers
cnt += 1
cv2.circle(drawing, far, 8, [211, 84, 0], -1)
return True, cnt
return False, 0
# Camera
def get_contours(image, polydb=0.03, contour_range=5, show=False):
# find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
# if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]
# loop over the contours
screenCnt = None
for c in cnts:
# approximate the contour
peri = cv2.arcLength(c, True) #finds the Contour Perimeter
approx = cv2.approxPolyDP(c, polydb * peri, True)
# if our approximated contour has four points, then we can assume that we have found our screen
if len(approx) == 4:
screenCnt = approx
break
if screenCnt is None: raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; the shape conforms to the card but is not complete, so complete it into a convex form
if not cv2.isContourConvex(screenCnt):
screenCnt = cv2.convexHull(screenCnt)
x,y,w,h = cv2.boundingRect(screenCnt)
screenCnt = np.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
    if show:  # this is for debugging purposes
new_image = image.copy()
cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
cv2.imshow("Contour1 image", new_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return screenCnt
def get_contours(image, polydb=0.03, contour_range=5, show=False):
# find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    if cv2version == 3:
        im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    else:  # OpenCV 2.x returns only (contours, hierarchy)
        contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]
# loop over the contours
screenCnt = None
for c in cnts:
# approximate the contour
peri = cv2.arcLength(c, True) #finds the Contour Perimeter
approx = cv2.approxPolyDP(c, polydb * peri, True)
# if our approximated contour has four points, then we can assume that we have found our screen
if len(approx) == 4:
screenCnt = approx
break
if screenCnt is None: raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; the shape conforms to the card but is not complete, so complete it into a convex form
if not cv2.isContourConvex(screenCnt):
screenCnt = cv2.convexHull(screenCnt)
x,y,w,h = cv2.boundingRect(screenCnt)
screenCnt = np.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
    if show:  # this is for debugging purposes
new_image = image.copy()
cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
cv2.imshow("Contour1 image", new_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return screenCnt
def get_contours(image, polydb=0.1, contour_range=7, show=False):
# find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]
# loop over the contours
screenCnt = None
for c in cnts:
# approximate the contour
peri = cv2.arcLength(c, True) #finds the Contour Perimeter
approx = cv2.approxPolyDP(c, polydb * peri, True)
# if our approximated contour has four points, then we can assume that we have found our screen
if len(approx) == 4:
screenCnt = approx
break
if screenCnt is None:
raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; the shape conforms to the card but is not complete, so complete it into a convex form
if not cv2.isContourConvex(screenCnt):
screenCnt = cv2.convexHull(screenCnt)
x,y,w,h = cv2.boundingRect(screenCnt)
screenCnt = numpy.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
    if show:  # this is for debugging purposes
new_image = image.copy()
cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
cv2.imshow("Contour1 image", new_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return screenCnt
def get_contours(image, polydb=0.03, contour_range=7, show=False):
# find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
# if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]
# loop over the contours
screenCnt = None
for c in cnts:
# approximate the contour
peri = cv2.arcLength(c, True) #finds the Contour Perimeter
approx = cv2.approxPolyDP(c, polydb * peri, True)
# if our approximated contour has four points, then we can assume that we have found our screen
if len(approx) == 4:
screenCnt = approx
break
if screenCnt is None: raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; the shape conforms to the card but is not complete, so complete it into a convex form
if not cv2.isContourConvex(screenCnt):
screenCnt = cv2.convexHull(screenCnt)
x,y,w,h = cv2.boundingRect(screenCnt)
screenCnt = numpy.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
    if show:  # this is for debugging purposes
new_image = image.copy()
cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
cv2.imshow("Contour1 image", new_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return screenCnt
def solidity(contour, hull=None, area_min=0):
"""
Calculate the solidity of a contour, which is the ratio of its area to the
area of its convex hull.
:param contour: the target contour
:param hull: the convex hull of the contour, see cv2.convexHull(contour)
:param area_min: a contour with an area below the minimum has a solidity of 0
"""
if hull is None:
hull = cv2.convexHull(contour)
try:
contour_area = cv2.contourArea(contour)
hull_area = cv2.contourArea(hull)
return contour_area / hull_area if hull_area > area_min else 0
except ArithmeticError:
return 0
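A brief usage note on the snippet above: the optional `hull` argument lets a caller that has already computed the hull avoid recomputing it. A minimal, illustrative sketch (the `contour` variable is assumed to come from cv2.findContours):

hull = cv2.convexHull(contour)
s = solidity(contour, hull=hull)   # reuse the precomputed hull
is_compact = s > 0.9               # e.g. treat near-solid blobs as roughly convex shapes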
def partial_blur(img, points, kernel_size=9, type=1):
    """
    Partially blur the image inside the convex hull of points.
    Args:
        type = 0 for Gaussian blur
        type = 1 for average blur
    """
    points = cv2.convexHull(points)
    copy_img = img.copy()
    black = (0, 0, 0)
    if type:
        blur_img = cv2.blur(img, (kernel_size, kernel_size))
    else:
        blur_img = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    cv2.fillConvexPoly(copy_img, points, color=black)
    # copy the blurred pixels into the blacked-out hull region
    for row in range(img.shape[0]):
        for col in range(img.shape[1]):
            if numpy.array_equal(copy_img[row][col], black):
                copy_img[row][col] = blur_img[row][col]
    return copy_img
def get_face_mask(img, img_l):
img = np.zeros(img.shape[:2], dtype = np.float64)
for idx in OVERLAY_POINTS_IDX:
cv2.fillConvexPoly(img, cv2.convexHull(img_l[idx]), color = 1)
img = np.array([img, img, img]).transpose((1, 2, 0))
img = (cv2.GaussianBlur(img, (BLUR_AMOUNT, BLUR_AMOUNT), 0) > 0) * 1.0
img = cv2.GaussianBlur(img, (BLUR_AMOUNT, BLUR_AMOUNT), 0)
return img
def get_contour_mask(dshape, img_fl):
mask = np.zeros(dshape)
hull = cv2.convexHull(img_fl)
cv2.drawContours(mask, [hull], 0, (1, 1, 1) , -1)
return np.uint8(mask)
# Orients input_ mask onto tmpl_ face
def convex_hull(pts, ccw=True):
"""
Returns the convex hull of points, ordering them in ccw/cw fashion
Note: Since the orientation of the coordinate system is x-right,
y-up, ccw is interpreted as cw in the function call.
"""
assert(pts.ndim == 2 and pts.shape[1] == 2)
return (cv2.convexHull(pts.reshape(-1,1,2), clockwise=ccw)).reshape(-1,2)
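A small illustrative call for the helper above (the point array is made up, and numpy is assumed to be imported as np): interior points are dropped and the result comes back as an (M, 2) array ordered according to the ccw flag.

pts = np.array([[0, 0], [2, 0], [2, 2], [0, 2], [1, 1]], dtype=np.float32)
ordered = convex_hull(pts)   # the interior point (1, 1) is dropped; result has shape (M, 2)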
# ===========================================================================
# BBOX-functions
def convex_hulls(contours):
"""
Convenience method to get a list of convex hulls from list of contours
:param contours: contours that should be turned into convex hulls
:return: a list of convex hulls that match each contour
"""
hulls = []
for contour in contours:
hulls.append(cv2.convexHull(contour))
return hulls
def draw_convex_hull(self,im, points, color):
    '''
    Fill the convex hull of the given points on the image with the given color.
    '''
points = cv2.convexHull(points)
cv2.fillConvexPoly(im, points, color=color)
def __init__(self,g_pool):
super().__init__(g_pool)
self.menu=None
logger.error("This will be implemented as part of gaze mapper soon.")
self.alive= False
return
width,height = self.g_pool.capture.frame_size
if g_pool.app == 'capture':
cal_pt_path = os.path.join(g_pool.user_dir,"user_calibration_data")
else:
cal_pt_path = os.path.join(g_pool.rec_dir,"user_calibration_data")
try:
user_calibration_data = load_object(cal_pt_path)
except:
logger.warning("Please calibrate first")
self.close()
return
if self.g_pool.binocular:
fn_input_eye1 = cal_pt_cloud[:,2:4].transpose()
cal_pt_cloud[:,0:2] = np.array(map_fn(fn_input_eye0, fn_input_eye1)).transpose()
cal_pt_cloud[:,2:4] = cal_pt_cloud[:,4:6]
else:
fn_input = cal_pt_cloud[:,0:2].transpose()
cal_pt_cloud[:,0:2] = np.array(map_fn(fn_input)).transpose()
ref_pts = cal_pt_cloud[inlier_map][:,np.newaxis,2:4]
ref_pts = np.array(ref_pts,dtype=np.float32)
logger.debug("calibration ref_pts %s"%ref_pts)
if len(ref_pts)== 0:
logger.warning("Calibration is bad. Please re-calibrate")
self.close()
return
self.calib_bounds = cv2.convexHull(ref_pts)
# create a list [[px1,py1],[wx1,wy1],[px2,py2],[wx2,wy2]...] of outliers and inliers for gl_lines
self.outliers = np.concatenate((cal_pt_cloud[~inlier_map][:,0:2],cal_pt_cloud[~inlier_map][:,2:4])).reshape(-1,2)
self.inliers = np.concatenate((cal_pt_cloud[inlier_map][:,0:2],cal_pt_cloud[inlier_map][:,2:4]),axis=1).reshape(-1,2)
self.inlier_ratio = cal_pt_cloud[inlier_map].shape[0]/float(cal_pt_cloud.shape[0])
self.inlier_count = cal_pt_cloud[inlier_map].shape[0]
# hull = cv2.approxPolyDP(self.calib_bounds, 0.001,closed=True)
full_screen_area = 1.
logger.debug("calibration bounds %s"%self.calib_bounds)
self.calib_area_ratio = cv2.contourArea(self.calib_bounds)/full_screen_area