def rgb_callback(self, data):
    """Estimate the chessboard pose from each incoming RGB image message.

    Converts the ROS image to an OpenCV BGR array, detects chessboard
    corners, solves the board pose with RANSAC PnP, and draws the 3-D
    axes onto a copy of the image.  Relies on module-level globals:
    x_num, y_num, criteria, objpoints, axis, rgb_mtx, rgb_dist.
    """
    try:
        img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        # Bail out on conversion failure.  The original fell through
        # with `img` undefined, which raised NameError on the next line.
        print(e)
        return
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # findChessboardCorners accepts a color image; gray is used for the
    # sub-pixel refinement below.
    ret, corners = cv2.findChessboardCorners(img, (x_num, y_num))
    cv2.imshow('img', img)
    cv2.waitKey(5)
    if ret:
        # Refine corner locations to sub-pixel accuracy.
        cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)
        tempimg = img.copy()
        cv2.drawChessboardCorners(tempimg, (x_num, y_num), corners, ret)
        # NOTE(review): OpenCV >= 3 returns (retval, rvec, tvec, inliers)
        # from solvePnPRansac -- this 3-tuple unpack assumes OpenCV 2.x.
        rvec, tvec, inliers = cv2.solvePnPRansac(objpoints, corners, rgb_mtx, rgb_dist)
        print("rvecs:")
        print(rvec)
        print("tvecs:")
        print(tvec)
        # Project the 3-D axis endpoints to the image plane and draw them.
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, rgb_mtx, rgb_dist)
        imgpts = np.int32(imgpts).reshape(-1, 2)
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[1]), [255, 0, 0], 4)  # BGR
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[2]), [0, 255, 0], 4)
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[3]), [0, 0, 255], 4)
        cv2.imshow('img', tempimg)
        cv2.waitKey(5)
python类projectPoints()的实例源码
pose_estimation.py 文件源码
项目:Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials
作者: taochenshh
项目源码
文件源码
阅读 27
收藏 0
点赞 0
评论 0
def drawBox(self, img):
    """Draw a unit-cube wireframe (blue pillars + red top face) on img
    using the stored pose (self.RVEC/self.TVEC) and intrinsics
    (self.MTX/self.DIST)."""
    cube = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],
                       [0, 0, -1], [0, 1, -1], [1, 1, -1], [1, 0, -1]])
    projected, _ = cv2.projectPoints(cube, self.RVEC, self.TVEC, self.MTX, self.DIST)
    projected = np.int32(projected).reshape(-1, 2)
    # Pillars: connect each bottom vertex i to its top vertex i+4, in blue.
    for base in range(4):
        img2 = cv2.line(img, tuple(projected[base]), tuple(projected[base + 4]), (255, 0, 0), 3)
    # Top layer outlined in red.
    return cv2.drawContours(img2, [projected[4:]], -1, (0, 0, 255), 3)
# Debug Code.
def drawBox(self, img):
    """Project a unit cube with the cached pose and draw it into img:
    blue vertical edges, top face outlined in red."""
    corners3d = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],
                            [0, 0, -1], [0, 1, -1], [1, 1, -1], [1, 0, -1]])
    pts2d = cv2.projectPoints(corners3d, self.RVEC, self.TVEC, self.MTX, self.DIST)[0]
    pts2d = np.int32(pts2d).reshape(-1, 2)
    img2 = img
    # Pillars (bottom vertex -> matching top vertex) in blue.
    for bottom, top in zip(pts2d[:4], pts2d[4:]):
        img2 = cv2.line(img2, tuple(bottom), tuple(top), (255, 0, 0), 3)
    # Top layer in red.
    return cv2.drawContours(img2, [pts2d[4:]], -1, (0, 0, 255), 3)
# Debug Code.
def drawAxis(camera_parameters, markers, frame):
    """Draw each marker's X/Y/Z axes and labels onto `frame`.

    Projects the unit axis endpoints through every marker's pose and
    draws one colored, labeled line per axis starting at the marker's
    first corner (BGR: X red, Y green, Z blue).
    """
    axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).reshape(-1, 3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff
    font = cv2.FONT_HERSHEY_SIMPLEX
    axis_colors = ((0, 0, 255), (0, 255, 0), (255, 0, 0))
    for marker in markers:
        imgpts, _ = cv2.projectPoints(axis, marker.rvec, marker.tvec, mtx, dist)
        # Round to integer pixel coordinates: OpenCV drawing functions
        # reject float point tuples in recent (>= 4.x) releases.
        imgpts = np.int32(imgpts).reshape(-1, 2)
        origin = tuple(np.int32(marker.corners[0].ravel()))
        for label, tip, color in zip('XYZ', imgpts, axis_colors):
            tip = tuple(tip)
            cv2.line(frame, origin, tip, color, 2)
            cv2.putText(frame, label, tip, font, 0.5, color, 2, cv2.LINE_AA)
def drawBox(camera_parameters, markers, frame):
    """Draw the wireframe of a unit cube on every marker in `frame`.

    Projects the 8 cube corners through each marker's pose and connects
    them with red (BGR (0,0,255)) lines: bottom ring, vertical pillars,
    top ring.
    """
    objpts = np.float32([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],
                         [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]]).reshape(-1, 3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff
    # Cube edges as corner-index pairs, in the original draw order.
    edges = [(0, 1), (1, 2), (2, 3), (3, 0),
             (0, 4), (1, 5), (2, 6), (3, 7),
             (4, 5), (5, 6), (6, 7), (7, 4)]
    for marker in markers:
        imgpts, _ = cv2.projectPoints(objpts, marker.rvec, marker.tvec, mtx, dist)
        # Integer pixel coordinates -- newer OpenCV rejects float tuples.
        imgpts = np.int32(imgpts).reshape(-1, 2)
        for i, j in edges:
            cv2.line(frame, tuple(imgpts[i]), tuple(imgpts[j]), (0, 0, 255), 2)
def drawAxis(camera_parameters, markers, frame):
    """Overlay each marker's projected X/Y/Z axis lines and labels on frame."""
    unit_axes = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).reshape(-1, 3)
    cam_mtx = camera_parameters.camera_matrix
    cam_dist = camera_parameters.dist_coeff
    font = cv2.FONT_HERSHEY_SIMPLEX
    for marker in markers:
        projected, _ = cv2.projectPoints(unit_axes, marker.rvec, marker.tvec, cam_mtx, cam_dist)
        anchor = tuple(marker.corners[0].ravel())
        tips = [tuple(projected[k].ravel()) for k in range(3)]
        # Axis lines from the first marker corner (BGR: X red, Y green, Z blue).
        cv2.line(frame, anchor, tips[0], (0, 0, 255), 2)
        cv2.line(frame, anchor, tips[1], (0, 255, 0), 2)
        cv2.line(frame, anchor, tips[2], (255, 0, 0), 2)
        # Matching letter labels at each axis tip.
        cv2.putText(frame, 'X', tips[0], font, 0.5, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Y', tips[1], font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Z', tips[2], font, 0.5, (255, 0, 0), 2, cv2.LINE_AA)
def drawBox(camera_parameters, markers, frame):
    """Draw a red unit-cube wireframe over every marker in frame."""
    cube = np.float32([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],
                       [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]]).reshape(-1, 3)
    cam_mtx = camera_parameters.camera_matrix
    cam_dist = camera_parameters.dist_coeff
    red = (0, 0, 255)
    # Edge index pairs in the original drawing order:
    # bottom ring, vertical pillars, top ring.
    edge_pairs = [(0, 1), (1, 2), (2, 3), (3, 0),
                  (0, 4), (1, 5), (2, 6), (3, 7),
                  (4, 5), (5, 6), (6, 7), (7, 4)]
    for marker in markers:
        pts, _ = cv2.projectPoints(cube, marker.rvec, marker.tvec, cam_mtx, cam_dist)
        for a, b in edge_pairs:
            cv2.line(frame, tuple(pts[a].ravel()), tuple(pts[b].ravel()), red, 2)
def drawAxis(camera_parameters, markers, frame):
    """Draw projected coordinate axes with X/Y/Z labels for each marker."""
    basis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).reshape(-1, 3)
    mtx = camera_parameters.camera_matrix
    dist = camera_parameters.dist_coeff
    labels_colors = (('X', (0, 0, 255)), ('Y', (0, 255, 0)), ('Z', (255, 0, 0)))
    for marker in markers:
        ends, _ = cv2.projectPoints(basis, marker.rvec, marker.tvec, mtx, dist)
        start = tuple(marker.corners[0].ravel())
        # All three lines first, then all three labels, matching the
        # original draw order.
        for idx, (_, color) in enumerate(labels_colors):
            cv2.line(frame, start, tuple(ends[idx].ravel()), color, 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        for idx, (label, color) in enumerate(labels_colors):
            cv2.putText(frame, label, tuple(ends[idx].ravel()), font, 0.5, color, 2, cv2.LINE_AA)
def drawAxis(camera_parameters, markers, frame):
    """Render each marker's pose axes (lines plus letter labels) into frame."""
    axis_pts = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).reshape(-1, 3)
    intr = camera_parameters.camera_matrix
    coeffs = camera_parameters.dist_coeff
    bgr = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]  # X red, Y green, Z blue
    for marker in markers:
        tips2d, _ = cv2.projectPoints(axis_pts, marker.rvec, marker.tvec, intr, coeffs)
        root = tuple(marker.corners[0].ravel())
        for axis_i in (0, 1, 2):
            cv2.line(frame, root, tuple(tips2d[axis_i].ravel()), bgr[axis_i], 2)
        typeface = cv2.FONT_HERSHEY_SIMPLEX
        for axis_i, name in enumerate('XYZ'):
            cv2.putText(frame, name, tuple(tips2d[axis_i].ravel()),
                        typeface, 0.5, bgr[axis_i], 2, cv2.LINE_AA)
def drawBox(camera_parameters, markers, frame):
    """Wireframe a unit cube in red over each detected marker."""
    box3d = np.float32([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],
                        [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]]).reshape(-1, 3)
    K = camera_parameters.camera_matrix
    D = camera_parameters.dist_coeff
    for marker in markers:
        p2d, _ = cv2.projectPoints(box3d, marker.rvec, marker.tvec, K, D)

        def at(k):
            return tuple(p2d[k].ravel())

        # Bottom ring (corners 0..3).
        for k in range(4):
            cv2.line(frame, at(k), at((k + 1) % 4), (0, 0, 255), 2)
        # Vertical pillars (corner k to corner k+4).
        for k in range(4):
            cv2.line(frame, at(k), at(k + 4), (0, 0, 255), 2)
        # Top ring (corners 4..7).
        for k in range(4):
            cv2.line(frame, at(4 + k), at(4 + (k + 1) % 4), (0, 0, 255), 2)
def project(self, X, check_bounds=False, check_depth=False, return_depth=False, min_depth=0.1):
    """
    Project [Nx3] points onto 2-D image plane [Nx2].

    Parameters
    ----------
    X : (N, 3) array of 3-D points.
    check_bounds : drop points projecting outside the image
        (requires self.shape to be set).
    check_depth : drop points with depth below `min_depth`.
    return_depth : additionally return the depths of the kept points.
    min_depth : minimum accepted depth when check_depth is set.
    """
    R, t = self.to_Rt()
    rvec, _ = cv2.Rodrigues(R)
    proj, _ = cv2.projectPoints(X, rvec, t, self.K, self.D)
    x = proj.reshape(-1, 2)
    # Depths are needed for both filtering and the optional return
    # value.  The original computed them only under check_depth, so
    # return_depth=True with check_depth=False raised NameError.
    if check_depth or return_depth:
        depths = self.depth_from_projection(X)
    if check_depth:
        # Only keep points sufficiently in front of the camera.
        valid = depths >= min_depth
    else:
        # np.bool was removed in NumPy 1.24; the builtin bool is the
        # correct dtype here.
        valid = np.ones(len(x), dtype=bool)
    if check_bounds:
        if self.shape is None:
            raise ValueError('check_bounds cannot proceed. Camera.shape is not set')
        # Only return points within-image bounds.
        valid = np.bitwise_and(
            valid, np.bitwise_and(
                np.bitwise_and(x[:, 0] >= 0, x[:, 0] < self.shape[1]),
                np.bitwise_and(x[:, 1] >= 0, x[:, 1] < self.shape[0]))
        )
    if return_depth:
        return x[valid], depths[valid]
    return x[valid]
def _map_monocular(self, p):
    """Map a single 3d pupil datum into a world-frame gaze datum.

    Returns None for non-3d pupil data; otherwise a gaze dict carrying
    the normalized image position plus 3-D eye center, gaze normal and
    gaze point (world coordinates).
    """
    if '3d' not in p['method']:
        return None
    sphere_center = np.array(p['sphere']['center'])
    circle_normal = np.array(p['circle_3d']['normal'])
    # Gaze point: step along the pupil normal from the eyeball center.
    gaze_point = circle_normal * self.gaze_distance + sphere_center
    projected, _ = cv2.projectPoints(np.array([gaze_point]),
                                     self.rotation_vector,
                                     self.translation_vector,
                                     self.camera_matrix,
                                     self.dist_coefs)
    norm_pos = normalize(projected.reshape(-1, 2)[0], self.world_frame_size, flip_y=True)
    norm_pos = _clamp_norm_point(norm_pos)
    eye_center = self.toWorld(p['sphere']['center'])
    gaze_3d = self.toWorld(gaze_point)
    normal_3d = np.dot(self.rotation_matrix, circle_normal)
    g = {'topic': 'gaze',
         'norm_pos': norm_pos,
         'eye_center_3d': eye_center.tolist(),
         'gaze_normal_3d': normal_3d.tolist(),
         'gaze_point_3d': gaze_3d.tolist(),
         'confidence': p['confidence'],
         'timestamp': p['timestamp'],
         'base_data': [p]}
    # Debug-visualization bookkeeping, only while the window is open.
    if self.visualizer.window:
        self.gaze_pts_debug.append(gaze_3d)
        self.sphere['center'] = eye_center  # eye camera coordinates
        self.sphere['radius'] = p['sphere']['radius']
    return g
def _map_monocular(self,p):
    """Map a 3d pupil datum from eye ``p['id']`` into a world gaze datum.

    Projects the 3-D gaze point through the per-eye extrinsics into the
    world camera, normalizes the image position, and packages per-eye
    eye-center / gaze-normal data.  Returns None for non-3d pupil data.
    """
    if '3d' not in p['method']:
        return None
    p_id = p['id']
    # Gaze point: step along the pupil normal from the eyeball center.
    gaze_point = np.array(p['circle_3d']['normal'] ) * self.last_gaze_distance + np.array( p['sphere']['center'] )
    # Project into the world camera using this eye's extrinsics.
    image_point, _ = cv2.projectPoints( np.array([gaze_point]) , self.rotation_vectors[p_id], self.translation_vectors[p_id] , self.camera_matrix , self.dist_coefs )
    image_point = image_point.reshape(-1,2)
    image_point = normalize( image_point[0], self.world_frame_size , flip_y = True)
    image_point = _clamp_norm_point(image_point)
    # Transform eyeball center and gaze point into world coordinates
    # with the eye-specific rigid transform.
    if p_id == 0:
        eye_center = self.eye0_to_World(p['sphere']['center'])
        gaze_3d = self.eye0_to_World(gaze_point)
    else:
        eye_center = self.eye1_to_World(p['sphere']['center'])
        gaze_3d = self.eye1_to_World(gaze_point)
    normal_3d = np.dot( self.rotation_matricies[p_id], np.array( p['circle_3d']['normal'] ) )
    g = {
        'topic':'gaze',
        'norm_pos':image_point,
        'eye_centers_3d':{p['id']:eye_center.tolist()},
        'gaze_normals_3d':{p['id']:normal_3d.tolist()},
        'gaze_point_3d':gaze_3d.tolist(),
        'confidence':p['confidence'],
        'timestamp':p['timestamp'],
        'base_data':[p]}
    # Per-eye debug-visualization state, only while the window is open.
    if self.visualizer.window:
        if p_id == 0:
            self.gaze_pts_debug0.append(gaze_3d)
            self.sphere0['center'] = eye_center
            self.sphere0['radius'] = p['sphere']['radius']
        else:
            self.gaze_pts_debug1.append(gaze_3d)
            self.sphere1['center'] = eye_center
            self.sphere1['radius'] = p['sphere']['radius']
    return g
def project_distort_pts(pts_xyz, camera_matrix, dist_coefs, rvec=np.array([0,0,0], dtype=np.float32), tvec=np.array([0,0,0], dtype=np.float32)):
    """Project 3-D points to distorted 2-D pixel coordinates.

    Inverse of the undistort/unproject step implemented above: feeding
    that intermediate result back in reproduces the original input.
    """
    projected = cv2.projectPoints(pts_xyz, rvec, tvec, camera_matrix, dist_coefs)[0]
    return projected.reshape(-1, 2)
def draw_quads(self, img, quads, color=(0, 255, 0)):
    """Project 3-D quads with the current pose and fill them on img."""
    projected = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef)[0]
    projected.shape = quads.shape[:2] + (2,)
    # shift=2 means coordinates carry 2 fractional bits, hence the *4.
    for quad in projected:
        cv2.fillConvexPoly(img, np.int32(quad * 4), color, cv2.LINE_AA, shift=2)
def find_reprojection_error(self, i_usable_frame, object_points, intrinsics):
    """Return the RMS reprojection error (pixels) for one usable frame."""
    pose = self.poses[i_usable_frame]
    projected = cv2.projectPoints(object_points, pose.rvec, pose.tvec,
                                  intrinsics.intrinsic_mat,
                                  intrinsics.distortion_coeffs)[0]
    residuals = self.image_points[i_usable_frame] - projected
    return math.sqrt((residuals ** 2).sum() / len(object_points))
# TODO: passing in both frame_folder_path and save_image doesn't make sense. Make saving dependent on the former.
def draw3dCoordAxis(self, img=None, thickness=8):
    '''
    Draw the 3d coordinate axes into the given image.

    img -- None: draw into self.img; False: draw into an empty image of
    the same shape/dtype; anything else is passed to imread.
    Returns the image with the axes drawn.
    '''
    if img is None:
        img = self.img
    elif img is False:
        img = np.zeros(shape=self.img.shape, dtype=self.img.dtype)
    else:
        img = imread(img)
    # project 3D points to image plane:
    # self.opts['obj_width_mm'], self.opts['obj_height_mm']
    w, h = self.opts['new_size']
    axis = np.float32([[0.5 * w, 0.5 * h, 0],
                       [w, 0.5 * h, 0],
                       [0.5 * w, h, 0],
                       [0.5 * w, 0.5 * h, -0.5 * w]])
    t, r = self.pose()
    imgpts = cv2.projectPoints(axis, r, t,
                               self.opts['cameraMatrix'],
                               self.opts['distCoeffs'])[0]
    # Round to integer pixel coordinates: recent OpenCV releases reject
    # float point tuples in drawing functions.
    imgpts = np.int32(imgpts).reshape(-1, 2)
    # Scale line colors by the image maximum so they are visible for any dtype.
    mx = int(img.max())
    origin = tuple(imgpts[0])
    cv2.line(img, origin, tuple(imgpts[1]), (0, 0, mx), thickness)
    cv2.line(img, origin, tuple(imgpts[2]), (0, mx, 0), thickness)
    # The out-of-plane axis is drawn twice as thick to stand out.
    cv2.line(
        img, origin, tuple(imgpts[3]), (mx, 0, 0), thickness * 2)
    return img
def draw_quads(self, img, quads, color=(0, 255, 0)):
    """Project 3-D quads with the current pose and fill them on img.

    Uses cv2.LINE_AA: the OpenCV-2-only alias cv2.CV_AA was removed in
    OpenCV 3, and the other draw_quads implementations in this file
    already use LINE_AA.
    """
    img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef)[0]
    img_quads.shape = quads.shape[:2] + (2,)
    for q in img_quads:
        # shift=2 -> coordinates carry 2 fractional bits, hence the *4.
        cv2.fillConvexPoly(img, np.int32(q * 4), color, cv2.LINE_AA, shift=2)
def project_xy(xy_coords, pvec):
    """Project 2-D surface coordinates to image points.

    The surface height is a cubic polynomial f with f(0) = f(1) = 0,
    f'(0) = alpha and f'(1) = beta (alpha, beta taken from
    pvec[CUBIC_IDX]); the lifted 3-D points are projected with the pose
    stored in pvec and the global camera matrix K.
    """
    alpha, beta = tuple(pvec[CUBIC_IDX])
    # Cubic coefficients satisfying the boundary conditions above.
    cubic = np.array([alpha + beta,
                      -2 * alpha - beta,
                      alpha,
                      0])
    flat_xy = xy_coords.reshape((-1, 2))
    heights = np.polyval(cubic, flat_xy[:, 0])
    pts3d = np.hstack((flat_xy, heights.reshape((-1, 1))))
    projected, _ = cv2.projectPoints(pts3d,
                                     pvec[RVEC_IDX],
                                     pvec[TVEC_IDX],
                                     K, np.zeros(5))
    return projected
def draw_quads(self, img, quads, color=(0, 255, 0)):
    """Fill the projection of each 3-D quad onto img with `color`."""
    pts = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef)[0]
    pts.shape = quads.shape[:2] + (2,)
    for poly in pts:
        # Sub-pixel rendering: shift=2 -> multiply coordinates by 2**2.
        cv2.fillConvexPoly(img, np.int32(poly * 4), color, cv2.LINE_AA, shift=2)
pose_estimation.py 文件源码
项目:Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials
作者: taochenshh
项目源码
文件源码
阅读 27
收藏 0
点赞 0
评论 0
def ir_callback(self, data):
    """Estimate the chessboard pose from each incoming IR image message.

    Mirrors rgb_callback but uses the depth/IR camera intrinsics.
    Relies on module-level globals: x_num, y_num, criteria, objpoints,
    axis, depth_mtx, depth_dist.
    """
    try:
        gray = self.mkgray(data)
    except CvBridgeError as e:
        # Bail out on conversion failure.  The original fell through
        # with `gray` undefined, which raised NameError on the next line.
        print(e)
        return
    ret, corners = cv2.findChessboardCorners(gray, (x_num, y_num))
    cv2.imshow('img', gray)
    cv2.waitKey(5)
    if ret:
        # Refine detected corners to sub-pixel accuracy.
        cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)
        tempimg = gray.copy()
        cv2.drawChessboardCorners(tempimg, (x_num, y_num), corners, ret)
        # NOTE(review): OpenCV >= 3 returns (retval, rvec, tvec, inliers)
        # from solvePnPRansac -- this 3-tuple unpack assumes OpenCV 2.x.
        rvec, tvec, inliers = cv2.solvePnPRansac(objpoints, corners, depth_mtx, depth_dist)
        print("rvecs:")
        print(rvec)
        print("tvecs:")
        print(tvec)
        # Project the 3-D axis endpoints to the image plane and draw them.
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, depth_mtx, depth_dist)
        imgpts = np.int32(imgpts).reshape(-1, 2)
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[1]), [255, 0, 0], 4)  # BGR
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[2]), [0, 255, 0], 4)
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[3]), [0, 0, 255], 4)
        cv2.imshow('img', tempimg)
        cv2.waitKey(5)
def draw_quads(self, img, quads, color=(0, 255, 0)):
    """Project each 3-D quad and fill it on img with anti-aliasing."""
    flat = quads.reshape(-1, 3)
    screen_pts = cv2.projectPoints(flat, self.rvec, self.tvec, self.K, self.dist_coef)[0]
    screen_pts.shape = quads.shape[:2] + (2,)
    for face in screen_pts:
        # Coordinates carry 2 fractional bits (shift=2), hence the *4.
        cv2.fillConvexPoly(img, np.int32(face * 4), color, cv2.LINE_AA, shift=2)
def draw_quads(self, img, quads, color=(0, 255, 0)):
    """Rasterize the projected quads into img as filled convex polygons."""
    quad2d = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec,
                               self.K, self.dist_coef)[0].reshape(quads.shape[:2] + (2,))
    for q in quad2d:
        # Fixed-point coordinates with 2 fractional bits (shift=2).
        cv2.fillConvexPoly(img, np.int32(q * 4), color, cv2.LINE_AA, shift=2)
video.py 文件源码
项目:Image-Processing-and-Feature-Detection
作者: amita-kapoor
项目源码
文件源码
阅读 39
收藏 0
点赞 0
评论 0
def draw_quads(self, img, quads, color=(0, 255, 0)):
    """Draw each quad of `quads`, projected through the stored pose, filled on img."""
    projected = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef)[0]
    projected = projected.reshape(quads.shape[:2] + (2,))
    for polygon in projected:
        scaled = np.int32(polygon * 4)  # 2 fractional bits for shift=2
        cv2.fillConvexPoly(img, scaled, color, cv2.LINE_AA, shift=2)