def rgb_callback(self, data):
    try:
        self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
    gray = cv2.cvtColor(self.rgb_img, cv2.COLOR_BGR2GRAY)
    rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num, y_num), None)
    cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
    cv2.imshow('rgb_img', self.rgb_img)
    cv2.waitKey(5)
    if rgb_ret:
        rgb_tempimg = self.rgb_img.copy()
        cv2.cornerSubPix(gray, rgb_corners, (5, 5), (-1, -1), criteria)
        cv2.drawChessboardCorners(rgb_tempimg, (x_num, y_num), rgb_corners, rgb_ret)
        # Note: OpenCV >= 3 returns (retval, rvec, tvec, inliers) from solvePnPRansac.
        rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
        self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
        print("The world coordinate system's origin in the camera's coordinate system:")
        print("===rgb_camera rvec:")
        print(rgb_rvec)
        print("===rgb_camera rmat:")
        print(self.rgb_rmat)
        print("===rgb_camera tvec:")
        print(self.rgb_tvec)
        print("rgb_camera_shape: ")
        print(self.rgb_img.shape)
        print("The camera origin in the world coordinate system:")
        print("===camera rmat:")
        print(self.rgb_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))
        # Avoid shadowing the callback's `data` argument; close the file via a context manager.
        pose_data = {'rmat': self.rgb_rmat.tolist(), 'tvec': self.rgb_tvec.tolist()}
        with open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w") as rgb_stream:
            yaml.dump(pose_data, rgb_stream)
        cv2.imshow('rgb_img', rgb_tempimg)
        cv2.waitKey(5)
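The pose saved above can be reloaded to recover the camera origin in the chessboard (world) frame, mirroring what the callback prints. A minimal sketch using the same YAML path:

import numpy as np
import yaml

with open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml") as f:
    pose = yaml.safe_load(f)
rmat = np.array(pose['rmat'])
tvec = np.array(pose['tvec'])
print(-rmat.T.dot(tvec))   # camera origin in world coordinates, as printed above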
Python cv2.Rodrigues() usage examples

Source: get_extrinsics.py, from project Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, author taochenshh
def ir_callback(self, data):
    try:
        self.ir_img = self.mkgray(data)
    except CvBridgeError as e:
        print(e)
    ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num, y_num))
    cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
    cv2.imshow('ir_img', self.ir_img)
    cv2.waitKey(5)
    if ir_ret:
        ir_tempimg = self.ir_img.copy()
        cv2.cornerSubPix(ir_tempimg, ir_corners, (11, 11), (-1, -1), criteria)
        cv2.drawChessboardCorners(ir_tempimg, (x_num, y_num), ir_corners, ir_ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags=cv2.CV_EPNP)
        # Note: OpenCV >= 3 returns (retval, rvec, tvec, inliers) from solvePnPRansac.
        ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
        self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)
        print("The world coordinate system's origin in the camera's coordinate system:")
        print("===ir_camera rvec:")
        print(ir_rvec)
        print("===ir_camera rmat:")
        print(self.ir_rmat)
        print("===ir_camera tvec:")
        print(self.ir_tvec)
        print("ir_camera_shape: ")
        print(self.ir_img.shape)
        print("The camera origin in the world coordinate system:")
        print("===camera rmat:")
        print(self.ir_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.ir_rmat.T, self.ir_tvec))
        pose_data = {'rmat': self.ir_rmat.tolist(), 'tvec': self.ir_tvec.tolist()}
        with open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w") as depth_stream:
            yaml.dump(pose_data, depth_stream)
        cv2.imshow('ir_img', ir_tempimg)
        cv2.waitKey(5)
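Once both callbacks have stored their extrinsics, the relative pose between the two sensors follows by eliminating the world point from the two camera equations. A sketch of a helper that could live on the same class (hypothetical, not part of the original get_extrinsics.py):

def ir_to_rgb_transform(self):
    # From x_rgb = R_rgb @ X + t_rgb and x_ir = R_ir @ X + t_ir it follows that
    # x_rgb = R_rel @ x_ir + t_rel, with:
    R_rel = self.rgb_rmat.dot(self.ir_rmat.T)
    t_rel = self.rgb_tvec - R_rel.dot(self.ir_tvec)
    return R_rel, t_rel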
def detect(self, img):
    if len(img.shape) > 2:
        raise Exception("ChessboardDetector uses gray image as input")
    detection = None
    ret, corners = cv2.findChessboardCorners(img, self.chess_shape, None)
    if ret:
        # solvePnP expects float distortion coefficients; distortion is ignored here.
        ret, rvec, tvec = cv2.solvePnP(self.obj_points, corners, self.camera.matrix(), np.zeros(5))
        # invert axis convention
        rvec[1] = -rvec[1]
        rvec[2] = -rvec[2]
        tvec[1] = -tvec[1]
        tvec[2] = -tvec[2]
        detection = Transform()
        detection.matrix[0:3, 0:3] = cv2.Rodrigues(rvec)[0]
        # divide by 1000 to convert from millimetres to metres
        detection.set_translation(tvec[0] / 1000, tvec[1] / 1000, tvec[2] / 1000)
    return detection
def render(self, markers):
    for marker in markers:
        rvecs, tvecs, marker_rotation, marker_name = marker
        # build view matrix
        rmtx = cv2.Rodrigues(rvecs)[0]
        view_matrix = np.array([[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvecs[0]],
                                [rmtx[1][0], rmtx[1][1], rmtx[1][2], tvecs[1]],
                                [rmtx[2][0], rmtx[2][1], rmtx[2][2], tvecs[2]],
                                [0.0, 0.0, 0.0, 1.0]])
        # element-wise multiply; INVERSE_MATRIX is presumably a sign mask that
        # converts OpenCV's camera axes to OpenGL's
        view_matrix = view_matrix * self.INVERSE_MATRIX
        view_matrix = np.transpose(view_matrix)
        # load view matrix and draw cube
        glPushMatrix()
        glLoadMatrixd(view_matrix)
        if marker_name == MARKER_ONE:
            self.marker_one_textures[TEXTURE_FRONT] = cv2.flip(self._get_video_frame(), 0)
            self._draw_cube(marker_rotation, self.marker_one_textures)
        elif marker_name == MARKER_TWO:
            self._draw_cube(marker_rotation, self.marker_two_textures)
        glColor3f(1.0, 1.0, 1.0)
        glPopMatrix()
def project(self, X, check_bounds=False, check_depth=False, return_depth=False, min_depth=0.1):
    """
    Project [Nx3] points onto 2-D image plane [Nx2]
    """
    R, t = self.to_Rt()
    rvec, _ = cv2.Rodrigues(R)
    proj, _ = cv2.projectPoints(X, rvec, t, self.K, self.D)
    x = proj.reshape(-1, 2)
    # Depths are needed both for the depth check and for return_depth
    # (the original computed them only under check_depth, leaving `depths`
    # undefined when return_depth was requested alone).
    if check_depth or return_depth:
        depths = self.depth_from_projection(X)
    if check_depth:
        # Only return positive depths
        valid = depths >= min_depth
    else:
        valid = np.ones(len(x), dtype=bool)
    if check_bounds:
        if self.shape is None:
            raise ValueError('check_bounds cannot proceed. Camera.shape is not set')
        # Only return points within image bounds
        valid = np.bitwise_and(
            valid, np.bitwise_and(
                np.bitwise_and(x[:, 0] >= 0, x[:, 0] < self.shape[1]),
                np.bitwise_and(x[:, 1] >= 0, x[:, 1] < self.shape[0]))
        )
    if return_depth:
        return x[valid], depths[valid]
    return x[valid]
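For reference, here is the same cv2.projectPoints pipeline in isolation, with hypothetical pinhole intrinsics and an identity pose (a sketch, not the class's actual calibration data):

import numpy as np
import cv2

K = np.array([[525.0, 0.0, 319.5],
              [0.0, 525.0, 239.5],
              [0.0, 0.0, 1.0]])   # hypothetical pinhole intrinsics
D = np.zeros(5)                    # assume no lens distortion
rvec = np.zeros(3)                 # identity rotation
tvec = np.zeros(3)                 # zero translation

X = np.array([[0.1, -0.2, 2.0], [0.0, 0.0, 1.0]])  # Nx3 points in the camera frame
proj, _ = cv2.projectPoints(X, rvec, tvec, K, D)
print(proj.reshape(-1, 2))         # Nx2 pixel coordinates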
def __init__(self, g_pool, eye_camera_to_world_matrix, camera_intrinsics, cal_points_3d, cal_ref_points_3d, cal_gaze_points_3d, gaze_distance=500):
    super().__init__(g_pool)
    self.eye_camera_to_world_matrix = eye_camera_to_world_matrix
    self.rotation_matrix = self.eye_camera_to_world_matrix[:3, :3]
    self.rotation_vector = cv2.Rodrigues(self.rotation_matrix)[0]
    self.translation_vector = self.eye_camera_to_world_matrix[:3, 3]
    self.camera_matrix = camera_intrinsics['camera_matrix']
    self.dist_coefs = camera_intrinsics['dist_coefs']
    self.world_frame_size = camera_intrinsics['resolution']
    self.camera_intrinsics = camera_intrinsics
    self.cal_points_3d = cal_points_3d
    self.cal_ref_points_3d = cal_ref_points_3d
    self.cal_gaze_points_3d = cal_gaze_points_3d
    self.visualizer = Calibration_Visualizer(g_pool, camera_intrinsics, cal_points_3d, cal_ref_points_3d, eye_camera_to_world_matrix, cal_gaze_points_3d)
    self.g_pool = g_pool
    self.gaze_pts_debug = []
    self.sphere = {}
    self.gaze_distance = gaze_distance
def fun(self, x, params):
    # scale
    s = params[0]
    # rotation
    r = params[1:4]
    # translation
    t = params[4:6]
    w = params[6:]
    mean3DShape = x[0]
    blendshapes = x[1]
    # rotation matrix from the rotation vector (Rodrigues' formula)
    R = cv2.Rodrigues(r)[0]
    P = R[:2]
    shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)
    projected = s * np.dot(P, shape3D) + t[:, np.newaxis]
    return projected
def jacobian(self, params, x, y):
    s = params[0]
    r = params[1:4]
    t = params[4:6]
    w = params[6:]
    mean3DShape = x[0]
    blendshapes = x[1]
    R = cv2.Rodrigues(r)[0]
    P = R[:2]
    shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)
    nPoints = mean3DShape.shape[1]
    # nPoints * 2 because each point has two dimensions (x and y)
    jacobian = np.zeros((nPoints * 2, self.nParams))
    # derivative w.r.t. scale is the unscaled projection
    jacobian[:, 0] = np.dot(P, shape3D).flatten()
    # rotation derivatives via forward differences (one column per rotation-vector component)
    stepSize = 10e-4
    for i in range(1, 4):
        step = np.zeros(self.nParams)
        step[i] = stepSize
        jacobian[:, i] = ((self.fun(x, params + step) - self.fun(x, params)) / stepSize).flatten()
    # translation shifts the x and y coordinates directly
    jacobian[:nPoints, 4] = 1
    jacobian[nPoints:, 5] = 1
    startIdx = self.nParams - self.nBlendshapes
    for i in range(self.nBlendshapes):
        jacobian[:, i + startIdx] = s * np.dot(P, blendshapes[i]).flatten()
    return jacobian
# not used
def getShape3D(mean3DShape, blendshapes, params):
    # scale
    s = params[0]
    # rotation
    r = params[1:4]
    # translation
    t = params[4:6]
    w = params[6:]
    # rotation matrix from the rotation vector (Rodrigues' formula)
    R = cv2.Rodrigues(r)[0]
    shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)
    shape3D = s * np.dot(R, shape3D)
    shape3D[:2, :] = shape3D[:2, :] + t[:, np.newaxis]
    return shape3D
def imgPointToWorldCoord(XY, rvec, tvec, cameraMatrix, zconst=0):
    '''
    @returns 3d object coords
    :param XY: tuple (ix, iy) of 2d point arrays (x, y) in the image
    :param zconst: height above the image plane (if 0, then on the image plane)
    http://answers.opencv.org/question/62779/image-coordinate-to-world-coordinate-opencv/
    and
    http://stackoverflow.com/questions/12299870/computing-x-y-coordinate-3d-from-image-point
    '''
    (ix, iy) = XY
    uvPoint = np.array([ix.ravel(), iy.ravel(),
                        np.ones(shape=ix.size)]).reshape(3, ix.size)
    R = cv2.Rodrigues(rvec)[0]
    iR = inv(R)            # inv is numpy.linalg.inv
    iC = inv(cameraMatrix)
    t = iR.dot(iC).dot(uvPoint)
    t2 = iR.dot(tvec)
    # scale factor that places the back-projected ray at height zconst
    s = (zconst + t2[2]) / t[2]
    objP = iR.dot(s * iC.dot(uvPoint) - tvec)
    return objP
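A minimal usage sketch with hypothetical camera parameters (identity rotation, simple pinhole intrinsics): back-projecting the principal point lands on the world origin of the z=0 plane.

import numpy as np
from numpy.linalg import inv

cameraMatrix = np.array([[500.0, 0.0, 320.0],
                         [0.0, 500.0, 240.0],
                         [0.0, 0.0, 1.0]])
rvec = np.zeros((3, 1))                 # identity rotation
tvec = np.array([[0.0], [0.0], [2.0]])  # camera 2 units above the plane
ix = np.array([320.0])                  # pixel coordinates of the principal point
iy = np.array([240.0])
print(imgPointToWorldCoord((ix, iy), rvec, tvec, cameraMatrix))  # ~ (0, 0, 0)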
def rvec2euler(rvec):
    return mat2euler(cv2.Rodrigues(rvec)[0], axes='rzxy')

def euler2rvec(a0, a1, a2):
    return cv2.Rodrigues(euler2mat(a0, a1, a2, axes='rzxy'))[0]
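A quick round-trip check of the two helpers above (assuming mat2euler/euler2mat come from a transforms3d-style euler module, as the axes='rzxy' spec suggests):

import numpy as np

angles = (0.3, -0.1, 0.2)
assert np.allclose(rvec2euler(euler2rvec(*angles)), angles)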
def objectOrientation(self):
    tvec, r = self.pose()
    eulerAngles = mat2euler(cv2.Rodrigues(r)[0], axes='rzxy')
    tilt = eulerAngles[1]
    rot = eulerAngles[0]
    dist = tvec[2, 0]  # only take the depth component, not np.linalg.norm(tvec)
    return dist, tilt, rot
def camera_position(self, pose=None):
    '''
    returns camera position in world coordinates using self.rvec and self.tvec
    from http://stackoverflow.com/questions/14515200/python-opencv-solvepnp-yields-wrong-translation-vector
    '''
    if pose is None:
        pose = self.pose()
    t, r = pose
    # camera centre in world coordinates: C = -R.T @ t
    return -np.matrix(cv2.Rodrigues(r)[0]).T * np.matrix(t)
def cvt2ModelView(self, Rx=np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])):
    # Rx flips the y and z axes, mapping OpenCV's camera frame to OpenGL's.
    R, _ = cv2.Rodrigues(self.rvec)
    Rt = np.hstack((R, self.tvec))
    M = np.eye(4)
    M[:3, :] = np.dot(Rx, Rt)
    return M
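The Rx argument above is the usual OpenCV-to-OpenGL axis flip (y and z negated). A one-line illustration with a made-up point:

import numpy as np

Rx = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
print(Rx.dot(np.array([0.1, 0.2, 1.0])))   # [ 0.1 -0.2 -1. ]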
def detect(self, img):
    self.likelihood = self.detector.detect_mat(img)
    detection = None
    if self.likelihood > 0.1:
        # get board and draw it
        board = self.detector.getDetectedBoard()
        rvec = board.Rvec.copy()
        tvec = board.Tvec.copy()
        matrix = cv2.Rodrigues(rvec)[0]
        rodrigues = mat2euler(matrix)
        detection = Transform.from_parameters(tvec[0], -tvec[1], -tvec[2],
                                              rodrigues[0], -rodrigues[1], -rodrigues[2])
    return detection
def register_chessboard(self):
    """
    Compute the transformation between robot pose and camera pose using chessboard
    registration techniques.

    Returns
    -------
    numpy array:
        (3,4) shape array that is the computed transformation
    """
    p_mm = self.get_corners_mm()
    c_mm = self.find_chessboard()
    dist_coef = np.zeros(4)
    ret, r_vec, t_vec = cv2.solvePnP(p_mm.T, c_mm.T, self.C, dist_coef)
    r_mat, j = cv2.Rodrigues(r_vec)
    trans = np.zeros([3, 4])
    trans[0:3, 0:3] = r_mat
    trans[:, 3] = t_vec[:, 0]
    self.trans = trans
    return trans
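To apply the returned (3,4) transform, append a homogeneous 1 to a board-frame point. A short sketch, with trans as returned above and a made-up corner position in millimetres:

import numpy as np

p_board = np.array([10.0, 20.0, 0.0, 1.0])  # hypothetical corner, homogeneous (mm)
p_cam = trans.dot(p_board)                   # the same point in the camera frame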
def get_cam_to_lab_rotation(self, field=None):
    if field is None:
        if self.nfields > 1:
            raise Exception('This calibration has multiple sub-fields; you must specify a pixel location to get_cam_to_lab_rotation!')
        else:
            field = 0
    rotation_matrix = np.matrix(cv2.Rodrigues(self.fit_params[field].rvec)[0])
    return rotation_matrix.transpose()

# Given pixel coordinates x,y, return the NORMALISED
# coordinates of the corresponding un-distorted points.
def __init__(self, g_pool, eye_camera_to_world_matrix0, eye_camera_to_world_matrix1, camera_intrinsics, cal_points_3d=[], cal_ref_points_3d=[], cal_gaze_points0_3d=[], cal_gaze_points1_3d=[]):
    super().__init__(g_pool)
    self.eye_camera_to_world_matricies = eye_camera_to_world_matrix0, eye_camera_to_world_matrix1
    self.rotation_matricies = eye_camera_to_world_matrix0[:3, :3], eye_camera_to_world_matrix1[:3, :3]
    self.rotation_vectors = cv2.Rodrigues(eye_camera_to_world_matrix0[:3, :3])[0], cv2.Rodrigues(eye_camera_to_world_matrix1[:3, :3])[0]
    self.translation_vectors = eye_camera_to_world_matrix0[:3, 3], eye_camera_to_world_matrix1[:3, 3]
    self.cal_points_3d = cal_points_3d
    self.cal_ref_points_3d = cal_ref_points_3d
    self.cal_gaze_points0_3d = cal_gaze_points0_3d  # save for debug window
    self.cal_gaze_points1_3d = cal_gaze_points1_3d  # save for debug window
    self.camera_matrix = camera_intrinsics['camera_matrix']
    self.dist_coefs = camera_intrinsics['dist_coefs']
    self.world_frame_size = camera_intrinsics['resolution']
    self.camera_intrinsics = camera_intrinsics
    self.visualizer = Calibration_Visualizer(g_pool,
                                             world_camera_intrinsics=camera_intrinsics,
                                             cal_ref_points_3d=cal_points_3d,
                                             cal_observed_points_3d=cal_ref_points_3d,
                                             eye_camera_to_world_matrix0=self.eye_camera_to_world_matricies[0],
                                             cal_gaze_points0_3d=cal_gaze_points0_3d,
                                             eye_camera_to_world_matrix1=self.eye_camera_to_world_matricies[1],
                                             cal_gaze_points1_3d=cal_gaze_points1_3d)
    self.g_pool = g_pool
    self.gaze_pts_debug0 = []
    self.gaze_pts_debug1 = []
    self.intersection_points_debug = []
    self.sphere0 = {}
    self.sphere1 = {}
    self.last_gaze_distance = 500.
def handle_monocular(self, msg):
    (image, camera) = msg
    gray = self.mkgray(image)
    C = self.image_corners(gray)
    if C is not None:
        linearity_rms = self.mc.linear_error(C, self.board)
        # Add in reprojection check
        image_points = C
        object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0]
        dist_coeffs = numpy.zeros((4, 1))
        camera_matrix = numpy.array([[camera.P[0], camera.P[1], camera.P[2]],
                                     [camera.P[4], camera.P[5], camera.P[6]],
                                     [camera.P[8], camera.P[9], camera.P[10]]])
        ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)
        # Convert rotation into a 3x3 Rotation Matrix
        rot3x3, _ = cv2.Rodrigues(rot)
        # Reproject model points into image
        object_points_world = numpy.asmatrix(rot3x3) * numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans)
        reprojected_h = camera_matrix * object_points_world
        reprojected = reprojected_h[0:2, :] / reprojected_h[2, :]
        reprojection_errors = image_points.squeeze().T - reprojected
        # numpy.prod replaces the deprecated numpy.product
        reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.prod(reprojection_errors.shape))
        # Print the results
        print("Linearity RMS Error: %.3f Pixels Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms))
    else:
        print('no chessboard')
def getGLM(self):
    R, _ = cv2.Rodrigues(self.RVEC)
    Rt = np.hstack((R, self.TVEC))
    Rx = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
    M = np.eye(4)
    M[:3, :] = np.dot(Rx, Rt)
    # OpenGL expects column-major order, hence the transpose before flattening.
    m = M.T
    return m.flatten()
# Debug code.
def drawCross(img, params, center=(100, 100), scale=30.0):
    # rotation matrix from the pose's rotation-vector parameters
    R = cv2.Rodrigues(params[1:4])[0]
    points = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
    points = np.dot(points, R.T)
    points2D = points[:, :2]
    points2D = (points2D * scale + center).astype(np.int32)
    # axes drawn in BGR: x blue, y green, z red
    cv2.line(img, (center[0], center[1]), (points2D[0, 0], points2D[0, 1]), (255, 0, 0), 3)
    cv2.line(img, (center[0], center[1]), (points2D[1, 0], points2D[1, 1]), (0, 255, 0), 3)
    cv2.line(img, (center[0], center[1]), (points2D[2, 0], points2D[2, 1]), (0, 0, 255), 3)
def __init__(self, transform=None, inverse_transform=None, rotation=None, translation_vector=None):
    if translation_vector is not None:
        if type(translation_vector) != np.ndarray:
            translation_vector = np.array(translation_vector)
        if translation_vector.shape != (3, 1):
            translation_vector = translation_vector.reshape(3, 1)
    if rotation is not None:
        if type(rotation) != np.ndarray:
            rotation = np.array(rotation)
        if rotation.size == 9:
            rotation_vector = cv2.Rodrigues(rotation)[0]
            rotation_matrix = rotation
        elif rotation.size == 3:
            rotation_matrix = cv2.Rodrigues(rotation)[0]
            rotation_vector = rotation
        else:
            raise ValueError(
                "Wrong rotation size: {:d}. Expecting a 3-length vector or 3x3 matrix.".format(rotation.size))
    if transform is None:
        if translation_vector is None or rotation is None:
            raise ValueError("Expecting either the transform matrix or both the rotation & translation vector")
        self.T = np.vstack((np.append(rotation_matrix, translation_vector, axis=1), [0, 0, 0, 1]))
    else:
        self.T = transform
        if translation_vector is None:
            translation_vector = transform[0:3, 3].reshape(3, 1)
        if rotation is None:
            rotation_matrix = transform[0:3, 0:3]
            rotation_vector = cv2.Rodrigues(rotation_matrix)[0]
    if inverse_transform is None:
        rot_mat_inv = rotation_matrix.T
        inverse_translation = -rot_mat_inv.dot(translation_vector)
        inverse_transform = np.vstack((np.append(rot_mat_inv, inverse_translation, 1), [0, 0, 0, 1]))
    self.rmat = rotation_matrix
    self.tvec = translation_vector
    self.rvec = rotation_vector
    self.T_inv = inverse_transform
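Assuming this __init__ belongs to a Transform-style pose class, a small sketch that builds one from a rotation vector and checks that T_inv really inverts T:

import numpy as np

tf = Transform(rotation=np.array([0.0, 0.0, np.pi / 2]),
               translation_vector=[1.0, 2.0, 3.0])
assert np.allclose(tf.T.dot(tf.T_inv), np.eye(4))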
def compute_r(self):
    import cv2
    return cv2.Rodrigues(self.rt.r)[0]

def compute_dr_wrt(self, wrt):
    import cv2
    if wrt is self.rt:
        # cv2.Rodrigues also returns the Jacobian; the transpose arranges it so
        # rows index the 9 entries of R and columns the 3 rotation-vector components.
        return cv2.Rodrigues(self.rt.r)[1].T
def rotate_to_xz_plane(points, normal=None):
    '''
    Rotates points to the x-z plane. If the initial center
    of mass is not within 1e-5 of the origin, we
    translate it to the origin.

    Returns (r, R, p0):
      - r is the rotated image of points,
      - R is the rotation matrix
      - p0 is the translation factor (can be None)
    '''
    import cv2
    from blmath.geometry.transform.translation import translation
    if points is None or not len(points):  # pylint: disable=len-as-condition
        raise ValueError('Some points are required')
    center = np.mean(points, axis=0)
    if np.linalg.norm(center) > 1e-5:
        translated, p0 = translation(points)
    else:
        translated, p0 = points, None
    if normal is None:  # `if not normal:` would raise on a multi-element numpy array
        normal = estimate_normal(points)
    e_2 = np.array([0., 1., 0.])
    theta = np.arccos(np.dot(e_2, normal))
    if min(abs(theta - np.pi), abs(theta)) < 1e-5:
        # the cross product would degenerate to the zero vector in this case
        r_axis = np.array([1., 0., 0.])
    else:
        r_axis = np.cross(normal, e_2)
        r_axis /= np.linalg.norm(r_axis)
    R = cv2.Rodrigues(theta * r_axis)[0]
    rotated = np.dot(translated, R.T)
    return (rotated, R, p0)
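A minimal usage sketch with an explicit normal (so the estimate_normal fallback is never hit): points in the x-y plane end up in the x-z plane.

import numpy as np

pts = np.array([[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]])
rotated, R, p0 = rotate_to_xz_plane(pts, normal=np.array([0.0, 0.0, 1.0]))
assert np.allclose(rotated[:, 1], 0.0)   # all y-components are now zero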