def __calibrate_intrinsics(camera, image_points, object_points, flags, criteria):
"""
Calibrate the intrinsics of the provided camera using the given image points, object points, calibration flags, and criteria.
@param camera: camera to calibrate
@param image_points: points in images taken with the camera that correspond to the 3d object_points.
@param object_points: 3d points on the object that appears in *each* of the images.
Usually, inner corners of a calibration board. Note: assumes *the same* object appears in all of the images.
@param flags: OpenCV camera calibration flags. For details, see OpenCV calib3d documentation, calibrate function.
@param criteria: OpenCV criteria.
@return: estimated object-space rotation & translation vectors of the camera (assuming object is static)
"""
# OpenCV prefers [width x height] as "Size" to [height x width]
frame_dims = (camera.intrinsics.resolution[1], camera.intrinsics.resolution[0])
start = time.time()
camera.intrinsics.error, camera.intrinsics.intrinsic_mat, camera.intrinsics.distortion_coeffs, \
rotation_vectors, translation_vectors = \
cv2.calibrateCamera(objectPoints=np.array([object_points]*len(image_points)), imagePoints=image_points,
imageSize=frame_dims, cameraMatrix=camera.intrinsics.intrinsic_mat,
distCoeffs=camera.intrinsics.distortion_coeffs,
flags=flags, criteria=criteria)
end = time.time()
camera.intrinsics.time = end - start
camera.intrinsics.timestamp = end
camera.intrinsics.calibration_image_count = len(image_points)
return rotation_vectors, translation_vectors
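A minimal usage sketch for the helper above, assuming a hypothetical `camera` object whose `intrinsics` attribute exposes the fields read and written by the function; the board geometry, image folder, and flag value are illustrative only.
import glob
import cv2
import numpy as np

# Assumed board geometry: 9x6 inner corners, 25 mm squares.
rows, cols, square = 6, 9, 0.025
object_points = np.zeros((rows * cols, 3), np.float32)
object_points[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2) * square

# Collect one set of detected corners per calibration image.
image_points = []
for path in glob.glob('calib_images/*.png'):  # placeholder folder
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    found, corners = cv2.findChessboardCorners(gray, (cols, rows), None)
    if found:
        image_points.append(corners)

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
# `camera` is assumed to provide camera.intrinsics.resolution, .intrinsic_mat,
# .distortion_coeffs and the bookkeeping fields written by the helper.
rvecs, tvecs = __calibrate_intrinsics(camera, image_points, object_points,
                                      flags=0, criteria=criteria)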
Python cv2.calibrateCamera() examples
def calibrate(images):
obj_points, img_points = find_points(images)
if len(img_points) == 0:
    raise RuntimeError('Impossible to calibrate: could not find any image points')
print('Calibrating using %s images...' % len(img_points))
image_size = images[0][0].shape[::-1]
reprojection_error, camera_matrix, distortion_coefficient, rotation_v, \
    translation_v = cv.calibrateCamera(obj_points, img_points, image_size, None, None)
out = {}
out['reprojection_error'] = reprojection_error
out['camera_matrix'] = camera_matrix
out['distortion_coefficient'] = distortion_coefficient
out['rotation_v'] = rotation_v
out['translation_v'] = translation_v
return out
def __init__(self, calibration=None, calib_data_path=None, rows=6, cols=9, cal_image_shape=None):
"""Helper class to remove lens distortion from images (camera calibration)
Args:
calibration: precalculated calibration matrices
calib_data_path: path to data for camera calibration
rows: number of rows on chessboard
cols: number of columns on chessboard
cal_image_shape: calibration image shape
"""
if calibration is not None:
self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(
calibration['objpoints'], calibration['imgpoints'], cal_image_shape, None, None)
else:
calibration = calculate_camera_calibration(
calib_data_path, rows, cols, cal_image_shape)
self.mtx = calibration['mtx']
self.dist = calibration['dist']
def __init__(self, image_points, dest_points, size=(3840, 2160)):
dest_3dpoints = [[x, y, 0] for x, y in dest_points]
_, camera_matrix, dist_coeffs, _, _ = cv2.calibrateCamera(
[np.float32([dest_3dpoints])],
[np.float32([image_points])],
size, None, None, flags=self.flags)
self.image_size = size
self.camera_matrix = camera_matrix
self.dist_coeffs = dist_coeffs
self.new_camera_matrix = cv2.getOptimalNewCameraMatrix(
camera_matrix, dist_coeffs, self.image_size, 0)[0]
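For context, the new camera matrix computed above (with alpha=0, so the result contains only valid pixels) is normally fed back into cv2.undistort; a minimal sketch under that assumption, with the frame and matrices passed in explicitly:
import cv2

def undistort_frame(frame, camera_matrix, dist_coeffs, new_camera_matrix):
    # Remove the lens distortion described by dist_coeffs and resample the
    # frame into the view described by new_camera_matrix.
    return cv2.undistort(frame, camera_matrix, dist_coeffs, None, new_camera_matrix)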
def calculate(self):
self.calculated = True
self.count = 10
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
    np.array(self.obj_points), np.array(self.img_points),
    self.g_pool.capture.frame_size, None, None)
logger.info("Calibrated Camera, RMS:{}".format(rms))
camera_calibration = {'camera_matrix': camera_matrix,
                      'dist_coefs': dist_coefs,
                      'camera_name': self.g_pool.capture.name,
                      'resolution': self.g_pool.capture.frame_size}
save_object(camera_calibration, os.path.join(self.g_pool.user_dir, "camera_calibration"))
logger.info("Calibration saved to user folder")
self.camera_intrinsics = camera_matrix, dist_coefs, self.g_pool.capture.frame_size
self.show_undistortion_switch.read_only = False
def calibrate_camera(img_pts, obj_pts, img_size):
# initial (empty) camera matrix and distortion coefficients
camera_matrix = np.zeros((3,3))
dist_coef = np.zeros(4)
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_pts, img_pts,
img_size, camera_matrix, dist_coef)
return camera_matrix, dist_coefs
def cal_fromcorners(self, good):
"""
:param good: Good corner positions and boards
:type good: [(corners, ChessboardInfo)]
"""
boards = [ b for (_, b) in good ]
ipts = [ points for (points, _) in good ]
opts = self.mk_object_points(boards)
self.intrinsics = numpy.zeros((3, 3), numpy.float64)
if self.calib_flags & cv2.CALIB_RATIONAL_MODEL:
self.distortion = numpy.zeros((8, 1), numpy.float64) # rational polynomial
else:
self.distortion = numpy.zeros((5, 1), numpy.float64) # plumb bob
# If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio
self.intrinsics[0,0] = 1.0
self.intrinsics[1,1] = 1.0
cv2.calibrateCamera(
opts, ipts,
self.size, self.intrinsics,
self.distortion,
flags = self.calib_flags)
# R is identity matrix for monocular calibration
self.R = numpy.eye(3, dtype=numpy.float64)
self.P = numpy.zeros((3, 4), dtype=numpy.float64)
self.set_alpha(0.0)
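The method ends with set_alpha(0.0), which typically derives a rectified projection matrix from the intrinsics via cv2.getOptimalNewCameraMatrix; a hedged sketch of such a step, not necessarily the exact upstream implementation:
import cv2
import numpy

def set_alpha_sketch(intrinsics, distortion, size, alpha=0.0):
    # alpha=0.0 crops to valid pixels only; alpha=1.0 keeps the whole original image.
    ncm, _roi = cv2.getOptimalNewCameraMatrix(intrinsics, distortion, size, alpha)
    # Monocular case: the projection matrix is the new camera matrix with a zero column.
    P = numpy.zeros((3, 4), dtype=numpy.float64)
    P[:3, :3] = ncm
    return ncm, P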
def cal_undistort(img, objpoints, imgpoints):
# Use cv2.calibrateCamera() and cv2.undistort()
img_size = (img.shape[1], img.shape[0])
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,
imgpoints,
img_size,
None,
None)
undist = cv2.undistort(img, mtx, dist, None, mtx)
return undist
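A short driver sketch for cal_undistort, assuming a 9x6 chessboard and placeholder file paths; only standard OpenCV calls are used.
import glob
import cv2
import numpy as np

# One set of ideal board coordinates, reused for every detected view.
objp = np.zeros((6 * 9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

objpoints, imgpoints = [], []
for fname in glob.glob('camera_cal/*.jpg'):  # placeholder path
    gray = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, (9, 6), None)
    if found:
        objpoints.append(objp)
        imgpoints.append(corners)

test_img = cv2.imread('test_images/test1.jpg')  # placeholder path
undistorted = cal_undistort(test_img, objpoints, imgpoints)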
def coeffs(self):
if not self._coeffs:
if not self.findCount:
raise NothingFound(
    'cannot create camera calibration because no corners have been found')
# http://en.wikipedia.org/wiki/Reprojection_error
try:
(reprojectionError, cameraMatrix, distortionCoeffs,
rotationVecs, translationVecs) = cv2.calibrateCamera(
self.objpoints,
self.opts['imgPoints'],
self.img.shape[::-1], None, None)
print('reprojectionError=%s' % reprojectionError)
except Exception as err:
raise NothingFound(err)
self._coeffs = OrderedDict([
('reprojectionError', reprojectionError),
('apertureSize', self.apertureSize),
('cameraMatrix', cameraMatrix),
('distortionCoeffs', distortionCoeffs),
('shape', self.img.shape),
#('rotationVecs',rotationVecs),
#('translationVecs',translationVecs),
])
if self.apertureSize is not None:
# calibrationMatrixValues expects imageSize as (width, height)
(fovx, fovy, focalLength, principalPoint,
 aspectRatio) = cv2.calibrationMatrixValues(
    cameraMatrix, self.img.shape[::-1], *self.apertureSize)
self._coeffs.update(OrderedDict([
('fovx', fovx),
('fovy', fovy),
('focalLength', focalLength),
('principalPoint', principalPoint),
('aspectRatio', aspectRatio)])
)
return self._coeffs
def getP(self, dst):
"""
dst: ??????
return self.MTX,self.DIST,self.RVEC,self.TVEC:
?? ?????????????????
"""
if self.SceneImage is None:
return None
corners = np.float32([dst[1], dst[0], dst[2], dst[3]])
gray = cv2.cvtColor(self.SceneImage, cv2.COLOR_BGR2GRAY)
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (0,1,0), (1,1,0)
objp = np.zeros((2*2,3), np.float32)
objp[:,:2] = np.mgrid[0:2,0:2].T.reshape(-1,2)
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
if self.PTimes < self.PCount or self.PCount == 0:
# Arrays to store object points and image points from all the images.
objpoints = self.OBJPoints # 3d point in real world space
imgpoints = self.IMGPoints # 2d points in image plane.
if len(imgpoints) == 0 or np.sum(np.abs(imgpoints[-1] - corners2)) != 0:
objpoints.append(objp)
imgpoints.append(corners2)
# Find mtx, dist, rvecs, tvecs
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
if not ret:
self.PTimes += 1
return None
self.OBJPoints = objpoints
self.IMGPoints = imgpoints
self.MTX = mtx
self.DIST = dist
self.RVEC = rvecs[0]
self.TVEC = tvecs[0]
else:
# Find the rotation and translation vectors.
_, rvec, tvec, _= cv2.solvePnPRansac(objp, corners2, self.MTX, self.DIST)
self.RVEC = rvec
self.TVEC = tvec
self.PTimes += 1
return self.MTX,self.DIST,self.RVEC,self.TVEC
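Once getP has produced MTX, DIST, RVEC and TVEC, the estimated pose can be sanity-checked by reprojecting known 3D points with cv2.projectPoints; the axis-drawing helper below is illustrative and not part of the original class.
import cv2
import numpy as np

def draw_pose_axes(image, mtx, dist, rvec, tvec, length=1.0):
    # Project a 3D axis frame anchored at the object origin into the image
    # and draw it (x red, y green, z blue) to visually verify the pose.
    axes = np.float32([[0, 0, 0], [length, 0, 0], [0, length, 0], [0, 0, -length]])
    pts, _ = cv2.projectPoints(axes, rvec, tvec, mtx, dist)
    origin = tuple(int(v) for v in pts[0].ravel())
    colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]  # BGR
    for end, color in zip(pts[1:], colors):
        cv2.line(image, origin, tuple(int(v) for v in end.ravel()), color, 2)
    return image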
def camera_cal(self, image):
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
nx = 8
ny = 6
dst = np.copy(image)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(7,5,0)
objp = np.zeros((ny * nx, 3), np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Search for chessboard corners
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#ret_thresh, mask = cv2.threshold(grey, 30, 255, cv2.THRESH_BINARY)
ret, corners = cv2.findChessboardCorners(image, (nx, ny), None) #flags=(cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv2.cv.CV_CALIB_CB_FILTER_QUADS))
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
corners = cv2.cornerSubPix(grey, corners, (11, 11), (-1, -1), criteria)
imgpoints.append(corners)
self.calibrated = True
print ("FOUND!")
#Draw and display the corners
cv2.drawChessboardCorners(image, (nx, ny), corners, ret)
# Do camera calibration given object points and image points
ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None)
# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = self.mtx
dist_pickle["dist"] = self.dist
dist_pickle['objpoints'] = objpoints
dist_pickle['imgpoints'] = imgpoints
pickle.dump( dist_pickle, open( "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/camera_cal_pickle.p", "wb" ) )
#else:
#print("Searching...")
return image
def calculate_camera_calibration(calib_path, rows, cols, cal_image_size):
"""Calculates the camera calibration based on chessboard images.
Args:
calib_path: calibration data (imgs) dir path
rows: number of rows on chessboard
cols: number of columns on chessboard
Returns:
a `dict` with calibration points
"""
objp = np.zeros((rows * cols, 3), np.float32)
objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)
objpoints = []
imgpoints = []
images = glob(calib_path)
cal_images = np.zeros((len(images), *cal_image_size), dtype=np.uint8)
successful_cnt = 0
for idx, fname in enumerate(tqdm(images, desc='Processing image')):
img = scipy.misc.imread(fname)
if img.shape[0] != cal_image_size[0] or img.shape[1] != cal_image_size[1]:
img = scipy.misc.imresize(img, cal_image_size)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)
if ret:
successful_cnt += 1
objpoints.append(objp)
imgpoints.append(corners)
img = cv2.drawChessboardCorners(img, (cols, rows), corners, ret)
cal_images[idx] = img
print("%s/%s camera calibration images processed." %
(successfull_cnt, len(images)))
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, cal_image_size[1::-1], None, None)  # imageSize as (width, height)
calibration = {'objpoints': objpoints,
'imgpoints': imgpoints,
'cal_images': cal_images,
'mtx': mtx,
'dist': dist,
'rvecs': rvecs,
'tvecs': tvecs}
return calibration
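A hedged example of consuming the dictionary returned above: recompute the calibration from a placeholder image folder (the glob pattern and image shape are assumptions), then undistort a single frame with the stored matrix and distortion coefficients.
import cv2

calibration = calculate_camera_calibration('camera_cal/*.jpg', rows=6, cols=9,
                                            cal_image_size=(720, 1280, 3))
frame = cv2.imread('test_images/test1.jpg')  # placeholder path
undistorted = cv2.undistort(frame, calibration['mtx'], calibration['dist'],
                            None, calibration['mtx'])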