import cv2
import numpy as np

def corners_unwarp(img, nx, ny, mtx, dist):
    # Use the OpenCV undistort() function to remove distortion
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    # Defaults in case no corners are found
    warped, M = np.copy(undist), None
    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose an offset from the image corners to plot the detected corners.
        # It should be chosen to present the result at a reasonable aspect ratio;
        # 100 pixels is not exact, but close enough for our purpose here.
        offset = 100  # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])
        # For source points, grab the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, choose four points that give a nice fit
        # for displaying the warped result -- again, not exact, but close enough
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                          [img_size[0]-offset, img_size[1]-offset],
                          [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)
    # Return the resulting image and matrix
    return warped, M
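A minimal usage sketch, assuming mtx and dist come from an earlier cv2.calibrateCamera() run; the file name and the 8x6 board size below are assumptions, not part of the original.

# Usage sketch -- 'calibration_test.png', nx and ny are hypothetical;
# mtx and dist are assumed to exist from a prior calibration step.
nx, ny = 8, 6
img = cv2.imread('calibration_test.png')
top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist)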
Python undistort() example source code
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # The image passed in has already been undistorted
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose an offset from the image corners to plot the detected corners.
        # It should be chosen to present the result at a reasonable aspect ratio;
        # 100 pixels is not exact, but close enough for our purpose here.
        offset = 100  # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])
        # For source points, grab the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, choose four points that give a nice fit
        # for displaying the warped result -- again, not exact, but close enough
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                          [img_size[0]-offset, img_size[1]-offset],
                          [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)
    # Return the resulting image and matrix
    return warped, M
def ray(self, pts, undistort=True, rotate=False, normalize=False):
    """
    Returns the rays corresponding to the given image points.
    Optionally undistort the points (defaults to True), rotate the rays
    into the camera's viewpoint, and normalize them to unit length.
    """
    upts = self.undistort_points(pts) if undistort else pts
    ret = unproject_points(
        np.hstack([(colvec(upts[:, 0]) - self.cx) / self.fx,
                   (colvec(upts[:, 1]) - self.cy) / self.fy])
    )
    if rotate:
        ret = self.extrinsics.rotate_vec(ret)
    if normalize:
        ret = ret / np.linalg.norm(ret, axis=1)[:, np.newaxis]
    return ret
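The helpers colvec() and unproject_points() are not shown in this snippet. A minimal sketch of what they presumably do (reshape to a column vector; append a unit z-component to normalized image coordinates) is below; the real project may implement them differently.

import numpy as np

# Hypothetical reconstructions of the undefined helpers used by ray() above.
def colvec(v):
    # Reshape a 1-D array into an (N, 1) column vector
    return np.asarray(v).reshape(-1, 1)

def unproject_points(xy):
    # Append z = 1 to normalized image coordinates to form rays in the camera frame
    xy = np.asarray(xy)
    return np.hstack([xy, np.ones((xy.shape[0], 1))])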
def update(self, frame, events):
    if self.collect_new:
        img = frame.img
        status, grid_points = cv2.findCirclesGrid(img, (4, 11), flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
        if status:
            self.img_points.append(grid_points)
            self.obj_points.append(self.obj_grid)
            self.collect_new = False
            self.count -= 1
            self.button.status_text = "{:d} to go".format(self.count)

    if self.count <= 0 and not self.calculated:
        self.calculate()
        self.button.status_text = ''

    if self.window_should_close:
        self.close_window()

    if self.show_undistortion:
        adjusted_k, roi = cv2.getOptimalNewCameraMatrix(cameraMatrix=self.camera_intrinsics[0],
                                                        distCoeffs=self.camera_intrinsics[1],
                                                        imageSize=self.camera_intrinsics[2],
                                                        alpha=0.5,
                                                        newImgSize=self.camera_intrinsics[2],
                                                        centerPrincipalPoint=1)
        self.undist_img = cv2.undistort(frame.img, self.camera_intrinsics[0], self.camera_intrinsics[1],
                                        newCameraMatrix=adjusted_k)
def binary_extraction(self, image, ksize=3):
    # undistort first
    #image = self.undistort(image)
    color_bin = self.color_thresh(image, thresh=(90, 150))  # initial values 110, 255

    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize)  # pass ksize by keyword, not positionally as dst
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize)

    gradx = self.abs_sobel_thresh(sobelx, thresh=(100, 190))  # initial values 40, 160
    grady = self.abs_sobel_thresh(sobely, thresh=(100, 190))  # initial values 40, 160
    mag_binary = self.mag_thresh(sobelx, sobely, mag_thresh=(100, 190))  # initial values 40, 160
    #dir_binary = self.dir_threshold(sobelx, sobely, thresh=(0.7, 1.3))

    combined = np.zeros_like(gradx)
    #combined[(((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))) | (color_bin == 1)] = 1
    combined[(((gradx == 1) & (grady == 1)) | (mag_binary == 1)) | (color_bin == 1)] = 1
    #combined[(((gradx == 1) & (grady == 1)) | (mag_binary == 1))] = 1

    return combined
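abs_sobel_thresh() and mag_thresh() are class methods not shown in this snippet; a minimal sketch of the usual implementations (scale to 8-bit, then threshold into a binary mask), written here as free functions, would look roughly like this. These are assumptions about the helpers, not the project's actual code.

import numpy as np

def abs_sobel_thresh(sobel, thresh=(0, 255)):
    # Scale the absolute gradient to 0-255 and keep pixels inside the threshold band
    abs_sobel = np.absolute(sobel)
    scaled = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    binary = np.zeros_like(scaled)
    binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return binary

def mag_thresh(sobelx, sobely, mag_thresh=(0, 255)):
    # Gradient magnitude, scaled and thresholded the same way
    mag = np.sqrt(sobelx**2 + sobely**2)
    scaled = np.uint8(255 * mag / np.max(mag))
    binary = np.zeros_like(scaled)
    binary[(scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])] = 1
    return binary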
# transform perspective
def undistort_image(im, K, D):
    """
    Optionally:
        newcamera, roi = cv2.getOptimalNewCameraMatrix(self.K, self.D, (W,H), 0)
    """
    H, W = im.shape[:2]
    # Note: Kprime and roi are computed here but not used; the original K is
    # passed as the new camera matrix below.
    Kprime, roi = cv2.getOptimalNewCameraMatrix(K, D, (W, H), 1, (W, H))
    return cv2.undistort(im, K, D, None, K)
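A hedged variant: to actually apply the refined camera matrix and crop away the black border, one would pass Kprime as the new camera matrix and slice with roi. This is a sketch of that option, not the original function's behavior.

def undistort_image_cropped(im, K, D):
    # Sketch: use the refined camera matrix, then crop to the valid region of interest
    H, W = im.shape[:2]
    Kprime, roi = cv2.getOptimalNewCameraMatrix(K, D, (W, H), 1, (W, H))
    undistorted = cv2.undistort(im, K, D, None, Kprime)
    x, y, w, h = roi
    return undistorted[y:y+h, x:x+w]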
# def camera_from_P(P):
def reconstruct(self, xyZ, undistort=True):
    """
    Reproject to 3D with calib params
    """
    Z = colvec(xyZ[:, 2])
    return self.ray(xyZ[:, :2], undistort=undistort) * Z
def undistort(self, im):
    return undistort_image(im, self.K, self.D)

def undistort_debug(self, im=None):
    if im is None:
        im = np.zeros(shape=self.shape, dtype=np.uint8)
    # Draw a grid every 20 pixels to visualize how undistortion warps straight lines
    im[::20, :] = 128
    im[:, ::20] = 128
    return self.undistort(im)

def undistort_image(self, image):
    return cv2.undistort(image, self.camera_matrix, self.dist_coeffs,
                         newCameraMatrix=self.new_camera_matrix)
def cal_undistort(img, objpoints, imgpoints):
    # Use cv2.calibrateCamera() and cv2.undistort()
    img_size = (img.shape[1], img.shape[0])
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,
                                                       imgpoints,
                                                       img_size,
                                                       None,
                                                       None)
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    return undist
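A hedged sketch of how objpoints and imgpoints are typically built from chessboard images before calling cal_undistort(); the glob pattern and the 9x6 board size are assumptions.

import glob
import cv2
import numpy as np

nx, ny = 9, 6                                         # assumed inner-corner counts
objp = np.zeros((nx * ny, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)   # (x, y, 0) grid in board units

objpoints, imgpoints = [], []
for fname in glob.glob('camera_cal/calibration*.jpg'):  # hypothetical path
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if ret:
        objpoints.append(objp)
        imgpoints.append(corners)

undistorted = cal_undistort(img, objpoints, imgpoints)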
def remove_distortion(images):
    out = calibrate(images)
    matrix = out['camera_matrix']
    dist = out['distortion_coefficient']
    undistorted_images = []
    for (image, color_image) in images:
        size = image.shape[::-1]
        new_matrix, roi = cv.getOptimalNewCameraMatrix(matrix, dist, size,
                                                       1, size)
        img = cv.undistort(color_image, matrix, dist, None, new_matrix)
        undistorted_images.append(img)
    return undistorted_images
def undistort(self, img):
    """ Restore an image from camera distortion using the calibration matrices
    Args:
        img: input image
    Returns:
        restored image
    """
    return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
def undistort_image(self, im):
    return cv2.undistort(im, self.camera_matrix, self.distortion_coefficients)

def set_cam_calib_param(self, mtx, dst):
    self.cam_mtx = mtx
    self.cam_dst = dst

# undistort image
def undistort(self, img):
    return cv2.undistort(img, self.cam_mtx, self.cam_dst, None, self.cam_mtx)

# get binary image based on color thresholding
def undistort_image(self, image):
    return cv2.undistort(image, self.mtx, self.dist, None, self.mtx)
def transform(self, img, k1=0.22, k2=0.24):
    """Apply barrel distortion using OpenCV's undistort operation

    This counteracts the pincushion distortion that the Oculus lens
    applies. The distortion coefficients k1 and k2 are the main
    action here.

    [1]: http://docs.opencv.org/trunk/doc/py_tutorials/py_calib3d/\
py_calibration/py_calibration.html
    """
    return cv2.undistort(
        img,
        self.d_matrix,
        np.array([k1, k2, 0, 0, 0])
    )
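A hedged usage sketch: d_matrix is presumably a pinhole camera matrix centered on the frame, so setting it up and calling the underlying cv2.undistort() might look like the following. The frame size and focal-length guess are assumptions, not values from the original project.

import cv2
import numpy as np

h, w = 800, 1280                          # assumed frame size
f = w / 2.0                               # rough focal-length guess
d_matrix = np.array([[f, 0, w / 2.0],
                     [0, f, h / 2.0],
                     [0, 0, 1.0]])

frame = np.zeros((h, w, 3), np.uint8)     # stand-in for a rendered frame
barrel = cv2.undistort(frame, d_matrix, np.array([0.22, 0.24, 0, 0, 0]))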
def getImage(self):
    image = self._getRawImage()
    image = cv2.undistort(image, self.matrix, self.distortion)
    return image
def undistort_image(self, image, coords=None):
    if self.nfields > 1:
        raise Exception('This feature is not supported for split-FOV images!')

    imobj_out = None
    if isinstance(image, CalCam_Image):
        imobj_out = copy.deepcopy(image)
        image = imobj_out.transform.original_to_display_image(imobj_out.data)
        coords = 'display'

    display_shape = self.transform.get_display_shape()

    if coords is None:
        coords = 'display'

    if image.shape[0] != display_shape[1] or image.shape[1] != display_shape[0]:
        if image.shape[0] == self.transform.y_pixels and image.shape[1] == self.transform.x_pixels:
            coords = 'original'
        else:
            raise ValueError('Supplied image is the wrong shape! Expected {:d}x{:d} or {:d}x{:d} pixels.'.format(display_shape[0], display_shape[1], self.transform.x_pixels, self.transform.y_pixels))

    if coords == 'original':
        im = self.transform.original_to_display_image(image)
    else:
        im = image

    im = cv2.undistort(im, self.fit_params[0].cam_matrix, self.fit_params[0].kc)

    if coords == 'original':
        im = self.transform.display_to_original_image(im)

    if imobj_out is not None:
        imobj_out.data = imobj_out.transform.display_to_original_image(im)
        return imobj_out
    else:
        return im
# Class for storing the calibration results.
# Has methods for post-processing the results to give useful information
# and for loading and saving the results
def undistort_crop2(orig_img):
    # undistort and crop
    # cv2.undistort(src, cameraMatrix, distCoeffs[, dst[, newCameraMatrix]]) -> dst
    dst = cv2.undistort(orig_img, mtx, dist, None, newcameramtx)
    x, y, w, h = roi
    crop_frame = dst[y:y+h, x:x+w]
    return crop_frame
# create maps for undistortion
def undistort_crop(orig_img):
    # undistort and crop
    dst = cv2.undistort(orig_img, mtx, dist, None, newcameramtx)
    x, y, w, h = roi
    crop_frame = dst[y:y+h, x:x+w]
    return crop_frame
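The "create maps for undistortion" comment above points at the map-based alternative: precompute the remap tables once and apply cv2.remap() per frame, which is cheaper than calling cv2.undistort() repeatedly. A hedged sketch, assuming mtx, dist, newcameramtx, roi and the frame size (w, h) are already available from calibration:

# Sketch: precompute undistortion maps once (faster when processing video frames).
map1, map2 = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx,
                                         (w, h), cv2.CV_32FC1)

def undistort_crop_remap(orig_img):
    # Apply the precomputed maps, then crop to the valid region of interest
    dst = cv2.remap(orig_img, map1, map2, interpolation=cv2.INTER_LINEAR)
    x, y, w_roi, h_roi = roi
    return dst[y:y+h_roi, x:x+w_roi]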