def render_lane(image, corners, ploty, fitx):
    _, src, dst = perspective_transform(image, corners)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # Create a blank image to draw the lane on
    warp_zero = np.zeros_like(image[:, :, 0]).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into a usable format for cv2.polylines()
    pts = np.vstack((fitx, ploty)).astype(np.int32).T
    # Draw the lane onto the warped blank image
    cv2.polylines(color_warp, [pts], False, (0, 255, 0), 10)
    # Alternatively, fill the lane area: cv2.fillPoly(color_warp, [pts], (0, 255, 0))
    # Warp the blank back to original image space using the inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))
    # Combine the result with the original image
    result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)
    return result
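A hedged usage sketch (not from the original project): corners are the four road-plane source points consumed by the perspective_transform helper shown further down, and fitx holds the x-values of a fitted lane polynomial sampled at ploty.

image = cv2.imread('road.jpg')                       # hypothetical test frame
corners = np.float32([[253, 697], [585, 456], [700, 456], [1061, 690]])
ploty = np.linspace(0, image.shape[0] - 1, image.shape[0])
fit = np.array([1e-4, -0.35, 800.0])                 # example polynomial coefficients
fitx = fit[0] * ploty ** 2 + fit[1] * ploty + fit[2]
overlay = render_lane(image, corners, ploty, fitx)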
Python examples of cv2.getPerspectiveTransform()
def corners_unwarp(img, nx, ny, mtx, dist):
    # Use the OpenCV undistort() function to remove distortion
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    # Convert the undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    warped, M = np.copy(undist), None  # fallbacks if no corners are found
    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose an offset from the image corners to plot the detected corners.
        # This should be chosen to present the result at the proper aspect ratio;
        # 100 pixels is not exact, but close enough for our purpose here.
        offset = 100  # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])
        # For source points, grab the outer four detected corners
        src = np.float32([corners[0], corners[nx - 1], corners[-1], corners[-nx]])
        # For destination points, arbitrarily choose points that give
        # a nice fit for displaying the warped result
        dst = np.float32([[offset, offset], [img_size[0] - offset, offset],
                          [img_size[0] - offset, img_size[1] - offset],
                          [offset, img_size[1] - offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)
    # Return the resulting image and matrix
    return warped, M
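A usage sketch under stated assumptions: mtx and dist come from a prior cv2.calibrateCamera run on chessboard images; the 9x6 pattern size, image size, and filename are illustrative.

# objpoints / imgpoints assumed collected from chessboard images beforehand
image_size = (1280, 720)   # (width, height) of the calibration images
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                   image_size, None, None)
warped, M = corners_unwarp(cv2.imread('calibration1.jpg'), 9, 6, mtx, dist)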
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # The caller has already removed distortion
    undist = undistorted
    # Convert the undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose an offset from the image corners to plot the detected corners;
        # 100 pixels is not exact, but close enough for our purpose here.
        offset = 100  # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])
        # For source points, grab the outer four detected corners
        src = np.float32([corners[0], corners[nx - 1], corners[-1], corners[-nx]])
        # For destination points, arbitrarily choose points that give
        # a nice fit for displaying the warped result
        dst = np.float32([[offset, offset], [img_size[0] - offset, offset],
                          [img_size[0] - offset, img_size[1] - offset],
                          [offset, img_size[1] - offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)
    # Return the resulting image and matrix
    return warped, M
def m_screen_to_marker(marker):
    # verts need to be sorted counterclockwise starting at bottom left
    # marker coord system:
    # +-----------+
    # |0,1     1,1|  ^
    # |           | / \
    # |           |  |  UP
    # |0,0     1,0|  |
    # +-----------+
    mapped_space_one = np.array(((0, 0), (1, 0), (1, 1), (0, 1)), dtype=np.float32)
    return cv2.getPerspectiveTransform(np.array(marker['verts'], dtype=np.float32), mapped_space_one)
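The returned homography maps screen-space pixels into the marker's unit square; a hedged sketch applying it with cv2.perspectiveTransform, which expects points shaped (N, 1, 2):

M = m_screen_to_marker(marker)                               # marker['verts'] assumed set
screen_pts = np.array([[[320.0, 240.0]]], dtype=np.float32)  # one point, shape (1, 1, 2)
marker_pts = cv2.perspectiveTransform(screen_pts, M)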
def four_point_transform(image, pts):
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped
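Each four_point_transform variant on this page relies on an order_points helper that is not reproduced here. A minimal sketch of the common sum/difference heuristic (an assumption, not the original projects' code):

def order_points(pts):
    # order a (4, 2) array as top-left, top-right, bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]      # top-left has the smallest x + y
    rect[2] = pts[np.argmax(s)]      # bottom-right has the largest x + y
    d = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(d)]      # top-right has the smallest y - x
    rect[3] = pts[np.argmax(d)]      # bottom-left has the largest y - x
    return rect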
def perspective_transform(img, points):
    """Transform img so that points become the new corners"""
    source = np.array(points, dtype="float32")
    dest = np.array([
        [TRANSF_SIZE, TRANSF_SIZE],
        [0, TRANSF_SIZE],
        [0, 0],
        [TRANSF_SIZE, 0]],
        dtype="float32")
    transf = cv2.getPerspectiveTransform(source, dest)
    warped = cv2.warpPerspective(img, transf, (TRANSF_SIZE, TRANSF_SIZE))
    return warped
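TRANSF_SIZE is a module-level constant (the square output's side length in pixels). A hedged usage sketch; note the destination order means the input points are expected as bottom-right, bottom-left, top-left, top-right:

TRANSF_SIZE = 512                                      # assumed value
quad = [(410, 300), (100, 310), (95, 90), (405, 80)]   # br, bl, tl, tr
flat = perspective_transform(img, quad)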
def perspective_transform(image, corners, debug=False, xoffset=0):
    height, width = image.shape[0:2]
    new_top_left = np.array([corners[0, 0], 0])
    new_top_right = np.array([corners[3, 0], 0])
    offset = [xoffset, 0]
    src = np.float32([corners[0], corners[1], corners[2], corners[3]])
    dst = np.float32([corners[0] + offset, new_top_left + offset,
                      new_top_right - offset, corners[3] - offset])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)
    if debug:
        drawQuad(image, src, [255, 0, 0])
        drawQuad(warped, dst, [255, 255, 0])
        plt.imshow(image)
        plt.show()
        plt.imshow(warped)
        plt.show()
    return warped, src, dst
def perspective_transform(self, image, debug=True, size_top=70, size_bottom=370):
    height, width = image.shape[0:2]
    # Alternative, parametrized source quad:
    # src = np.float32([[(width/2) - size_top, height*0.65], [(width/2) + size_top, height*0.65],
    #                   [(width/2) + size_bottom, height-50], [(width/2) - size_bottom, height-50]])
    src = np.float32([[512, 450], [675, 454], [707, 560], [347, 568]])
    dst = np.float32([[347, height], [707, height], [707, 0], [347, 0]])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)
    if debug:
        print(M)
        self.drawQuad(image, src, [255, 0, 0])
        self.drawQuad(image, dst, [255, 255, 0])
        plt.imshow(image)
        plt.show()
    return warped
def im_augmentation(ims_src, weight, vec, trans=0.1, color_dev=0.1, distortion=True):
    num, W, H, _ = ims_src.shape
    if distortion:
        ran_noise = np.random.random((4, 2))
        ran_color = np.random.randn(3,)
    else:
        ran_noise = np.ones((4, 2)) * 0.5
        ran_color = np.zeros(3,)
    # perspective distortion: jitter the four corners inward by up to trans * (W, H)
    dst = np.float32([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]) * np.float32([W, H])
    noise = trans * ran_noise * np.float32([[1., 1.], [-1., 1.], [1., -1.], [-1., -1.]]) * [W, H]
    src = np.float32(dst + noise)
    mat = cv2.getPerspectiveTransform(src, dst)
    for i in range(num):
        ims_src[i] = cv2.warpPerspective(ims_src[i], mat, (W, H))
    # color deviation along the channel eigenvectors
    deviation = np.dot(vec, (color_dev * ran_color * weight)) * 255.
    ims_src += deviation[None, None, None, :]
    return ims_src, mat
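weight and vec look like AlexNet-style PCA color-jitter inputs: eigenvalues and eigenvectors of the RGB channel covariance. A sketch of how they might be derived and used (an assumption about the caller, not code from the original project):

ims = np.random.rand(8, 64, 64, 3).astype(np.float32)   # float batch in [0, 1]
pixels = ims.reshape(-1, 3)
cov = np.cov(pixels, rowvar=False)                      # 3x3 channel covariance
weight, vec = np.linalg.eigh(cov)                       # eigenvalues, eigenvectors
aug, mat = im_augmentation(ims.copy(), weight, vec)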
preview_dataset.py (project: ego-lane-analysis-system, author: rodrigoberriel)
def apply_ipm(img, config, ys):
    # IPM (inverse perspective mapping)
    y_top, y_bottom = min(ys), max(ys)
    ipm_pts = config['dataset']['ipm_points']
    roi = config['dataset']['region_of_interest']
    src = np.array([
        [ipm_pts['@top_left'], y_top],
        [ipm_pts['@top_right'], y_top],
        [ipm_pts['@bottom_right'], y_bottom],
        [ipm_pts['@bottom_left'], y_bottom],
    ], dtype="float32")
    dst = np.array([
        [ipm_pts['@top_left'], 0],
        [ipm_pts['@top_right'], 0],
        [ipm_pts['@top_right'], roi['@height']],
        [ipm_pts['@top_left'], roi['@height']],
    ], dtype="float32")
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, (roi['@width'], roi['@height']))
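The @-prefixed keys suggest the config was parsed from XML (e.g. via xmltodict); a hedged sketch of the structure apply_ipm expects, with illustrative values:

config = {'dataset': {
    'ipm_points': {'@top_left': 280, '@top_right': 360,
                   '@bottom_left': 0, '@bottom_right': 640},
    'region_of_interest': {'@width': 640, '@height': 480},
}}
birdseye = apply_ipm(img, config, ys=[210, 480])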
def randomShiftScaleRotate(img, shift_limit=0.0625, scale_limit=0.1, rotate_limit=45, u=0.5):
    if random.random() < u:
        height, width, channel = img.shape
        angle = random.uniform(-rotate_limit, rotate_limit)  # degrees
        scale = random.uniform(1 - scale_limit, 1 + scale_limit)
        dx = round(random.uniform(-shift_limit, shift_limit) * width)   # shift in pixels
        dy = round(random.uniform(-shift_limit, shift_limit) * height)
        cc = math.cos(math.radians(angle)) * scale
        ss = math.sin(math.radians(angle)) * scale
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])
        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height]])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])
        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        img = cv2.warpPerspective(img, mat, (width, height), flags=cv2.INTER_LINEAR,
                                  borderMode=cv2.BORDER_REFLECT_101)  # or cv2.BORDER_CONSTANT with borderValue=(0, 0, 0)
    return img
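A usage sketch (the snippet assumes random and math are imported at module level); u=1.0 forces the transform, which is handy for testing:

img = cv2.imread('sample.jpg')   # assumed 3-channel input
aug = randomShiftScaleRotate(img, shift_limit=0.05, scale_limit=0.1,
                             rotate_limit=15, u=1.0)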
def detect_cnt_again(poly, base_img):
    """
    Detect the sheet contour again inside the ROI and warp it to a front-on view
    :param poly: ndarray
    :param base_img: ndarray
    :return: ndarray
    """
    # flag marking whether the contour was re-detected inside the ROI
    flag = False
    # crop the ROI from the base image using the polygon's corner points
    top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
    roi_img = get_roi_img(base_img, bottom_left, bottom_right, top_left, top_right)
    img = get_init_process_img(roi_img)
    # find the largest contour in the ROI
    cnt = get_max_area_cnt(img)
    # if the contour covers enough of the ROI, accept the re-detection
    if cv2.contourArea(cnt) > roi_img.shape[0] * roi_img.shape[1] * SHEET_AREA_MIN_RATIO:
        flag = True
        poly = cv2.approxPolyDP(cnt, cv2.arcLength(cnt, True) * 0.1, True)
        top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
        if poly.shape[0] != 4:
            raise PolyNodeCountError
    # perspective-transform the sheet so it fills the base image
    base_poly_nodes = np.float32([top_left[0], bottom_left[0], top_right[0], bottom_right[0]])
    base_nodes = np.float32([[0, 0],
                             [base_img.shape[1], 0],
                             [0, base_img.shape[0]],
                             [base_img.shape[1], base_img.shape[0]]])
    transmtx = cv2.getPerspectiveTransform(base_poly_nodes, base_nodes)
    if flag:
        img_warp = cv2.warpPerspective(roi_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    else:
        img_warp = cv2.warpPerspective(base_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    return img_warp
inverse_perspective_mapping_node.py (project: autonomous_driving, author: StatueFungus)
def _calculate_transformation_matrix(self):
    p1_w, p2_w, p3_w, p4_w = self._calculate_world_coordinates()
    rect = np.array([
        [0, self.horizon_y],
        [self.image_resolution[1] - 1, self.horizon_y],
        [self.image_resolution[1] - 1, self.image_resolution[0] - 1],
        [0, self.image_resolution[0] - 1]
    ], dtype="float32")
    p1_new, p2_new, p3_new, p4_new = self._calculate_destination_points(
        p1_w, p2_w, p3_w, p4_w)
    dst = np.array([
        [p1_new[0], p1_new[1]],
        [p2_new[0], p2_new[1]],
        [p3_new[0], p3_new[1]],
        [p4_new[0], p4_new[1]]
    ], dtype="float32")
    self.transformation_matrix = cv2.getPerspectiveTransform(rect, dst)
    # width: rightmost destination point / height: height of the original image
    self.transformated_image_resolution = (int(p2_new[0]), self.image_resolution[0])
def fix_target_perspective(contour, bin_shape):
    """
    Fixes the perspective so the target always looks as if we are viewing it head-on
    :param contour:
    :param bin_shape: numpy shape of the binary image matrix
    :return: a new version of the contour with corrected perspective, and a new binary image to test against
    """
    before_warp = np.zeros(bin_shape, np.uint8)
    cv2.drawContours(before_warp, [contour], -1, 255, -1)
    try:
        corners = get_corners(contour)
        # get a perspective transformation so that the target is warped as if it was viewed head on
        shape = (400, 280)
        dest_corners = np.array([(0, 0), (shape[0], 0), (0, shape[1]), (shape[0], shape[1])], np.float32)
        warp = cv2.getPerspectiveTransform(corners, dest_corners)
        fixed_perspective = cv2.warpPerspective(before_warp, warp, shape)
        fixed_perspective = fixed_perspective.astype(np.uint8)
        # findContours returns (image, contours, hierarchy) only on OpenCV 3.x;
        # 2.x and 4.x return (contours, hierarchy)
        if cv2.__version__.split('.')[0] == '3':
            _, contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        new_contour = contours[0]
        return new_contour, fixed_perspective
    except ValueError:
        raise ValueError('Failed to detect rectangle')
def transform_by4(self, img, points):
    points = sorted(points, key=lambda x: x[1])
    if len(points) == 4:
        top = sorted(points[:2], key=lambda x: x[0])
        bottom = sorted(points[2:], key=lambda x: x[0], reverse=True)
        points = np.array(top + bottom, dtype='float32')
    else:
        y_min, y_max = points[0][1], points[-1][1]
        points = sorted(points, key=lambda x: x[0])
        x_min, x_max = points[0][0], points[-1][0]
        points = np.array([np.array([x_min, y_min]),
                           np.array([x_max, y_min]),
                           np.array([x_max, y_max]),
                           np.array([x_min, y_max])],
                          np.float32)
    # estimate the output size from the diagonals (the sqrt(2) factor scales
    # the diagonal x/y spans back to edge lengths for a roughly square view)
    width = max(np.sqrt(((points[0][0] - points[2][0]) ** 2) * 2),
                np.sqrt(((points[1][0] - points[3][0]) ** 2) * 2))
    height = max(np.sqrt(((points[0][1] - points[2][1]) ** 2) * 2),
                 np.sqrt(((points[1][1] - points[3][1]) ** 2) * 2))
    dst = np.array([np.array([0, 0]),
                    np.array([width - 1, 0]),
                    np.array([width - 1, height - 1]),
                    np.array([0, height - 1]),
                    ], np.float32)
    # compute the homography from the sorted quad to the output rectangle and warp
    trans = cv2.getPerspectiveTransform(points, dst)
    return cv2.warpPerspective(img, trans, (int(width), int(height)))
def m_verts_to_screen(verts):
    # verts need to be sorted counter-clockwise starting at bottom left
    return cv2.getPerspectiveTransform(marker_corners_norm, verts)

def m_verts_from_screen(verts):
    # verts need to be sorted counter-clockwise starting at bottom left
    return cv2.getPerspectiveTransform(verts, marker_corners_norm)
def move_vertex(self, vert_idx, new_pos):
    """
    This fn is used to manipulate the surface boundary (coordinate system).
    new_pos is in uv-space coords.
    If we move one vertex of the surface we need to find the transformation
    from the old quadrilateral to the new quadrilateral and apply that
    transformation to our marker uv-coords.
    """
    before = marker_corners_norm
    after = before.copy()
    after[vert_idx] = new_pos
    transform = cv2.getPerspectiveTransform(after, before)
    for m in self.markers.values():
        m.uv_coords = cv2.perspectiveTransform(m.uv_coords, transform)
def m_marker_to_screen(marker):
    # verts need to be sorted counterclockwise starting at bottom left
    # marker coord system:
    # +-----------+
    # |0,1     1,1|  ^
    # |           | / \
    # |           |  |  UP
    # |0,0     1,0|  |
    # +-----------+
    mapped_space_one = np.array(((0, 0), (1, 0), (1, 1), (0, 1)), dtype=np.float32)
    return cv2.getPerspectiveTransform(mapped_space_one, np.array(marker['verts'], dtype=np.float32))
def compute_warp(rect):
    # now that we have our rectangle of points, let's compute
    # the width of our new image
    (tl, tr, br, bl) = rect
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    # ...and now for the height of our new image
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    # take the maximum of the width and height values to reach
    # our final dimensions
    maxWidth = max(int(widthA), int(widthB))
    maxHeight = max(int(heightA), int(heightB))
    # construct our destination points which will be used to
    # map the screen to a top-down, "birds eye" view
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    # calculate the perspective transform matrix and warp
    # the perspective to grab the screen
    M = cv2.getPerspectiveTransform(rect, dst)
    return (maxWidth, maxHeight, dst, M)
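Unlike the four_point_transform variants above, compute_warp returns the matrix without applying it; a follow-up sketch:

maxWidth, maxHeight, dst, M = compute_warp(rect)   # rect from order_points
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))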
def transform(image, rectpoints, dpmm):
    docpxls = (int(DOCSIZE[0] * dpmm), int(DOCSIZE[1] * dpmm))
    docrect = np.array(
        [(0, 0), (docpxls[0], 0), (docpxls[0], docpxls[1]), (0, docpxls[1])],
        'float32')
    transmat = cv2.getPerspectiveTransform(np.array(rectpoints, 'float32'), docrect)
    return cv2.warpPerspective(image, transmat, docpxls)
def transform_img(self):
    """Transform the top-down image of the arc so that it lies flat in a plane on our cv_image"""
    if self.vel is not None and self.omega is not None:
        pts1 = np.float32([[0, 0], [0, IMG_HEIGHT], [IMG_WIDTH, IMG_HEIGHT], [IMG_WIDTH, 0]])
        pts2 = np.float32([[200, 240], [0, IMG_HEIGHT], [IMG_WIDTH, IMG_HEIGHT], [400, 240]])
        M = cv2.getPerspectiveTransform(pts1, pts2)
        # warpPerspective expects dsize as (width, height), i.e. (cols, rows)
        self.transformed = cv2.warpPerspective(self.arc_image, M,
                                               (self.cv_image.shape[1], self.cv_image.shape[0]))
        rows, cols, channels = self.transformed.shape
        self.transformed = self.transformed[0:IMG_HEIGHT, 0:cols]
def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "birds eye view"
    # (i.e. top-down view) of the image, again specifying points
    # in top-left, top-right, bottom-right, bottom-left order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    # return the warped image
    return warped
def simplePerspectiveTransform(img, quad, shape=None,
                               interpolation=cv2.INTER_LINEAR,
                               inverse=False):
    p = sortCorners(quad).astype(np.float32)
    if shape is not None:
        height, width = shape
    else:
        # get output image size from the average quad edge lengths
        width = int(round(0.5 * (np.linalg.norm(p[0] - p[1]) +
                                 np.linalg.norm(p[3] - p[2]))))
        height = int(round(0.5 * (np.linalg.norm(p[1] - p[2]) +
                                  np.linalg.norm(p[0] - p[3]))))
    dst = np.float32([[0, 0],
                      [width, 0],
                      [width, height],
                      [0, height]])
    if inverse:
        s0, s1 = img.shape[:2]
        dst /= ((width / s1), (height / s0))
        H = cv2.getPerspectiveTransform(dst, p)
    else:
        H = cv2.getPerspectiveTransform(p, dst)
    return cv2.warpPerspective(img, H, (width, height), flags=interpolation)
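A round-trip sketch: with inverse=True the rectified patch is mapped back into the original image plane (sortCorners is assumed to behave like the order_points helper sketched earlier; the quad values are illustrative):

quad = np.float32([[95, 90], [405, 80], [410, 300], [100, 310]])
flat = simplePerspectiveTransform(img, quad)                 # rectify the quad
back = simplePerspectiveTransform(flat, quad, shape=img.shape[:2],
                                  inverse=True)              # re-embed it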
def __init__(self, src, dst):
    """Perspective and inverse perspective transformer

    Args:
        src: Source coordinates for the perspective transformation
        dst: Destination coordinates for the perspective transformation
    """
    self.src = src
    self.dst = dst
    self.M = cv2.getPerspectiveTransform(src, dst)
    self.M_inv = cv2.getPerspectiveTransform(dst, src)
def transform(self, img, offset=0):
    if offset == 0:
        return self.warp(img, self.M)
    else:
        src = self.src.copy()
        src[:, 0] = src[:, 0] + offset
        dst = self.dst.copy()
        dst[:, 0] = dst[:, 0] + offset
        # forward transform with both quads shifted horizontally by offset
        M_offset = cv2.getPerspectiveTransform(src, dst)
        return self.warp(img, M_offset)
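Usage of this transformer pair (the class name and a warp(img, M) wrapper around cv2.warpPerspective are assumptions; only __init__ and transform are shown above):

src = np.float32([[253, 697], [585, 456], [700, 456], [1061, 690]])
dst = np.float32([[303, 720], [303, 0], [977, 0], [977, 720]])
transformer = PerspectiveTransformer(src, dst)   # hypothetical class name
birdseye = transformer.transform(frame)          # frame: a road image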