def _calculate_transformation_matrix(self):
    """Compute the perspective (IPM) transform and store it on the instance.

    Builds a source quadrilateral covering the image region below
    ``self.horizon_y`` and maps it onto the destination points derived
    from the world coordinates of the four reference points.

    Reads:
        self.horizon_y: pixel row of the horizon line.
        self.image_resolution: (height, width) of the input image.

    Writes:
        self.transformation_matrix: 3x3 perspective transform matrix.
        self.transformated_image_resolution: (width, height) of the
            transformed output image — width taken from the right-most
            destination point, height kept from the original image.
            (Attribute name kept as-is for caller compatibility.)
    """
    world_points = self._calculate_world_coordinates()

    img_height = self.image_resolution[0]
    img_width = self.image_resolution[1]

    # Source quad, ordered top-left, top-right, bottom-right, bottom-left.
    source_quad = np.array(
        [
            [0, self.horizon_y],
            [img_width - 1, self.horizon_y],
            [img_width - 1, img_height - 1],
            [0, img_height - 1],
        ],
        dtype="float32",
    )

    # Destination quad in the same point order as the source quad.
    destination_points = self._calculate_destination_points(*world_points)
    destination_quad = np.array(
        [[point[0], point[1]] for point in destination_points],
        dtype="float32",
    )

    self.transformation_matrix = cv2.getPerspectiveTransform(
        source_quad, destination_quad)

    # Output width comes from the x of the second (top-right) destination
    # point; output height stays equal to the original image height.
    top_right = destination_points[1]
    self.transformated_image_resolution = (int(top_right[0]), img_height)
# Source file: inverse_perspective_mapping_node.py
# (Scraped blog-page metadata — view/like/comment counters — removed; it was
# not Python and made the file unparsable.)