Python examples of cv2.warpPerspective()
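Every snippet on this page follows the same core pattern: build a 3x3 homography matrix (with cv2.getPerspectiveTransform, cv2.findHomography, or by hand) and apply it with cv2.warpPerspective. As a minimal, self-contained sketch of that pattern (the corner coordinates and file name are made up for illustration):

import cv2
import numpy as np

img = cv2.imread('input.png')  # any BGR image
h, w = img.shape[:2]

# Four source corners (hypothetical values) and the rectangle they should map to.
src = np.float32([[50, 60], [w - 40, 55], [w - 30, h - 50], [40, h - 45]])
dst = np.float32([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]])

M = cv2.getPerspectiveTransform(src, dst)     # 3x3 perspective transform matrix
warped = cv2.warpPerspective(img, M, (w, h))  # dsize is (width, height)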

lane_detection_module.py (project: diy_driverless_car_ROS, author: wilselby)
def render_lane(image, corners, ploty, fitx):

    _, src, dst = perspective_transform(image, corners)
    Minv = cv2.getPerspectiveTransform(dst, src)

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(image[:,:,0]).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts = np.vstack((fitx,ploty)).astype(np.int32).T

    # Draw the lane onto the warped blank image
    #plt.plot(left_fitx, ploty, color='yellow')
    cv2.polylines(color_warp, [pts], False, (0, 255, 0), 10)
    #cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0])) 

    # Combine the result with the original image
    result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)

    return result
warp.py (project: SDcarsLaneDetection, author: Nazanin1369)
def corners_unwarp(img, nx, ny, mtx, dist):
    # Default return values in case the chessboard corners are not found
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                                     [img_size[0]-offset, img_size[1]-offset],
                                     [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M
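A hypothetical call, assuming mtx and dist were computed beforehand with cv2.calibrateCamera() and the chessboard has 9x6 inner corners (the file name and board size are illustrative):

img = cv2.imread('calibration_test.jpg')
top_down, M = corners_unwarp(img, 9, 6, mtx, dist)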
calibration_camera.py (project: SelfDrivingCar, author: aguijarro)
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                                     [img_size[0]-offset, img_size[1]-offset],
                                     [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M
roomba.py (project: Roomba980-Python, author: NickWaterton)
def match_outlines(self, orig_image, skewed_image):
        orig_image = np.array(orig_image)
        skewed_image = np.array(skewed_image)
        try:
            surf = cv2.xfeatures2d.SURF_create(400)
        except Exception:
            surf = cv2.SIFT(400)
        kp1, des1 = surf.detectAndCompute(orig_image, None)
        kp2, des2 = surf.detectAndCompute(skewed_image, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            # see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
            ss = M[0, 1]
            sc = M[0, 0]
            scaleRecovered = math.sqrt(ss * ss + sc * sc)
            thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
            self.log.info("MAP: Calculated scale difference: %.2f, "
                          "Calculated rotation difference: %.2f" %
                          (scaleRecovered, thetaRecovered))

            #deskew image
            im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M),
                (orig_image.shape[1], orig_image.shape[0]))
            return im_out

        else:
            self.log.warn("MAP: Not  enough  matches are found   -   %d/%d"
                          % (len(good), MIN_MATCH_COUNT))
            return skewed_image
cpm_utils.py (project: convolutional-pose-machines-tensorflow, author: timctho)
def warpImage(src, theta, phi, gamma, scale, fovy):
    halfFovy = fovy * 0.5
    d = math.hypot(src.shape[1], src.shape[0])
    sideLength = scale * d / math.cos(deg2Rad(halfFovy))
    sideLength = np.int32(sideLength)

    M = warpMatrix(src.shape[1], src.shape[0], theta, phi, gamma, scale, fovy)
    dst = cv2.warpPerspective(src, M, (sideLength, sideLength))
    mid_x = mid_y = dst.shape[0] // 2
    target_x = target_y = src.shape[0] // 2
    offset = (target_x % 2)

    if len(dst.shape) == 3:
        dst = dst[mid_y - target_y:mid_y + target_y + offset,
              mid_x - target_x:mid_x + target_x + offset,
              :]
    else:
        dst = dst[mid_y - target_y:mid_y + target_y + offset,
              mid_x - target_x:mid_x + target_x + offset]

    return dst
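warpImage depends on warpMatrix() and deg2Rad(), which live elsewhere in cpm_utils.py and are not reproduced on this page. deg2Rad is presumably the standard degree-to-radian conversion; a minimal sketch of that assumption:

import math

def deg2Rad(deg):
    # Assumed helper: convert degrees to radians.
    return deg * math.pi / 180.0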
noise.py (project: RacingRobot, author: sergionr2)
def rotateImage(image, phi, theta, psi):
    """
    Rotate an image
    :param image: (cv2 image object)
    :param phi: (float)
    :param theta: (float)
    :param psi: (float)
    :return: (cv2 image object)
    """
    # Height, Width, Channels
    h, w, c = image.shape
    F = np.float32([[300, 0, w / 2.], [0, 300, h / 2.], [0, 0, 1]])
    R = rotMatrix([phi, theta, psi])
    T = [[0], [0], [1]]
    T = np.dot(R, T)
    R[0][2] = T[0][0]
    R[1][2] = T[1][0]
    R[2][2] = T[2][0]
    M = np.dot(F, np.linalg.inv(np.dot(F, R)))
    out = cv2.warpPerspective(image, M, (w, h))
    return out
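rotMatrix() is a helper from the same module that builds the 3x3 rotation matrix from the three Euler angles; it is not shown on this page. A plausible sketch, composing rotations about the x, y and z axes (an assumption about the original, with angles in radians):

import numpy as np

def rotMatrix(angles):
    # Assumed helper: 3x3 rotation matrix from Euler angles [phi, theta, psi].
    phi, theta, psi = angles
    rx = np.array([[1, 0, 0],
                   [0, np.cos(phi), -np.sin(phi)],
                   [0, np.sin(phi), np.cos(phi)]])
    ry = np.array([[np.cos(theta), 0, np.sin(theta)],
                   [0, 1, 0],
                   [-np.sin(theta), 0, np.cos(theta)]])
    rz = np.array([[np.cos(psi), -np.sin(psi), 0],
                   [np.sin(psi), np.cos(psi), 0],
                   [0, 0, 1]])
    return rz.dot(ry).dot(rx)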
main.py (project: specularity-removal, author: gmichaeljaison)
def _solve(img1, img2):
    h, w, d = img1.shape

    # step 1: Find homography of 2 images
    homo = homography(img2, img1)

    # step 2: warp image2 to image1 frame
    img2_w = cv.warpPerspective(img2, homo, (w, h))

    # step 3: resolve highlights by picking the best pixels out of two images
    im1 = _resolve_spec(img1, img2_w)

    # step 4: repeat the same process for Image2 using warped Image1
    im_w = cv.warpPerspective(im1, np.linalg.inv(homo), (w, h))
    im2 = _resolve_spec(img2, im_w)

    return im1, im2
piwall.py (project: piwall-cvtools, author: infinnovation)
def do_warp(M, warp):
    # Note: orig, maxWidth and maxHeight (and image, edged used below) are
    # module-level globals in the original script, not parameters
    warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))
    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
    warp = exposure.rescale_intensity(warp, out_range = (0, 255))

    # the pokemon we want to identify will be in the top-right
    # corner of the warped image -- let's crop this region out
    (h, w) = warp.shape
    (dX, dY) = (int(w * 0.4), int(h * 0.45))
    crop = warp[10:dY, w - dX:w - 10]

    # save the cropped image to file
    cv2.imwrite("cropped.png", crop)

    # show our images
    cv2.imshow("image", image)
    cv2.imshow("edge", edged)
    cv2.imshow("warp", imutils.resize(warp, height = 300))
    cv2.imshow("crop", imutils.resize(crop, height = 300))
    cv2.waitKey(0)
Juggalo.py (project: inyourface, author: yacomink)
def maskFace(self, frame_image, face):

        img1 = cv2.imread(self.__class__.mask_path, cv2.IMREAD_UNCHANGED)
        elements = cv2.imread(self.__class__.mask_elements_path, cv2.IMREAD_UNCHANGED)

        h, status = cv2.findHomography(self.average_points, np.array(self.getFacePoints(face)))
        mask = self.getTransPIL(cv2.warpPerspective(img1, h, (frame_image.width,frame_image.height)))
        mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))

        enhancer = ImageEnhance.Color(frame_image)
        enhanced = enhancer.enhance(0.1)
        enhancer = ImageEnhance.Brightness(enhanced)
        enhanced = enhancer.enhance(1.2)
        enhancer = ImageEnhance.Contrast(enhanced)
        enhanced = enhancer.enhance(1.2)

        frame_image.paste(enhanced, (0,0), mask)
        frame_image.paste(mask_elements, (0,0), mask_elements)
find_rect_and_transform.py (project: quadrilaterals-rectifier, author: michal2229)
def extract_rect(im):
    imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)

    ret,thresh = cv2.threshold(imgray, 127, 255, 0)

    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # finding contour with max area
    largest = None
    for cnt in contours:
        if largest is None or cv2.contourArea(cnt) > cv2.contourArea(largest):
            largest = cnt

    peri = cv2.arcLength(largest, True)
    appr = cv2.approxPolyDP(largest, 0.02 * peri, True)

    #cv2.drawContours(im, appr, -1, (0,255,0), 3)
    points_list = [[i[0][0], i[0][1]] for i in appr] 

    left  = sorted(points_list, key = lambda p: p[0])[0:2]
    right = sorted(points_list, key = lambda p: p[0])[2:4]

    print("l " + str(left))
    print("r " + str(right))

    lu = sorted(left, key = lambda p: p[1])[0]
    ld = sorted(left, key = lambda p: p[1])[1]

    ru = sorted(right, key = lambda p: p[1])[0]
    rd = sorted(right, key = lambda p: p[1])[1]

    print("lu " + str(lu))
    print("ld " + str(ld))
    print("ru " + str(ru))
    print("rd " + str(rd))

    # use integer division so the coordinates can be used as slice indices below
    lu_ = [(lu[0] + ld[0]) // 2, (lu[1] + ru[1]) // 2]
    ld_ = [(lu[0] + ld[0]) // 2, (ld[1] + rd[1]) // 2]
    ru_ = [(ru[0] + rd[0]) // 2, (lu[1] + ru[1]) // 2]
    rd_ = [(ru[0] + rd[0]) // 2, (ld[1] + rd[1]) // 2]

    print("lu_ " + str(lu_))
    print("ld_ " + str(ld_))
    print("ru_ " + str(ru_))
    print("rd_ " + str(rd_))

    src_pts = np.float32(np.array([lu, ru, rd, ld]))
    dst_pts = np.float32(np.array([lu_, ru_, rd_, ld_]))

    h,w,b = im.shape
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    print("H" + str(H))

    imw =  cv2.warpPerspective(im, H, (w, h))

    return imw[lu_[1]:rd_[1], lu_[0]:rd_[0]] # cropping image
border_removal.py (project: idmatch, author: maddevsio)
def four_point_transform(image, pts):
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32"
    )
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped
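four_point_transform relies on an order_points() helper defined elsewhere in the project, which sorts the four input points into top-left, top-right, bottom-right, bottom-left order; the next two snippets use the same helper. The widely used implementation looks like the sketch below (an assumption, not necessarily the project's exact code):

import numpy as np

def order_points(pts):
    # Assumed helper: order 4 points as top-left, top-right, bottom-right, bottom-left.
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]   # top-left has the smallest x + y
    rect[2] = pts[np.argmax(s)]   # bottom-right has the largest x + y
    d = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(d)]   # top-right has the smallest y - x
    rect[3] = pts[np.argmax(d)]   # bottom-left has the largest y - x
    return rect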
utils.py (project: idmatch, author: maddevsio)
def four_point_transform(image, pts):
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")

    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped
vignettingFromRandomSteps.py (project: imgProcessor, author: radjkarl)
def _fitImg(self, img):
        '''
        fit perspective and size of the input image to the reference image
        '''
        img = imread(img, 'gray')
        if self.bg is not None:
            img = cv2.subtract(img, self.bg)

        if self.lens is not None:
            img = self.lens.correct(img, keepSize=True)

        (H, _, _, _, _, _, _, n_matches) = self.findHomography(img)
        H_inv = self.invertHomography(H)

        s = self.obj_shape
        fit = cv2.warpPerspective(img, H_inv, (s[1], s[0]))
        return fit, img, H, H_inv, n_matches
omr.py (project: omr, author: rbaron)
def perspective_transform(img, points):
    """Transform img so that points are the new corners"""

    source = np.array(
        points,
        dtype="float32")

    dest = np.array([
        [TRANSF_SIZE, TRANSF_SIZE],
        [0, TRANSF_SIZE],
        [0, 0],
        [TRANSF_SIZE, 0]],
        dtype="float32")

    img_dest = img.copy()
    transf = cv2.getPerspectiveTransform(source, dest)
    warped = cv2.warpPerspective(img, transf, (TRANSF_SIZE, TRANSF_SIZE))
    return warped
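TRANSF_SIZE is a module-level constant in omr.py giving the side length, in pixels, of the square warped output; its exact value is not shown on this page. Any positive integer works, for example:

TRANSF_SIZE = 512  # assumed value, purely for illustration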
lane_detection_module.py (project: diy_driverless_car_ROS, author: wilselby)
def perspective_transform(image, corners, debug=False, xoffset=0):

    height, width = image.shape[0:2]
    output_size = height/2

    new_top_left = np.array([corners[0, 0], 0])
    new_top_right = np.array([corners[3, 0], 0])
    offset = [xoffset, 0]
    img_size = (image.shape[1], image.shape[0])
    src = np.float32([corners[0], corners[1], corners[2], corners[3]])
    dst = np.float32([corners[0]+offset, new_top_left+offset, new_top_right-offset, corners[3]-offset])

    M = cv2.getPerspectiveTransform(src, dst)

    warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)

    if debug:
        drawQuad(image, src, [255, 0, 0])
        drawQuad(warped, dst, [255, 255, 0])
        plt.imshow(image)
        plt.show()
        plt.imshow(warped)
        plt.show()

    return warped, src, dst
camera_calibration.py (project: diy_driverless_car_ROS, author: wilselby)
def perspective_transform(self, image, debug=True, size_top=70, size_bottom=370):
        height, width = image.shape[0:2]
        output_size = height/2

        #src = np.float32([[(width/2) - size_top, height*0.65], [(width/2) + size_top, height*0.65], [(width/2) + size_bottom, height-50], [(width/2) - size_bottom, height-50]])
        src = np.float32([[512, 450], [675, 454], [707, 560], [347, 568]])
        dst = np.float32([[347, height], [707, height], [707, 0], [347, 0]])
        #dst = np.float32([[(width/2) - output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) + output_size], [(width/2) - output_size, (height/2) + output_size]])

        M = cv2.getPerspectiveTransform(src, dst)
        print(M)
        warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)

        if debug:
            self.drawQuad(image, src, [255, 0, 0])
            self.drawQuad(image, dst, [255, 255, 0])
            plt.imshow(image)
            plt.show()

        return warped
utils_multi.py (project: crnn_tf, author: liuhu-bigeye)
def im_augmentation(ims_src, weight, vec, trans=0.1, color_dev=0.1, distortion=True):
    num, W, H, _ = ims_src.shape
    if distortion:
        ran_noise = np.random.random((4, 2))
        ran_color = np.random.randn(3,)
    else:
        ran_noise = np.ones((4, 2)) * 0.5
        ran_color = np.zeros(3,)

    # perspective translation
    dst = np.float32([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]) * np.float32([W, H])
    noise = trans * ran_noise * np.float32([[1., 1.], [-1., 1.], [1., -1.], [-1., -1.]]) * [W, H]
    src = np.float32(dst + noise)

    mat = cv2.getPerspectiveTransform(src, dst)
    for i in range(num):
        ims_src[i] = cv2.warpPerspective(ims_src[i], mat, (W, H))

    # color deviation
    deviation = np.dot(vec, (color_dev * ran_color * weight)) * 255.
    ims_src += deviation[None, None, None, :]

    return ims_src, mat
preview_dataset.py (project: ego-lane-analysis-system, author: rodrigoberriel)
def apply_ipm(img, config, ys):
    # IPM
    y_top, y_bottom = min(ys), max(ys)
    ipm_pts = config['dataset']['ipm_points']
    roi = config['dataset']['region_of_interest']

    src = np.array([
        [ipm_pts['@top_left'], y_top],
        [ipm_pts['@top_right'], y_top],
        [ipm_pts['@bottom_right'], y_bottom],
        [ipm_pts['@bottom_left'], y_bottom],
    ], dtype="float32")

    dst = np.array([
        [ipm_pts['@top_left'], 0],
        [ipm_pts['@top_right'], 0],
        [ipm_pts['@top_right'], roi['@height']],
        [ipm_pts['@top_left'], roi['@height']],
    ], dtype="float32")

    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, (roi['@width'], roi['@height']))
transform.py (project: Notes2ppt, author: gsengupta2810)
def four_point_transform(image, pts):
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    return warped
n06_pytorch_utils.py (project: kaggle_amazon_from_space, author: N01Z3)
def randomShiftScaleRotate(img, shift_limit=0.0625, scale_limit=0.1, rotate_limit=45, u=0.5):
    if random.random() < u:
        height, width, channel = img.shape

        angle = random.uniform(-rotate_limit, rotate_limit)  # degree
        scale = random.uniform(1 - scale_limit, 1 + scale_limit)
        # round the product, not the fraction, otherwise the shift is always zero
        dx = round(random.uniform(-shift_limit, shift_limit) * width)
        dy = round(random.uniform(-shift_limit, shift_limit) * height)

        cc = math.cos(angle / 180 * math.pi) * (scale)
        ss = math.sin(angle / 180 * math.pi) * (scale)
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        img = cv2.warpPerspective(img, mat, (width, height), flags=cv2.INTER_LINEAR,
                                  borderMode=cv2.BORDER_REFLECT_101)  # cv2.BORDER_CONSTANT, borderValue = (0, 0, 0))  #cv2.BORDER_REFLECT_101

    return img
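A hypothetical call that forces the augmentation to fire (u=1.0 makes the random gate always pass; the file name is made up):

img = cv2.imread('train_0.jpg')  # any H x W x C image
aug = randomShiftScaleRotate(img, shift_limit=0.0625, scale_limit=0.1,
                             rotate_limit=45, u=1.0)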
utils.py (project: answer-sheet-scan, author: inuyasha2012)
def detect_cnt_again(poly, base_img):
    """
    ???????????????????
    :param poly: ndarray
    :param base_img: ndarray
    :return: ndarray
    """
    # ?????????????????flag
    flag = False

    # crop a region of interest around the polygon's four corner nodes
    top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
    roi_img = get_roi_img(base_img, bottom_left, bottom_right, top_left, top_right)
    img = get_init_process_img(roi_img)

    # find the contour with the largest area
    cnt = get_max_area_cnt(img)

    # accept the new contour only if it covers a large enough fraction of the ROI
    if cv2.contourArea(cnt) > roi_img.shape[0] * roi_img.shape[1] * SHEET_AREA_MIN_RATIO:
        flag = True
        poly = cv2.approxPolyDP(cnt, cv2.arcLength(cnt, True) * 0.1, True)
        top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
        if not poly.shape[0] == 4:
            raise PolyNodeCountError

    # map the four corner nodes onto the full image rectangle
    base_poly_nodes = np.float32([top_left[0], bottom_left[0], top_right[0], bottom_right[0]])
    base_nodes = np.float32([[0, 0],
                            [base_img.shape[1], 0],
                            [0, base_img.shape[0]],
                            [base_img.shape[1], base_img.shape[0]]])
    transmtx = cv2.getPerspectiveTransform(base_poly_nodes, base_nodes)

    if flag:
        img_warp = cv2.warpPerspective(roi_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    else:
        img_warp = cv2.warpPerspective(base_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    return img_warp
main.py (project: FaceSwap, author: Aravind-Suresh)
def warp_image(img, tM, shape):
    out = np.zeros(shape, dtype=img.dtype)
    # cv2.warpAffine(img,
    #                tM[:2],
    #                (shape[1], shape[0]),
    #                dst=out,
    #                borderMode=cv2.BORDER_TRANSPARENT,
    #                flags=cv2.WARP_INVERSE_MAP)
    cv2.warpPerspective(img, tM, (shape[1], shape[0]), dst=out,
                        borderMode=cv2.BORDER_TRANSPARENT,
                        flags=cv2.WARP_INVERSE_MAP)
    return out

# TODO: Modify this method to get a better face contour mask
projection.py (project: LensCalibrator, author: 1024jp)
def project_image(self, image, size, offset=(0, 0)):
        """Remove parspective from given image.

        Arguments:
        image numpy.array -- Image source in numpy image form.
        size ([int]) -- Size of the output image.
        """
        translation = np.matrix([
            [1.0, 0.0, -offset[0]],
            [0.0, 1.0, -offset[1]],
            [0.0, 0.0, 1.0]
        ])
        matrix = translation * self.homography

        return cv2.warpPerspective(image, matrix, tuple(size))
vision.py (project: Vision2016, author: Team3309)
def fix_target_perspective(contour, bin_shape):
    """
    Fixes the perspective so it always looks as if we are viewing it head-on
    :param contour:
    :param bin_shape: numpy shape of the binary image matrix
    :return: a new version of the contour with corrected perspective, and a new binary image to test against
    """
    before_warp = np.zeros(bin_shape, np.uint8)
    cv2.drawContours(before_warp, [contour], -1, 255, -1)

    try:
        corners = get_corners(contour)

        # get a perspective transformation so that the target is warped as if it was viewed head on
        shape = (400, 280)
        dest_corners = np.array([(0, 0), (shape[0], 0), (0, shape[1]), (shape[0], shape[1])], np.float32)
        warp = cv2.getPerspectiveTransform(corners, dest_corners)
        fixed_perspective = cv2.warpPerspective(before_warp, warp, shape)
        fixed_perspective = fixed_perspective.astype(np.uint8)

        if int(cv2.__version__.split('.')[0]) >= 3:
            _, contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        new_contour = contours[0]

        return new_contour, fixed_perspective

    except ValueError:
        raise ValueError('Failed to detect rectangle')
cut.py (project: yonkoma2data, author: esuji5)
def transform_by4(self, img, points):
        points = sorted(points, key=lambda x: x[1])
        if len(points) == 4:
            top = sorted(points[:2], key=lambda x: x[0])
            bottom = sorted(points[2:], key=lambda x: x[0], reverse=True)
            points = np.array(top + bottom, dtype='float32')
        else:
            y_min, y_max = points[0][1], points[-1][1]
            points = sorted(points, key=lambda x: x[0])
            x_min, x_max = points[0][0], points[-1][0]
            points = np.array([np.array([x_min, y_min]),
                               np.array([x_max, y_min]),
                               np.array([x_max, y_max]),
                               np.array([x_min, y_max])],
                              np.float32)

        width = max(np.sqrt(((points[0][0] - points[2][0]) ** 2) * 2),
                    np.sqrt(((points[1][0] - points[3][0]) ** 2) * 2))
        height = max(np.sqrt(((points[0][1] - points[2][1]) ** 2) * 2),
                     np.sqrt(((points[1][1] - points[3][1]) ** 2) * 2))

        dst = np.array([np.array([0, 0]),
                        np.array([width - 1, 0]),
                        np.array([width - 1, height - 1]),
                        np.array([0, height - 1]),
                        ], np.float32)

        # compute the perspective transform from the ordered corners and warp
        trans = cv2.getPerspectiveTransform(points, dst)
        return cv2.warpPerspective(img, trans, (int(width), int(height)))
slantcorrection.py (project: handfontgen, author: nixeneko)
def transform(image, rectpoints, dpmm):
    docpxls = (int(DOCSIZE[0]*dpmm),int(DOCSIZE[1]*dpmm))
    docrect = np.array(
                [(0,0), (docpxls[0], 0), (docpxls[0], docpxls[1]), (0, docpxls[1])],
                'float32')
    transmat = cv2.getPerspectiveTransform(np.array(rectpoints, 'float32'), docrect)
    return cv2.warpPerspective(image, transmat, docpxls)
Juggalo.py (project: inyourface, author: yacomink)
def maskMouth(self, frame_image, face):
        elements = cv2.imread(self.__class__.mask_mouth_path, cv2.IMREAD_UNCHANGED)

        h, status = cv2.findHomography(self.average_mouth_points, np.array(self.getMouthPoints(face)))
        mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))

        frame_image.paste(mask_elements, (0,0), mask_elements)
Ham.py (project: inyourface, author: yacomink)
def maskFace(self, frame_image, face):

        elements = cv2.imread(self.__class__.mask_elements_path, cv2.IMREAD_UNCHANGED)

        h, status = cv2.findHomography(self.average_points, np.array(self.getFacePoints(face)))
        mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))
        frame_image.paste(mask_elements, (0,0), mask_elements)
synthgen.py (project: SynthText, author: ankush-me)
def warpHomography(self,src_mat,H,dst_size):
        dst_mat = cv2.warpPerspective(src_mat, H, dst_size,
                                      flags=cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR)
        return dst_mat
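Note the cv2.WARP_INVERSE_MAP flag: it tells warpPerspective to treat H as the destination-to-source mapping, so no explicit inversion is needed. The two calls below are equivalent up to numerical precision (variable names are illustrative); compare the Roomba980-Python example above, which inverts the homography manually instead:

out_a = cv2.warpPerspective(src_img, H, dst_size,
                            flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
out_b = cv2.warpPerspective(src_img, np.linalg.inv(H), dst_size,
                            flags=cv2.INTER_LINEAR)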

