Python SIFT Class: Example Source Code

roomba.py (project: Roomba980-Python, author: NickWaterton)
def match_outlines(self, orig_image, skewed_image):
        orig_image = np.array(orig_image)
        skewed_image = np.array(skewed_image)
        try:
            surf = cv2.xfeatures2d.SURF_create(400)
        except Exception:
            surf = cv2.SIFT(400)
        kp1, des1 = surf.detectAndCompute(orig_image, None)
        kp2, des2 = surf.detectAndCompute(skewed_image, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            # see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
            ss = M[0, 1]
            sc = M[0, 0]
            scaleRecovered = math.sqrt(ss * ss + sc * sc)
            thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
            self.log.info("MAP: Calculated scale difference: %.2f, "
                          "Calculated rotation difference: %.2f" %
                          (scaleRecovered, thetaRecovered))

            #deskew image
            im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M),
                (orig_image.shape[1], orig_image.shape[0]))
            return im_out

        else:
            self.log.warn("MAP: Not  enough  matches are found   -   %d/%d"
                          % (len(good), MIN_MATCH_COUNT))
            return skewed_image
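
The scale and rotation read-out above works because, for a similarity transform, the top-left 2x2 block of the homography is a scaled rotation matrix. A minimal sanity check with a synthetic transform (numpy and math only; the names ss/sc follow the snippet):

import math
import numpy as np

# Build a known similarity transform: rotation of 30 degrees, scale of 1.5.
theta = math.radians(30)
s = 1.5
M = np.array([[s * math.cos(theta),  s * math.sin(theta), 10.0],
              [-s * math.sin(theta), s * math.cos(theta), 20.0],
              [0.0,                  0.0,                 1.0]])

# Recover scale and angle from the top-left 2x2 block, as the snippet does.
ss, sc = M[0, 1], M[0, 0]
scaleRecovered = math.sqrt(ss * ss + sc * sc)        # -> 1.5
thetaRecovered = math.atan2(ss, sc) * 180 / math.pi  # -> 30.0
print(scaleRecovered, thetaRecovered)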
unit_tests.py (project: object-classification, author: HenrYxZ)
def test_descriptors():
    img = cv2.imread(constants.TESTING_IMG_PATH)
    cv2.imshow("Normal Image", img)
    print("Normal Image")
    option = input("Enter [1] for using ORB features and other number to use SIFT.\n")
    start = time.time()
    if option == 1:
        orb = cv2.ORB()
        kp, des = orb.detectAndCompute(img, None)
    else:
        sift = cv2.SIFT()
        kp, des = sift.detectAndCompute(img, None)
    end = time.time()
    elapsed_time = utils.humanize_time(end - start)
    des_name = constants.ORB_FEAT_NAME if option == 1 else constants.SIFT_FEAT_NAME
    print("Elapsed time getting descriptors {0}".format(elapsed_time))
    print("Number of descriptors found {0}".format(len(des)))
    if des is not None and len(des) > 0:
        print("Dimension of descriptors {0}".format(len(des[0])))
    print("Name of descriptors used is {0}".format(des_name))
    img2 = cv2.drawKeypoints(img, kp)
    # plt.imshow(img2), plt.show()
    cv2.imshow("{0} descriptors".format(des_name), img2)
    print("Press any key to exit ...")
    cv2.waitKey()
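
cv2.SIFT() and cv2.ORB() above are OpenCV 2.4 constructors that later releases removed. A version-tolerant factory sketch (the helper name is ours; in 3.x SIFT lives in the contrib module cv2.xfeatures2d, and cv2.SIFT_create returns to the main module in 4.4+):

import cv2

def create_detector(name):
    """Hypothetical helper: build a SIFT/ORB detector across OpenCV versions."""
    if name == "orb":
        return cv2.ORB_create() if hasattr(cv2, "ORB_create") else cv2.ORB()
    if hasattr(cv2, "SIFT_create"):        # OpenCV >= 4.4
        return cv2.SIFT_create()
    if hasattr(cv2, "xfeatures2d"):        # OpenCV 3.x with contrib
        return cv2.xfeatures2d.SIFT_create()
    return cv2.SIFT()                      # OpenCV 2.4

Note that OpenCV 3+ also requires an explicit output-image argument for cv2.drawKeypoints, e.g. img2 = cv2.drawKeypoints(img, kp, None).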
unit_tests.py (project: object-classification, author: HenrYxZ)
def test_codebook():
    dataset = pickle.load(open(constants.DATASET_OBJ_FILENAME, "rb"))
    option = input("Enter [1] for using ORB features or [2] to use SIFT features.\n")
    start = time.time()
    des = descriptors.all_descriptors(dataset, dataset.get_train_set(), option)
    end = time.time()
    elapsed_time = utils.humanize_time(end - start)
    print("Elapsed time getting all the descriptors is {0}".format(elapsed_time))
    k = 64
    des_name = constants.ORB_FEAT_NAME if option == constants.ORB_FEAT_OPTION else constants.SIFT_FEAT_NAME
    codebook_filename = "codebook_{0}_{1}.csv".format(k, des_name)
    start = time.time()
    codebook = descriptors.gen_codebook(dataset, des, k)
    end = time.time()
    elapsed_time = utils.humanize_time(end - start)
    print("Elapsed time calculating the k means for the codebook is {0}".format(elapsed_time))
    np.savetxt(codebook_filename, codebook, delimiter=constants.NUMPY_DELIMITER)
    print("Codebook loaded in {0}, press any key to exit ...".format(constants.CODEBOOK_FILE_NAME))
    cv2.waitKey()
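
descriptors.gen_codebook is not shown on this page; a typical k-means codebook step looks like the following sketch, substituting scikit-learn's MiniBatchKMeans for whatever the project actually uses (the helper is hypothetical):

import numpy as np
from sklearn.cluster import MiniBatchKMeans

def gen_codebook_sketch(descriptors, k=64):
    """Cluster local descriptors into k visual words (hypothetical helper)."""
    descriptors = np.asarray(descriptors, dtype=np.float32)
    kmeans = MiniBatchKMeans(n_clusters=k, random_state=0).fit(descriptors)
    return kmeans.cluster_centers_  # shape (k, descriptor_dim)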
unit_tests.py (project: object-classification, author: HenrYxZ)
def test_one_img_classification():
    img = cv2.imread("test.jpg")
    resize_to = 640
    h, w, channels = img.shape
    img = utils.resize(img, resize_to, h, w)
    des = descriptors.sift(img)
    k = 128
    des_name = "SIFT"
    codebook_filename = filenames.codebook(k, des_name)
    codebook = utils.load(codebook_filename)
    img_vlad = descriptors.vlad(des, codebook)
    svm_filename = filenames.svm(k, des_name)
    svm = cv2.SVM()
    svm.load(svm_filename)
    result = svm.predict(img_vlad)
    print("result is {0}".format(result))
sift.py (project: AlphaLogo, author: gigaflw)
def cv2_match(im1, im2):
    mysift = SIFT()
    sift = cv2.SIFT()
    bf = cv2.BFMatcher()

    kp1, dp1 = sift.detectAndCompute(im1, None)
    kp2, dp2 = sift.detectAndCompute(im2, None)
    matches_ = bf.knnMatch(dp1, dp2, k=2)

    print(len(matches_))
    good = []
    for m, n in matches_:
        if m.distance < 0.90 * n.distance:
            good.append(m)
    print(len(good))

    pos1 = [(int(kp.pt[1]), int(kp.pt[0])) for kp in kp1]
    pos2 = [(int(kp.pt[1]), int(kp.pt[0])) for kp in kp2]
    matches = [(m.queryIdx, m.trainIdx, 0.15) for m in good]

    cv2.imwrite("cvkp1.jpg", cv2.drawKeypoints(im, kp1, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
    cv2.imwrite("cvkp2.jpg", cv2.drawKeypoints(imm, kp2, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
    mysift.draw_matches(im, pos1, imm, pos2, matches, 'ckmatch.jpg')
compute_fv_ucf.py (project: c3d_ucf101_siamese_yilin, author: fxing328)
def dictionary(descriptors, N):
    em = cv2.EM(N)
    em.train(descriptors)

    return np.float32(em.getMat("means")), \
        np.float32(em.getMatVector("covs")), np.float32(em.getMat("weights"))[0]

#def image_descriptors(file):
#   img = cv2.imread(file, 0)
#   img = cv2.resize(img, (256, 256))
#   _ , descriptors = cv2.SIFT().detectAndCompute(img, None)
#   return descriptors
compute_fv.py (project: c3d_ucf101_siamese_yilin, author: fxing328)
def dictionary(descriptors, N):
    em = cv2.EM(N)
    em.train(descriptors)

    return np.float32(em.getMat("means")), \
        np.float32(em.getMatVector("covs")), np.float32(em.getMat("weights"))[0]

#def image_descriptors(file):
#   img = cv2.imread(file, 0)
#   img = cv2.resize(img, (256, 256))
#   _ , descriptors = cv2.SIFT().detectAndCompute(img, None)
#   return descriptors
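
Both dictionary() snippets rely on cv2.EM, which OpenCV 3 moved into cv2.ml with a different interface. If rewriting rather than pinning OpenCV 2.4, scikit-learn's GaussianMixture yields the same (means, covariances, weights) triple; a sketch (diagonal covariances are an assumption, matching cv2.EM's default):

import numpy as np
from sklearn.mixture import GaussianMixture

def dictionary_sklearn(descriptors, N):
    """GMM vocabulary for Fisher vectors; a stand-in for the cv2.EM version."""
    gmm = GaussianMixture(n_components=N, covariance_type="diag",
                          random_state=0).fit(descriptors)
    return (np.float32(gmm.means_),
            np.float32(gmm.covariances_),
            np.float32(gmm.weights_))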
descriptors.py (project: object-classification, author: HenrYxZ)
def sift(img):
    """
    Gets a list of 128-dimensional descriptors using SIFT with DoG keypoints.
    Callers are expected to resize large images beforehand (largest side at
    640 pixels, keeping the aspect ratio).

    Args:
        img (BGR matrix): The image that will be used.

    Returns:
        list of floats array: The descriptors found in the image.
    """
    sift = cv2.SIFT()
    kp, des = sift.detectAndCompute(img, None)
    return des
descriptors.py (project: object-classification, author: HenrYxZ)
def all_descriptors(dataset, class_list, option=constants.ORB_FEAT_OPTION):
    """
    Gets every local descriptor of a set with different classes (useful for
    building a codebook).

    Args:
        class_list (list of arrays of strings): Each element holds the
            information for one class, as an array of paths to the images of
            that class.
        option (integer): 49 (the key '1') if ORB features are to be used;
            any other value selects SIFT features.

    Returns:
        numpy float matrix: Each row is a descriptor found in an image of the set.
    """
    des = None
    for i in range(len(class_list)):
        message = "*** Getting descriptors for class number {0} of {1} ***".format(i, len(class_list))
        print(message)
        class_img_paths = class_list[i]
        new_des = descriptors_from_class(dataset, class_img_paths, i, option)
        if des is None:
            des = new_des
        else:
            des = np.vstack((des, new_des))
    message = "*****************************\n"\
              "Finished getting all the descriptors\n"
    print(message)
    print("Total number of descriptors: {0}".format(len(des)))
    if len(des) > 0:
        print("Dimension of descriptors: {0}".format(len(des[0])))
        print("First descriptor:\n{0}".format(des[0]))
    return des
descriptors.py (project: QScode, author: PierreHao)
def Sift(gray):
    sift = cv2.SIFT()
    kps, des = sift.detectAndCompute(gray, None)
    return kps, des
descriptors.py (project: QScode, author: PierreHao)
def __init__(self, descriptor_type):
        self.rootsift = False
        lists = ['sift','rootsift','orb','surf']
        if descriptor_type == 'sift':
            self.lfe = cv2.SIFT()
        elif descriptor_type == 'surf':
            self.lfe = cv2.SURF()
        elif descriptor_type == 'rootsift':
            self.lfe = cv2.SIFT()
        elif descriptor_type == 'orb':
            self.lfe = cv2.ORB()
        else:
            assert(descriptor_type in lists)
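
Note that the 'rootsift' branch above instantiates a plain cv2.SIFT. RootSIFT (Arandjelovic and Zisserman, 2012) is normally obtained by post-processing the SIFT descriptors: L1-normalize each row, then take the element-wise square root. A sketch of that step (the helper name is ours):

import numpy as np

def to_rootsift(des, eps=1e-7):
    """Convert SIFT descriptors to RootSIFT: L1-normalize rows, then sqrt."""
    if des is None:
        return None
    # SIFT descriptors are non-negative, so the row sum is the L1 norm.
    des = des / (des.sum(axis=1, keepdims=True) + eps)
    return np.sqrt(des)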
server.py (project: indices, author: shekharshank)
def sift(imageval):
    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)
    #surf = cv2.SURF(400)
    sift = cv2.SIFT(40)
    kp, des = sift.detectAndCompute(gray,None)
    #kp, des = surf.detectAndCompute(gray,None)
    #print len(kp)
server.py (project: indices, author: shekharshank)
def surf(imageval):
    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)
    surf = cv2.SURF(40)
    #sift = cv2.SIFT(40)
    #kp, des = sift.detectAndCompute(gray,None)
    kp, des = surf.detectAndCompute(gray,None)
    #print len(kp)
cars.py (project: OCV_Vehicles_Features, author: dan-masek)
def main():
    log = logging.getLogger("main")

    log.debug("Loading keypoint data from '%s'...", KEYPOINT_DATA_FILE)
    keypoint_data = KeypointData.load(KEYPOINT_DATA_FILE)

    log.debug("Creating SIFT detector...")
    sift = cv2.SIFT(nfeatures=0, nOctaveLayers=5, contrastThreshold=0.05, edgeThreshold=30, sigma=1.5)
    bf = cv2.BFMatcher()

    # Set up image source
    log.debug("Initializing video capture device #%s...", IMAGE_SOURCE)
    cap = cv2.VideoCapture(IMAGE_SOURCE)

    frame_width = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
    frame_height = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
    log.debug("Video capture frame size=(w=%d, h=%d)", frame_width, frame_height)

    log.debug("Starting capture loop...")
    frame_number = -1
    while True:
        frame_number += 1
        log.debug("Capturing frame #%d...", frame_number)
        ret, frame = cap.read()
        if not ret:
            log.error("Frame capture failed, stopping...")
            break

        log.debug("Got frame #%d: shape=%s", frame_number, frame.shape)


        # Archive raw frames from video to disk for later inspection/testing
        if CAPTURE_FROM_VIDEO:
            save_frame(IMAGE_FILENAME_FORMAT
                , frame_number, frame, "source frame #%d")

        log.debug("Processing frame #%d...", frame_number)
        processed = process_frame(frame_number, frame, keypoint_data, sift, bf)

        save_frame(IMAGE_DIR + "/processed_%04d.png"
            , frame_number, processed, "processed frame #%d")

        cv2.imshow('Source Image', frame)
        cv2.imshow('Processed Image', processed)

        log.debug("Frame #%d processed.", frame_number)

        c = cv2.waitKey(WAIT_TIME)
        if c == 27:
            log.debug("ESC detected, stopping...")
            break

    log.debug("Closing video capture device...")
    cap.release()
    cv2.destroyAllWindows()
    log.debug("Done.")

# ============================================================================
featuredetector.py (project: bib-tagger, author: KateRita)
def findMatchesBetweenImages(image_1, image_2):
  """ Return the top 10 list of matches between two input images.

  This function detects and computes SIFT (or ORB) from the input images, and
  returns the best matches using the normalized Hamming Distance.

  Args:
    image_1 (numpy.ndarray): The first image (grayscale).
    image_2 (numpy.ndarray): The second image. (grayscale).

  Returns:
    image_1_kp (list): The image_1 keypoints, the elements are of type
                       cv2.KeyPoint.
    image_2_kp (list): The image_2 keypoints, the elements are of type
                       cv2.KeyPoint.
    matches (list): A list of matches, length 10. Each item in the list is of
                    type cv2.DMatch.

  """
  # matches - type: list of cv2.DMatch
  matches = None
  # image_1_kp - type: list of cv2.KeyPoint items.
  image_1_kp = None
  # image_1_desc - type: numpy.ndarray of numpy.uint8 values.
  image_1_desc = None
  # image_2_kp - type: list of cv2.KeyPoint items.
  image_2_kp = None
  # image_2_desc - type: numpy.ndarray of numpy.uint8 values.
  image_2_desc = None

  # WRITE YOUR CODE HERE.
  #init
  sift = SIFT()

  #1. Compute SIFT keypoints and descriptors for both images
  image_1_kp, image_1_desc = sift.detectAndCompute(image_1,None)
  image_2_kp, image_2_desc = sift.detectAndCompute(image_2,None)

  #2. Create a Brute Force Matcher, using the hamming distance (and set crossCheck to true).
  #create BFMatcher object
  bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

  #3. Compute the matches between both images.
  #match descriptors
  matches = bf.match(image_1_desc,image_2_desc)

  #4. Sort the matches based on distance so you get the best matches.
  matches = sorted(matches, key=lambda x: x.distance)

  #5. Return the image_1 keypoints, image_2 keypoints, and the top 10 matches in a list.

  return image_1_kp, image_2_kp, matches[:10]
  # END OF FUNCTION.
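
The docstring above promises the normalized Hamming distance, which is only defined for binary descriptors such as ORB or BRIEF; SIFT descriptors are 128-dimensional float vectors and are conventionally matched with L2. A small selector sketch (the helper is ours):

import cv2

def make_matcher(descriptor_name):
    """Pick a BFMatcher norm that suits the descriptor type (a sketch)."""
    if descriptor_name in ("orb", "brief", "brisk"):
        return cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    return cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)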
image.py (project: ATX, author: NetEaseGame)
def find_image_position(origin='origin.png', query='query.png', outfile=None):
    '''
    find all image positions
    @return None if not found else a tuple: (origin.shape, query.shape, positions)
    might raise Exception
    '''
    img1 = cv2.imread(query, 0) # query image(small)
    img2 = cv2.imread(origin, 0) # train image(big)

    # Initiate SIFT detector
    sift = cv2.SIFT()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1,None)
    kp2, des2 = sift.detectAndCompute(img2,None)
    print len(kp1), len(kp2)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    # flann
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)
    print len(kp1), len(kp2), 'good cnt:', len(good)

    if len(good)*1.0/len(kp1) < 0.5:
    #if len(good)<MIN_MATCH_COUNT:
        print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
        return img2.shape, img1.shape, []

    queryPts = []
    trainPts = []
    for dm in good:
        queryPts.append(kp1[dm.queryIdx])
        trainPts.append(kp2[dm.trainIdx])

    img3 = cv2.drawKeypoints(img1, queryPts)
    cv2.imwrite('image/query.png', img3)

    img3 = cv2.drawKeypoints(img2, trainPts)
    point = _middlePoint(trainPts)
    print 'position in', point

    if outfile:
        edge = 10
        top_left = (point[0]-edge, point[1]-edge)
        bottom_right = (point[0]+edge, point[1]+edge)
        cv2.rectangle(img3, top_left, bottom_right, 255, 2)
        cv2.imwrite(outfile, img3)
    return img2.shape, img1.shape, [point]
descriptors.py (project: object-classification, author: HenrYxZ)
def descriptors_from_class(dataset, class_img_paths, class_number, option = constants.ORB_FEAT_OPTION):
    """
    Gets all the local descriptors for a class. If an image has a side with more than 640 pixels it will be resized
    leaving the biggest side at 640 pixels and conserving the aspect ratio for the other side.

    Args:
        dataset (Dataset object): An object that stores information about the dataset.
        class_img_paths (array of strings): The paths for each image in certain class.
        class_number (integer): The number of the class.
        option (integer): If this is 49 (the key '1'), ORB features are used; otherwise SIFT.

    Returns:
        numpy float matrix: Each row is a descriptor found in an image of the class.
    """
    des = None
    step = max(1, (constants.STEP_PERCENTAGE * len(class_img_paths)) / 100)  # at least 1, to avoid modulo by zero on small classes
    for i in range(len(class_img_paths)):
        img_path = class_img_paths[i]
        img = cv2.imread(img_path)
        resize_to = 640
        h, w, channels = img.shape
        if h > resize_to or w > resize_to:
            img = utils.resize(img, resize_to, h, w)
        if option == constants.ORB_FEAT_OPTION:
            des_name = "ORB"
            new_des = orb(img)
        else:
            des_name = "SIFT"
            new_des = sift(img)
        if new_des is not None:
            if des is None:
                des = np.array(new_des, dtype=np.float32)
            else:
                des = np.vstack((des, np.array(new_des)))
        # Print a message to show the status of the function
        if i % step == 0:
            percentage = (100 * i) / len(class_img_paths)
            message = "Calculated {0} descriptors for image {1} of {2}({3}%) of class number {4} ...".format(
                des_name, i, len(class_img_paths), percentage, class_number
            )
            print(message)
    message = "* Finished getting the descriptors for the class number {0}*".format(class_number)
    print(message)
    print("Number of descriptors in class: {0}".format(len(des)))
    dataset.set_class_count(class_number, len(des))
    return des
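
One performance note on the loop above: np.vstack inside the loop recopies the whole accumulated matrix for every image, which is quadratic in the number of images. Collecting the per-image arrays in a list and stacking once avoids that; a sketch (the helper is ours):

import numpy as np

def stack_descriptors(per_image_descriptors):
    """Stack per-image descriptor arrays once, skipping images with none."""
    chunks = [np.asarray(d, dtype=np.float32)
              for d in per_image_descriptors if d is not None]
    return np.vstack(chunks) if chunks else None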
servoing_designed_features_quad_panda3d_env.py (project: citysim3d, author: alexlee-gk)
def __init__(self, action_space, feature_type=None, filter_features=None,
                 max_time_steps=100, distance_threshold=4.0, **kwargs):
        """
        filter_features indicates whether to filter out key points that are not
        on the object in the current image. Key points in the target image are
        always filtered out.
        """
        SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
        ServoingEnv.__init__(self, env=self, max_time_steps=max_time_steps, distance_threshold=distance_threshold)

        lens = self.camera_node.node().getLens()
        self._observation_space.spaces['points'] = BoxSpace(np.array([-np.inf, lens.getNear(), -np.inf]),
                                                            np.array([np.inf, lens.getFar(), np.inf]))
        film_size = tuple(int(s) for s in lens.getFilmSize())
        self.mask_camera_sensor = Panda3dMaskCameraSensor(self.app, (self.skybox_node, self.city_node),
                                                          size=film_size,
                                                          near_far=(lens.getNear(), lens.getFar()),
                                                          hfov=lens.getFov())
        for cam in self.mask_camera_sensor.cam:
            cam.reparentTo(self.camera_sensor.cam)

        self.filter_features = True if filter_features is None else filter_features
        self._feature_type = feature_type or 'sift'
        if cv2.__version__.split('.')[0] == '3':
            from cv2.xfeatures2d import SIFT_create, SURF_create
            from cv2 import ORB_create
            if self.feature_type == 'orb':
                # https://github.com/opencv/opencv/issues/6081
                cv2.ocl.setUseOpenCL(False)
        else:
            SIFT_create = cv2.SIFT
            SURF_create = cv2.SURF
            ORB_create = cv2.ORB
        if self.feature_type == 'sift':
            self._feature_extractor = SIFT_create()
        elif self.feature_type == 'surf':
            self._feature_extractor = SURF_create()
        elif self.feature_type == 'orb':
            self._feature_extractor = ORB_create()
        else:
            raise ValueError("Unknown feature extractor %s" % self.feature_type)
        if self.feature_type == 'orb':
            self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        else:
            self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
        self._target_key_points = None
        self._target_descriptors = None
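
The constructor's docstring mentions filtering out key points that do not lie on the object. Given a binary object mask such as the one produced by the mask camera sensor, that filter typically looks like the following sketch (a hypothetical helper, not the project's code; note kp.pt is (x, y) while the mask is indexed (row, col)):

import numpy as np

def filter_key_points(key_points, descriptors, mask):
    """Keep key points whose location falls on the object mask (a sketch)."""
    keep = [i for i, kp in enumerate(key_points)
            if mask[int(round(kp.pt[1])), int(round(kp.pt[0]))]]
    kept_kp = [key_points[i] for i in keep]
    kept_des = None if descriptors is None else descriptors[keep]
    return kept_kp, kept_des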
TestImgMatch.py (project: LearnHash, author: galad-loth)
def TestKptMatch():    
    img1=cv2.imread("E:\\DevProj\\Datasets\\VGGAffine\\bark\\img1.ppm",cv2.IMREAD_COLOR)
    img2=cv2.imread("E:\\DevProj\\Datasets\\VGGAffine\\bark\\img2.ppm",cv2.IMREAD_COLOR)
    gray1=cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
    gray2=cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
    gap_width=20
    black_gap=npy.zeros((img1.shape[0],gap_width),dtype=npy.uint8)

#    objSIFT = cv2.SIFT(500)
#    kpt1,desc1 = objSIFT.detectAndCompute(gray1,None) 
#    kpt2,desc2 = objSIFT.detectAndCompute(gray2,None) 
#    objMatcher=cv2.BFMatcher(cv2.NORM_L2)
#    matches=objMatcher.knnMatch(desc1,desc2,k=2)

    objORB = cv2.ORB(500)
    kpt1,desc1 = objORB.detectAndCompute(gray1,None) 
    kpt2,desc2 = objORB.detectAndCompute(gray2,None) 
    objMatcher=cv2.BFMatcher(cv2.NORM_HAMMING)
    matches=objMatcher.knnMatch(desc1,desc2,k=2)

    goodMatches=[]
    for bm1,bm2 in matches:
        if bm1.distance < 0.7*bm2.distance:
            goodMatches.append(bm1)

    if len(goodMatches) <= 10:
        print("Not enough good matches: %d" % len(goodMatches))
        return
    ptsFrom = npy.float32([kpt1[bm.queryIdx].pt for bm in goodMatches]).reshape(-1,1,2)
    ptsTo = npy.float32([kpt2[bm.trainIdx].pt for bm in goodMatches]).reshape(-1,1,2)
    matH, matchMask = cv2.findHomography(ptsFrom, ptsTo, cv2.RANSAC, 5.0)

    imgcnb=npy.concatenate((gray1,black_gap,gray2),axis=1)

    plt.figure(1,figsize=(15,6))
    plt.imshow(imgcnb,cmap="gray")
    idx=0
    for bm in goodMatches:
        if 1==matchMask[idx]:
            kptFrom=kpt1[bm.queryIdx]
            kptTo=kpt2[bm.trainIdx]
            plt.plot(kptFrom.pt[0],kptFrom.pt[1],"rs",
                     markerfacecolor="none",markeredgecolor="r",markeredgewidth=2)
            plt.plot(kptTo.pt[0]+img1.shape[1]+gap_width,kptTo.pt[1],"bo",
                     markerfacecolor="none",markeredgecolor="b",markeredgewidth=2)
            plt.plot([kptFrom.pt[0],kptTo.pt[0]+img1.shape[1]+gap_width],
                     [kptFrom.pt[1],kptTo.pt[1]],"g-",linewidth=2)
        idx+=1
    plt.axis("off")

