Python cv2.BFMatcher() usage examples

Q3Support.py (project: Recognition, author: thautwarm)
def SIFTMATCH(img1,img2):
    img1=img1.copy()
    img2=img2.copy()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = Sift.detectAndCompute(img1,None)
    kp2, des2 = Sift.detectAndCompute(img2,None)
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1,des2, k=2)
    # Apply ratio test
    matchesMask = [[0,0] for i in range(len(matches))]
    for i,(m,n) in enumerate(matches):
        if 0.55*n.distance<m.distance < 0.80*n.distance:
            matchesMask[i]=[1,0]
            # cv2.drawMatchesKnn expects list of lists as matches.
    draw_params = dict(matchesMask=matchesMask)
    img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,flags=2,**draw_params)
#    img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,img3,flags=2)
    plt.imshow(img3,cmap='gray')
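
All three Q3Support.py snippets on this page call a module-level Sift detector that the excerpts never define. A minimal sketch of the assumed setup (only the name Sift comes from the snippets; everything else here is an assumption):

import cv2
import numpy as np
from matplotlib import pyplot as plt

# SIFT lives in cv2.xfeatures2d in OpenCV 3.x contrib builds;
# from OpenCV 4.4 on it is cv2.SIFT_create() in the main module.
Sift = cv2.xfeatures2d.SIFT_create()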
Q3Support.py (project: Recognition, author: thautwarm)
def SIFTMATCHPOINTS(img1,img2):
    img1=img1.copy()
    img2=img2.copy()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = Sift.detectAndCompute(img1,None)
    kp2, des2 = Sift.detectAndCompute(img2,None)
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1,des2, k=2)
    # Apply ratio test
    matchesMask =np.array( [0 for i in range(len(matches))])
    good=[]
    for i,(m,n) in enumerate(matches):
        if 0.50*n.distance<m.distance < 0.85*n.distance:
            good.append(m)
            matchesMask[i]=1
    src_pts = [ tuple([int(pos) for pos in  kp1[m.queryIdx].pt]) for m in good ]
    dst_pts = [ tuple([int(pos) for pos in  kp2[m.trainIdx].pt]) for m in good ]
    return dict(zip(src_pts,dst_pts))
#    kp1=np.array(kp1)[matchesMask==1]
#    kp2=np.array(kp2)[matchesMask==1]
#    kp1pt=list(map(lambda x: tuple([int(posi) for posi in x.pt]),kp1))
#    kp2pt=list(map(lambda x: tuple([int(posi) for posi in x.pt]),kp2))
#    return dict(zip(kp1pt,kp2pt))
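
A hypothetical use of SIFTMATCHPOINTS. Note that because the source points become dict keys, two matches whose source coordinates round to the same pixel collapse into a single entry:

points = SIFTMATCHPOINTS(img1, img2)
for (x1, y1), (x2, y2) in points.items():
    print("(%d, %d) -> (%d, %d)" % (x1, y1, x2, y2))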
Q3Support.py (project: Recognition, author: thautwarm)
def SIFTMATCHCOUNT(img1,img2):
    img1=img1.copy()
    img2=img2.copy()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = Sift.detectAndCompute(img1,None)
    kp2, des2 = Sift.detectAndCompute(img2,None)
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1,des2, k=2)
    if len(np.array(matches).shape)!=2 or np.array(matches).shape[1]!=2:
        return 0
    # Apply ratio test
    good = []
    for m,n in matches:
        if 0.50*n.distance<m.distance < 0.80*n.distance:
            good.append([m])
    return len(good)
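
The np.array shape test above guards against knnMatch returning fewer than two neighbours for some descriptor (which happens when one descriptor set is very small). A sketch of a per-pair variant that skips incomplete pairs instead of bailing out entirely:

good = []
for pair in matches:
    if len(pair) != 2:
        continue
    m, n = pair
    if 0.50 * n.distance < m.distance < 0.80 * n.distance:
        good.append(m)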
trainer_matches.py (project: Yugioh-bot, author: will7200)
def get_matches(self, train, corr):
        train_img = cv2.imread(train, 0)
        query_img = self.query
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(train_img, None)
        kp2, des2 = sift.detectAndCompute(query_img, None)

        # create BFMatcher object
        bf = cv2.BFMatcher()
        try:
            matches = bf.knnMatch(des1, des2, k=2)
        except cv2.error:
            return False
        good_matches = []
        cluster = []
        for m, n in matches:
            img2_idx = m.trainIdx
            img1_idx = m.queryIdx
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt
            # print("Comare %d to %d and %d to %d" % (x1,x2,y1,y2))
            if m.distance < 0.8 * n.distance and y2 > self.yThreshold and x2 < self.xThreshold:
                good_matches.append([m])
                cluster.append([int(x2), int(y2)])
        if len(cluster) <= corr:
            return False
        self.kmeans = KMeans(n_clusters=1, random_state=0).fit(cluster)
        new_cluster = self.compare_distances(train_img, cluster)
        if len(new_cluster) == 0 or len(new_cluster) / len(cluster) < .5:
            return False
        img3 = cv2.drawMatchesKnn(
            train_img, kp1, query_img, kp2, good_matches, None, flags=2)
        if self._debug:
            self.images.append(img3)
            self.debug_matcher(img3)
        return True
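
A hypothetical driver for get_matches; the receiver is assumed to be an instance of the surrounding class with query, xThreshold, yThreshold and _debug already set, and the file name is made up:

if matcher.get_matches("train_card.png", corr=10):
    cx, cy = matcher.kmeans.cluster_centers_[0]
    print("match cluster centred near (%d, %d)" % (cx, cy))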
sift.py (project: AlphaLogo, author: gigaflw)
def cv2_match(im1, im2):
    mysift = SIFT()
    sift = cv2.SIFT()
    bf = cv2.BFMatcher()


    kp1, dp1 = sift.detectAndCompute(im1, None)
    kp2, dp2 = sift.detectAndCompute(im2, None)
    matches_ = bf.knnMatch(dp1, dp2, k=2)

    print(len(matches_))
    good = []
    for m, n in matches_:
        if m.distance < 0.90 * n.distance:
            good.append(m)
    print(len(good))

    pos1 = [(int(kp.pt[1]), int(kp.pt[0])) for kp in kp1]
    pos2 = [(int(kp.pt[1]), int(kp.pt[0])) for kp in kp2]
    matches = [(m.queryIdx, m.trainIdx, 0.15) for m in good]

    cv2.imwrite("cvkp1.jpg", cv2.drawKeypoints(im, kp1, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
    cv2.imwrite("cvkp2.jpg", cv2.drawKeypoints(imm, kp2, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
    mysift.draw_matches(im, pos1, imm, pos2, matches, 'ckmatch.jpg')
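
cv2.SIFT() above is the OpenCV 2.x constructor. A version-tolerant factory, sketched under the assumption that OpenCV 3.x users have the contrib build:

def make_sift():
    if hasattr(cv2, "SIFT_create"):        # OpenCV >= 4.4 (SIFT patent expired)
        return cv2.SIFT_create()
    if hasattr(cv2, "xfeatures2d"):        # OpenCV 3.x with opencv-contrib
        return cv2.xfeatures2d.SIFT_create()
    return cv2.SIFT()                      # OpenCV 2.x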
test_gms_matcher.py (project: prototype, author: chutsu)
def test_compute_matches(self):
        orb = cv2.ORB_create(10000)
        orb.setFastThreshold(0)
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
        gms = GmsMatcher(orb, matcher)

        kp0, des0 = orb.detectAndCompute(self.img0, np.array([]))
        kp1, des1 = orb.detectAndCompute(self.img1, np.array([]))
        matches = matcher.match(des0, des1)

        matches = gms.compute_matches(kp0, kp1, des0, des1, matches, self.img0)

        self.assertTrue(len(matches) > 0)

    # def test_compute_matches2(self):
    #     orb = cv2.ORB_create(1000)
    #     orb.setFastThreshold(0)
    #     matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    #     gms = GmsMatcher(orb, matcher)
    #
    #     camera = Camera()
    #     img0 = camera.update()
    #
    #     while True:
    #         img1 = camera.update()
    #         matches = gms.compute_matches(img0, img1)
    #         gms.draw_matches(img0, img1)
    #
    #         img0 = img1
    #
    #         # matches_img = draw_matches(img0, img1, kp0, kp1, matches)
    #         # cv2.imshow("Mathces", matches_img)
    #         # if cv2.waitKey(1) == 113:
    #         #     exit(0)
    #
    #     self.assertTrue(len(matches) > 0)
test_ransac.py (project: prototype, author: chutsu)
def setUp(self):
        self.image_height = 600
        self.ransac = VerticalRANSAC(self.image_height)

        # Load test images
        data_path = test.TEST_DATA_PATH
        img0 = cv2.imread(os.path.join(data_path, "vo", "0.png"))
        img1 = cv2.imread(os.path.join(data_path, "vo", "1.png"))

        # Detect features
        tracker = FeatureTracker()
        f0 = tracker.detect(img0)
        f1 = tracker.detect(img1)

        # Convert Features to cv2.KeyPoint and descriptors (np.array)
        kps0 = [cv2.KeyPoint(f.pt[0], f.pt[1], f.size) for f in f0]
        des0 = np.array([f.des for f in f0])
        kps1 = [cv2.KeyPoint(f.pt[0], f.pt[1], f.size) for f in f1]
        des1 = np.array([f.des for f in f1])

        # Perform matching and sort based on distance
        # Note: the arguments to the brute-force matcher are (query descriptors,
        # train descriptors); here we use des1 as the query descriptors because
        # des1 holds the descriptors from the latest image frame
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = matcher.match(des1, des0)
        matches = sorted(matches, key=lambda x: x.distance)

        # Prepare data for RANSAC outlier rejection
        self.src_pts = np.float32([kps0[m.trainIdx].pt for m in matches])
        self.dst_pts = np.float32([kps1[m.queryIdx].pt for m in matches])
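
Because the matcher is created with crossCheck=True, match() keeps only mutual nearest neighbours, so no ratio test is needed here. If a geometric outlier check is wanted before handing the points to VerticalRANSAC, a sketch using OpenCV's fundamental-matrix RANSAC:

        F, mask = cv2.findFundamentalMat(self.src_pts, self.dst_pts,
                                         cv2.FM_RANSAC, 3.0, 0.99)
        inliers = mask.ravel() == 1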
features.py (project: prototype, author: chutsu)
def __init__(self, **kwargs):
        self.debug_mode = kwargs.get("debug_mode", False)
        self.nb_features = kwargs.get("nb_features", 500)
        self.nb_levels = kwargs.get("nb_levels", 4)

        # Detector and matcher
        self.detector = FAST(threshold=2)
        self.descriptor = ORB(nfeatures=self.nb_features,
                              nlevels=self.nb_levels)
        self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        self.ransac = None

        # Counters
        self.counter_frame_id = -1
        self.counter_track_id = -1

        # Feature tracks
        self.tracks_tracking = []
        self.tracks_lost = []
        self.tracks_buffer = {}
        self.max_buffer_size = 5000

        # Image, feature, unmatched features references
        self.img_ref = None
        self.fea_ref = None
        self.unmatched = []
engine.py (project: vse, author: mkpaszkiewicz)
def create_vse(vocabulary_path, recognized_visual_words=1000):
    """Create visual search engine with default configuration."""
    ranker = SimpleRanker(hist_comparator=Intersection())
    inverted_index = InvertedIndex(ranker=ranker, recognized_visual_words=recognized_visual_words)
    bag_of_visual_words = BagOfVisualWords(extractor=cv2.xfeatures2d.SURF_create(),
                                           matcher=cv2.BFMatcher(normType=cv2.NORM_L2),
                                           vocabulary=load(vocabulary_path))
    return VisualSearchEngine(inverted_index, bag_of_visual_words)
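
NORM_L2 is the appropriate norm here because SURF produces float descriptors. A hypothetical call (the vocabulary path is an assumption):

vse = create_vse('vocabulary.pkl', recognized_visual_words=1000)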
sift_image.py (project: ocular, author: wolfd)
def correspondences(self, other):
        # find corresponding points in the input image and the template image
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(self.des, other.des, k=2)

        # Apply Lowe Ratio Test to the keypoints
        # this should weed out unsure matches
        good_keypoints = []
        for m, n in matches:
            if m.distance < self.good_thresh * n.distance:
                good_keypoints.append(m)

        if DEBUG_SIFT:
            draw_matches(
                self.image, self.kp,
                other.image, other.kp,
                good_keypoints[-50:]
            )

        # put keypoints from own image in self_pts
        # transform the keypoint data into arrays for homography check
        # grab precomputed points
        self_pts = np.float32(
            [self.kp[m.queryIdx].pt for m in good_keypoints]
        ).reshape(-1, 2)

        # put corresponding keypoints from other image in other_pts
        other_pts = np.float32(
            [other.kp[m.trainIdx].pt for m in good_keypoints]
        ).reshape(-1, 2)

        return (self_pts, other_pts)
matcher.py (project: imagepy, author: Image-Py)
def match(self, desc1, desc2):
        matcher = cv2.BFMatcher(cv2.NORM_L2)
        pair = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 1)
        lt = [(i[0].distance, i[0].queryIdx, i[0].trainIdx) for i in pair]
        return np.array(sorted(lt))[:,1:].astype(np.int16)
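
knnMatch with k=1 is one-nearest-neighbour matching, so match() does the same job; a sketch of the equivalent routine, using int32 to avoid the int16 overflow risk once there are more than 32767 descriptors:

def match_v2(desc1, desc2):
    matcher = cv2.BFMatcher(cv2.NORM_L2)
    pairs = sorted(matcher.match(desc1, desc2), key=lambda m: m.distance)
    return np.array([(m.queryIdx, m.trainIdx) for m in pairs], dtype=np.int32)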
utils.py (project: LearnHash, author: galad-loth)
def GetKnnIdx(queryData,baseData,numNN, metric=0):
    if (metric==0):
        objMatcher=cv2.BFMatcher(cv2.NORM_L2)
    elif (metric==1):
        objMatcher=cv2.BFMatcher(cv2.NORM_HAMMING)
    matches=objMatcher.knnMatch(queryData,baseData,k=numNN)
    idxKnn=npy.zeros((queryData.shape[0],numNN), dtype=npy.int32)
    for kk in range(queryData.shape[0]):
        for ll in range(numNN):
            idxKnn[kk][ll]=matches[kk][ll].trainIdx
    return idxKnn
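
The index-copy loop can be built directly from the match list; a sketch that assumes every query row returns exactly numNN neighbours:

def GetKnnIdxV2(queryData, baseData, numNN, metric=0):
    norm = cv2.NORM_L2 if metric == 0 else cv2.NORM_HAMMING
    matches = cv2.BFMatcher(norm).knnMatch(queryData, baseData, k=numNN)
    return npy.array([[m.trainIdx for m in row] for row in matches], dtype=npy.int32)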
main.py (project: CS412_ComputerVision, author: Tmbao)
def main(argv):
  if len(argv) == 2:
    detector = detectors.get_detector(argv[0], params[argv[0]])

    image = cv2.cvtColor(cv2.imread(argv[1]), cv2.COLOR_BGR2GRAY)

    keypoints = detector.detect(image)

    visualize_keypoints(image, keypoints)

  elif len(argv) == 5:
    detector = detectors.get_detector(argv[1], params[argv[1]])
    descriptor = descriptors.get_descriptor(argv[2])
    matcher = cv2.BFMatcher()

    image1 = cv2.cvtColor(cv2.imread(argv[3]), cv2.COLOR_BGR2GRAY)
    image2 = cv2.cvtColor(cv2.imread(argv[4]), cv2.COLOR_BGR2GRAY)

    keypoints1 = detector.detect(image1)
    keypoints2 = detector.detect(image2)

    keypoints1, descriptors1 = descriptor.compute(image1, keypoints1)
    keypoints2, descriptors2 = descriptor.compute(image2, keypoints2)

    print type(descriptors1), type(descriptors2)

    matches = matcher.knnMatch(descriptors1, descriptors2, k=2)
    matches = sorted(matches, key=lambda x: x[0].distance)

    visualize_matches(image1, keypoints1, image2, keypoints2, matches[:100])
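
Hypothetical command lines matching the two argv layouts handled above (the value of the first argument in the five-argument form is not shown in the excerpt):

# python main.py <detector> <image>                            -> keypoint view
# python main.py <mode> <detector> <descriptor> <img1> <img2>  -> match view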
szymon_keypoints_in_images.py (project: PKM2, author: Szonek)
def main():
    checkOpennCVVersion()
    img1 = cv2.imread('napis_z_tlem.png', 0)  # the large image
    img2 = cv2.imread('napis.png', 0)  # the small image, which we look for inside the large one
    orb = cv2.ORB()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)


    # save the resulting keypoints to files
    imgKP1 = cv2.drawKeypoints(img1, kp1)
    cv2.imwrite('orb_keypoints_big.jpg', imgKP1)

    imgKP2 = cv2.drawKeypoints(img2, kp2)
    cv2.imwrite('orb_keypoints.jpg', imgKP2)


    matcher = cv2.BFMatcher(cv2.NORM_L2)
    matches = matcher.knnMatch(des1, trainDescriptors=des2, k=2)
    pairs = filterMatches(kp1, kp2, matches)

    l1 = len( kp1 )
    l2 = len( kp2 )
    lp = len( pairs )
    r = (lp * 100) / l1
    print r, "%"
    cv2.waitKey()
    cv2.destroyAllWindows()
    return None

# called before main: to use cv2.ORB() we must be sure we are running OpenCV 2.4
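
A minimal sketch of that version check; only the name checkOpennCVVersion appears in the snippet, the body is an assumption:

def checkOpennCVVersion():
    major, minor = cv2.__version__.split('.')[:2]
    assert (major, minor) == ('2', '4'), "cv2.ORB() requires OpenCV 2.4"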
PixelSearch.py (project: pytomatic, author: N0K0)
def find_features_in_array_SIFT(self, sub_image, main_image, debug=False):
        # Initiate SIFT detector
        sift = SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(sub_image, None)
        kp2, des2 = sift.detectAndCompute(main_image, None)

        # BFMatcher with default params
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)

        logging.debug("Found {} possible matches".format(len(matches)))

        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append([m])

        good.sort(key=lambda x: x[0].distance)

        if debug:
            # cv2.drawMatchesKnn expects list of lists as matches.
            img3 = cv2.drawMatchesKnn(sub_image, kp1, main_image, kp2, good, flags=2, outImg=None,
                                      matchColor=(255, 255, 0))
            plt.imshow(img3), plt.show()

        ret_list = []
        for match in good:
            index = match[0].trainIdx
            point = kp2[index].pt
            ret_list.append((int(point[0]), int(point[1])))

        logging.debug("After filtering {}".format(len(good)))
        return ret_list
affine_T.py (project: Analog-Utility-Meter-Reader, author: arjun372)
def main():
    stream=urllib.urlopen(CAM_URL)
    bytes=''
    ts=time.time()
    while True:
        bytes+=stream.read(2048)
        a = bytes.find('\xff\xd8')
        b = bytes.find('\xff\xd9')
        if a==-1 or b==-1:
            continue

        # Frame available
        rtimestamp=time.time()
        jpg = bytes[a:b+2]
        bytes= bytes[b+2:]
        img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)
        cv2.imshow('RAW',img)

        #ORB to get corresponding points
        kp, des = orb.detectAndCompute(img,None)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(des_ref,des)
        matches = sorted(matches, key = lambda x:x.distance)
        img3 = cv2.drawMatches(img_ref,kp_ref,img,kp,matches[:4], None,flags=2)
        cv2.imshow('Matches',img3)

#        pts_src = np.float32([[kp_ref[0].pt[0],kp_ref[0].pt[1]],[kp_ref[1].pt[0],kp_ref[1].pt[1]],[kp_ref[0].pt[0],kp_ref[0].pt[1]],[kp_ref[0].pt[0],kp_ref[0].pt[1]]
        # Perspective Transform
        pts1 = np.float32([[50,50],[200,50],[50,200]])
        pts2 = np.float32([[10,100],[200,50],[100,250]])
        Tr_M = cv2.getAffineTransform(pts1,pts2)
        oimg = cv2.warpAffine(img,Tr_M,(cols,rows))
        cv2.imshow('Perspective Transform',oimg)

        # Print lag
        print(time.time()-ts)
        ts=time.time()

        if cv2.waitKey(1) == 27:
            exit(0)
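
The loop above relies on module-level state the excerpt does not show. A sketch of the assumed globals (the names come from the snippet, the values are assumptions):

import time
import urllib
import cv2
import numpy as np

CAM_URL = 'http://camera.local/video.mjpg'   # hypothetical MJPEG stream URL
orb = cv2.ORB()                              # OpenCV 2.x-style constructor
img_ref = cv2.imread('reference.png', 0)
kp_ref, des_ref = orb.detectAndCompute(img_ref, None)
rows, cols = img_ref.shape[:2]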
cars.py (project: OCV_Vehicles_Features, author: dan-masek)
def main():
    log = logging.getLogger("main")

    log.debug("Loading keypoint data from '%s'...", KEYPOINT_DATA_FILE)
    keypoint_data = KeypointData.load(KEYPOINT_DATA_FILE)

    log.debug("Creating SIFT detector...")
    sift = cv2.SIFT(nfeatures=0, nOctaveLayers=5, contrastThreshold=0.05, edgeThreshold=30, sigma=1.5)
    bf = cv2.BFMatcher()

    # Set up image source
    log.debug("Initializing video capture device #%s...", IMAGE_SOURCE)
    cap = cv2.VideoCapture(IMAGE_SOURCE)

    frame_width = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
    frame_height = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
    log.debug("Video capture frame size=(w=%d, h=%d)", frame_width, frame_height)

    log.debug("Starting capture loop...")
    frame_number = -1
    while True:
        frame_number += 1
        log.debug("Capturing frame #%d...", frame_number)
        ret, frame = cap.read()
        if not ret:
            log.error("Frame capture failed, stopping...")
            break

        log.debug("Got frame #%d: shape=%s", frame_number, frame.shape)


        # Archive raw frames from video to disk for later inspection/testing
        if CAPTURE_FROM_VIDEO:
            save_frame(IMAGE_FILENAME_FORMAT
                , frame_number, frame, "source frame #%d")

        log.debug("Processing frame #%d...", frame_number)
        processed = process_frame(frame_number, frame, keypoint_data, sift, bf)

        save_frame(IMAGE_DIR + "/processed_%04d.png"
            , frame_number, processed, "processed frame #%d")

        cv2.imshow('Source Image', frame)
        cv2.imshow('Processed Image', processed)

        log.debug("Frame #%d processed.", frame_number)

        c = cv2.waitKey(WAIT_TIME)
        if c == 27:
            log.debug("ESC detected, stopping...")
            break

    log.debug("Closing video capture device...")
    cap.release()
    cv2.destroyAllWindows()
    log.debug("Done.")

# ============================================================================
featuredetector.py (project: bib-tagger, author: KateRita)
def findMatchesBetweenImages(image_1, image_2):
  """ Return the top 10 list of matches between two input images.

  This function detects and computes SIFT (or ORB) from the input images, and
  returns the best matches using the normalized Hamming Distance.

  Args:
    image_1 (numpy.ndarray): The first image (grayscale).
    image_2 (numpy.ndarray): The second image. (grayscale).

  Returns:
    image_1_kp (list): The image_1 keypoints, the elements are of type
                       cv2.KeyPoint.
    image_2_kp (list): The image_2 keypoints, the elements are of type
                       cv2.KeyPoint.
    matches (list): A list of matches, length 10. Each item in the list is of
                    type cv2.DMatch.

  """
  # matches - type: list of cv2.DMatch
  matches = None
  # image_1_kp - type: list of cv2.KeyPoint items.
  image_1_kp = None
  # image_1_desc - type: numpy.ndarray of numpy.uint8 values.
  image_1_desc = None
  # image_2_kp - type: list of cv2.KeyPoint items.
  image_2_kp = None
  # image_2_desc - type: numpy.ndarray of numpy.uint8 values.
  image_2_desc = None

  # WRITE YOUR CODE HERE.
  #init
  sift = SIFT()

  #1. Compute SIFT keypoints and descriptors for both images
  image_1_kp, image_1_desc = sift.detectAndCompute(image_1,None)
  image_2_kp, image_2_desc = sift.detectAndCompute(image_2,None)

  #2. Create a Brute Force Matcher, using the hamming distance (and set crossCheck to true).
  #create BFMatcher object
  bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

  #3. Compute the matches between both images.
  #match descriptors
  matches = bf.match(image_1_desc,image_2_desc)

  #4. Sort the matches based on distance so you get the best matches.
  matches = sorted(matches, key=lambda x: x.distance)

  #5. Return the image_1 keypoints, image_2 keypoints, and the top 10 matches in a list.

  return image_1_kp, image_2_kp, matches[:10]
  # END OF FUNCTION.
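
One caveat: NORM_HAMMING is only valid for binary descriptors such as ORB; OpenCV rejects it for float SIFT descriptors. A sketch that picks the norm from the descriptor dtype (assuming numpy is imported as np):

  norm = cv2.NORM_HAMMING if image_1_desc.dtype == np.uint8 else cv2.NORM_L2
  bf = cv2.BFMatcher(norm, crossCheck=True)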
sift_matcher.py (project: ocular, author: wolfd)
def calculate_sift(self, last_frame, new_frame, last_kp=None):
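        # NOTE: as excerpted, this function relies on state from the enclosing
        # class (self.descs, self.kps, k, scene_desc, scene_kps, frame_gray,
        # self.lk_params) and mixes SIFT matching with LK optical flow, so it
        # does not run standalone.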
        # find corresponding points in the input image and the template image
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(self.descs[k], scene_desc, k=2)

        # Apply Lowe Ratio Test to the keypoints
        # this should weed out unsure matches
        good_keypoints = []
        for m, n in matches:
            if m.distance < self.good_thresh * n.distance:
                good_keypoints.append(m)

        # put keypoints from template image in template_pts
        # transform the keypoint data into arrays for homography check
        # grab precomputed points
        template_pts = np.float32(
            [self.kps[k][m.queryIdx].pt for m in good_keypoints]
        ).reshape(-1, 1, 2)

        # put corresponding keypoints from input image in scene_img_pts
        scene_img_pts = np.float32(
            [scene_kps[m.trainIdx].pt for m in good_keypoints]
        ).reshape(-1, 1, 2)

        # if we can't find any matching keypoints, bail
        # (probably the scene image was nonexistent/really bad)
        if scene_img_pts.shape[0] == 0:
            return None

        # use OpenCV to calculate optical flow
        new_frame_matched_features, status, error = cv2.calcOpticalFlowPyrLK(
            self.last_frame_gray,
            frame_gray,
            self.last_frame_features,
            None,
            **self.lk_params
        )

        self.publish_interframe_motion(
            self.last_frame_features,
            new_frame_matched_features,
            status,
            error
        )

        # save data for next frame
        self.store_as_last_frame(frame_gray)
servoing_designed_features_quad_panda3d_env.py (project: citysim3d, author: alexlee-gk)
def __init__(self, action_space, feature_type=None, filter_features=None,
                 max_time_steps=100, distance_threshold=4.0, **kwargs):
        """
        filter_features indicates whether to filter out key points that are not
        on the object in the current image. Key points in the target image are
        always filtered out.
        """
        SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
        ServoingEnv.__init__(self, env=self, max_time_steps=max_time_steps, distance_threshold=distance_threshold)

        lens = self.camera_node.node().getLens()
        self._observation_space.spaces['points'] = BoxSpace(np.array([-np.inf, lens.getNear(), -np.inf]),
                                                            np.array([np.inf, lens.getFar(), np.inf]))
        film_size = tuple(int(s) for s in lens.getFilmSize())
        self.mask_camera_sensor = Panda3dMaskCameraSensor(self.app, (self.skybox_node, self.city_node),
                                                          size=film_size,
                                                          near_far=(lens.getNear(), lens.getFar()),
                                                          hfov=lens.getFov())
        for cam in self.mask_camera_sensor.cam:
            cam.reparentTo(self.camera_sensor.cam)

        self.filter_features = True if filter_features is None else filter_features
        self._feature_type = feature_type or 'sift'
        if cv2.__version__.split('.')[0] == '3':
            from cv2.xfeatures2d import SIFT_create, SURF_create
            from cv2 import ORB_create
            if self.feature_type == 'orb':
                # https://github.com/opencv/opencv/issues/6081
                cv2.ocl.setUseOpenCL(False)
        else:
            SIFT_create = cv2.SIFT
            SURF_create = cv2.SURF
            ORB_create = cv2.ORB
        if self.feature_type == 'sift':
            self._feature_extractor = SIFT_create()
        elif self.feature_type == 'surf':
            self._feature_extractor = SURF_create()
        elif self.feature_type == 'orb':
            self._feature_extractor = ORB_create()
        else:
            raise ValueError("Unknown feature extractor %s" % self.feature_type)
        if self.feature_type == 'orb':
            self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        else:
            self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
        self._target_key_points = None
        self._target_descriptors = None
perspec_T_SIFT.py (project: Analog-Utility-Meter-Reader, author: arjun372)
def main():
    stream=urllib.urlopen(CAM_URL)
    bytes=''
    ts=time.time()
    while True:
        bytes+=stream.read(2048)
        a = bytes.find('\xff\xd8')
        b = bytes.find('\xff\xd9')
        if a==-1 or b==-1:
            continue

        # Frame available
        rtimestamp=time.time()
        jpg = bytes[a:b+2]
        bytes= bytes[b+2:]
        img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)
        cv2.imshow('RAW',img)

        #ORB to get corresponding points
        kp, des = sift.detectAndCompute(img,None)
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des_ref,des,k=2)
        m = []
        for ma,na in matches:
            if ma.distance < 0.75*na.distance:
                m.append([ma])
        img3 = cv2.drawMatchesKnn(img_ref,kp_ref,img,kp,m[:4], None,flags=2)
        cv2.imshow('MatchesKnn',img3)

        #pts_ref = np.float32([[kp_ref[m[0].queryIdx].pt[0],kp_ref[m[0].queryIdx].pt[1]],[kp_ref[m[1].queryIdx].pt[0],kp_ref[m[1].queryIdx].pt[1]],[kp_ref[m[2].queryIdx].pt[0],kp_ref[m[2].queryIdx].pt[1]],[kp_ref[m[3].queryIdx].pt[0],kp_ref[m[3].queryIdx].pt[1]]])
        #pts     = np.float32([[kp[m[0].trainIdx].pt[0],kp[m[0].trainIdx].pt[1]],[kp[m[1].trainIdx].pt[0],kp[m[1].trainIdx].pt[1]],[kp[m[2].trainIdx].pt[0],kp[m[2].trainIdx].pt[1]],[kp[m[3].trainIdx].pt[0],kp[m[3].trainIdx].pt[1]]])
        # Perspective Transform
        #M = cv2.getPerspectiveTransform(pts_ref,pts)
        #dst = cv2.warpPerspective(img,M,(cols,rows))
        #cv2.imshow('Perspective Transform',dst)

        # Print lag
        print(time.time()-ts)
        ts=time.time()

        if cv2.waitKey(1) == 27:
            exit(0)
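
A sketch of a more robust variant of the commented-out perspective step above: estimate a homography from all ratio-test survivors with RANSAC instead of exactly four matches (img, kp, m, kp_ref, cols and rows as in the snippet):

        if len(m) >= 4:
            pts_ref = np.float32([kp_ref[x[0].queryIdx].pt for x in m]).reshape(-1, 1, 2)
            pts = np.float32([kp[x[0].trainIdx].pt for x in m]).reshape(-1, 1, 2)
            H, mask = cv2.findHomography(pts_ref, pts, cv2.RANSAC, 5.0)
            dst = cv2.warpPerspective(img, H, (cols, rows))
            cv2.imshow('Perspective Transform', dst)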
auto_stack.py (project: image_stacking, author: maitek)
def stackImagesKeypointMatching(file_list):

    orb = cv2.ORB_create()

    # disable OpenCL because of a bug in ORB in OpenCV 3.1
    cv2.ocl.setUseOpenCL(False)

    stacked_image = None
    first_image = None
    first_kp = None
    first_des = None
    for file in file_list:
        print(file)
        image = cv2.imread(file,1)
        imageF = image.astype(np.float32) / 255

        # compute the descriptors with ORB
        kp = orb.detect(image, None)
        kp, des = orb.compute(image, kp)

        # create BFMatcher object
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        if first_image is None:
            # Save keypoints for first image
            stacked_image = imageF
            first_image = image
            first_kp = kp
            first_des = des
        else:
            # Find matches and sort them in the order of their distance
            matches = matcher.match(first_des, des)
            matches = sorted(matches, key=lambda x: x.distance)

            src_pts = np.float32(
                [first_kp[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
            dst_pts = np.float32(
                [kp[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

            # Estimate perspective transformation
            M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
            h, w, _ = imageF.shape
            imageF = cv2.warpPerspective(imageF, M, (w, h))
            stacked_image += imageF

    stacked_image /= len(file_list)
    stacked_image = (stacked_image*255).astype(np.uint8)
    return stacked_image

# ===== MAIN =====
# Read all files in directory
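
A hypothetical continuation of the truncated main block:

import glob
file_list = sorted(glob.glob('images/*.jpg'))   # assumed input directory
cv2.imwrite('stacked.png', stackImagesKeypointMatching(file_list))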
TestImgMatch.py (project: LearnHash, author: galad-loth)
def TestKptMatch():    
    img1=cv2.imread("E:\\DevProj\\Datasets\\VGGAffine\\bark\\img1.ppm",cv2.IMREAD_COLOR)
    img2=cv2.imread("E:\\DevProj\\Datasets\\VGGAffine\\bark\\img2.ppm",cv2.IMREAD_COLOR)
    gray1=cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
    gray2=cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
    gap_width=20
    black_gap=npy.zeros((img1.shape[0],gap_width),dtype=npy.uint8)

#    objSIFT = cv2.SIFT(500)
#    kpt1,desc1 = objSIFT.detectAndCompute(gray1,None) 
#    kpt2,desc2 = objSIFT.detectAndCompute(gray2,None) 
#    objMatcher=cv2.BFMatcher(cv2.NORM_L2)
#    matches=objMatcher.knnMatch(desc1,desc2,k=2)

    objORB = cv2.ORB(500)
    kpt1,desc1 = objORB.detectAndCompute(gray1,None) 
    kpt2,desc2 = objORB.detectAndCompute(gray2,None) 
    objMatcher=cv2.BFMatcher(cv2.NORM_HAMMING)
    matches=objMatcher.knnMatch(desc1,desc2,k=2)

    goodMatches=[]
    for bm1,bm2 in matches:
        if bm1.distance < 0.7*bm2.distance:
            goodMatches.append(bm1)

    if len(goodMatches)>10:
        ptsFrom = npy.float32([kpt1[bm.queryIdx].pt for bm in goodMatches]).reshape(-1,1,2)
        ptsTo = npy.float32([kpt2[bm.trainIdx].pt for bm in goodMatches]).reshape(-1,1,2)
        matH, matchMask = cv2.findHomography(ptsFrom, ptsTo, cv2.RANSAC,5.0)

    imgcnb=npy.concatenate((gray1,black_gap,gray2),axis=1)

    plt.figure(1,figsize=(15,6))
    plt.imshow(imgcnb,cmap="gray")
    idx=0
    for bm in goodMatches:
        if 1==matchMask[idx]:
            kptFrom=kpt1[bm.queryIdx]
            kptTo=kpt2[bm.trainIdx]
            plt.plot(kptFrom.pt[0],kptFrom.pt[1],"rs",
                     markerfacecolor="none",markeredgecolor="r",markeredgewidth=2)
            plt.plot(kptTo.pt[0]+img1.shape[1]+gap_width,kptTo.pt[1],"bo",
                     markerfacecolor="none",markeredgecolor="b",markeredgewidth=2)
            plt.plot([kptFrom.pt[0],kptTo.pt[0]+img1.shape[1]+gap_width],
                     [kptFrom.pt[1],kptTo.pt[1]],"g-",linewidth=2)
        idx+=1
    plt.axis("off")
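
Two notes on the loop above: matchMask is an N x 1 array returned by cv2.findHomography, and it is only defined when more than ten good matches survive, so the plotting loop raises a NameError otherwise. A sketch of a flat inlier list computed under that guard:

    if len(goodMatches) > 10:
        inlierMatches = [bm for bm, keep in zip(goodMatches, matchMask.ravel()) if keep]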

