Python examples of cv2.TERM_CRITERIA_EPS (collected source code)
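TERM_CRITERIA_EPS is one of the two flags used to build an OpenCV termination-criteria tuple (type, max_count, epsilon). It is combined with TERM_CRITERIA_MAX_ITER (an alias of TERM_CRITERIA_COUNT) via + or |, so iterative routines such as cornerSubPix, kmeans, calcOpticalFlowPyrLK, findTransformECC, meanShift and CamShift stop at whichever limit is hit first: the accuracy epsilon or the iteration count. A minimal sketch before the collected examples (the image path is a placeholder):

import cv2

# (type, max_count, epsilon): stop after 30 iterations, or once the
# refinement step moves corners by less than 0.001 px, whichever is first.
# TERM_CRITERIA_MAX_ITER and TERM_CRITERIA_COUNT are the same flag,
# so combining with '+' or '|' is equivalent.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

gray = cv2.imread('board.png', cv2.IMREAD_GRAYSCALE)  # placeholder path
found, corners = cv2.findChessboardCorners(gray, (9, 6))
if found:
    cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)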

Source file: calibrate.py (project: pycalibrate, author: reconstruct-on-the-fly)
def find_points(images):
    pattern_size = (9, 6)
    obj_points = []
    img_points = []

    # Assumed object points relation
    a_object_point = np.zeros((pattern_size[1] * pattern_size[0], 3),
                              np.float32)
    a_object_point[:, :2] = np.mgrid[0:pattern_size[0],
                                     0:pattern_size[1]].T.reshape(-1, 2)

    # Termination criteria for sub pixel corners refinement
    stop_criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER,
                     30, 0.001)

    print('Finding points ', end='')
    debug_images = []
    for (image, color_image) in images:
        found, corners = cv.findChessboardCorners(image, pattern_size, None)
        if found:
            obj_points.append(a_object_point)
            cv.cornerSubPix(image, corners, (11, 11), (-1, -1), stop_criteria)
            img_points.append(corners)

            print('.', end='')
        else:
            print('-', end='')

        if DEBUG:
            cv.drawChessboardCorners(color_image, pattern_size, corners, found)
            debug_images.append(color_image)

        sys.stdout.flush()

    if DEBUG:
        display_images(debug_images, DISPLAY_SCALE)

    print('\nWas able to find points in %s images' % len(img_points))
    return obj_points, img_points


# images is a list of tuples: (gray_image, color_image)
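A hedged sketch of calling find_points with such a list and feeding its output into cv2.calibrateCamera (the glob pattern and file names are placeholders; the module-level DEBUG/DISPLAY_SCALE flags used above are assumed defined):

import cv2 as cv
import glob

images = []
for path in glob.glob('calib_*.png'):  # placeholder pattern
    color = cv.imread(path)
    gray = cv.cvtColor(color, cv.COLOR_BGR2GRAY)
    images.append((gray, color))

obj_points, img_points = find_points(images)
h, w = images[0][0].shape[:2]
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv.calibrateCamera(
    obj_points, img_points, (w, h), None, None)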
Source file: calibrator.py (project: camera_calibration_frontend, author: groundmelon)
def _get_corners(img, board, refine = True, checkerboard_flags=0):
    """
    Get corners for a particular chessboard for an image
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH |
                                              cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
    if not ok:
        return (ok, corners)

    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]):
        ok = False

    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row*board.n_cols + col  # corners are row-major, n_cols per row
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row*board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
        radius = int(math.ceil(min_distance * 0.5))
        cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1),
                                      ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))

    return (ok, corners)
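The _pdist helper referenced above is not shown in this excerpt; a minimal sketch of what it presumably computes (Euclidean distance between two pixel coordinates):

import math

def _pdist(p1, p2):
    """Distance between two 2D points (assumed helper, not in the excerpt)."""
    return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)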
Source file: LensDistortion.py (project: imgProcessor, author: radjkarl)
def _findChessboard(self):
        # Find the chess board corners
        flags = cv2.CALIB_CB_FAST_CHECK
        if self._detect_sensible:
            flags = (cv2.CALIB_CB_FAST_CHECK |
                     cv2.CALIB_CB_ADAPTIVE_THRESH |
                     cv2.CALIB_CB_FILTER_QUADS |
                     cv2.CALIB_CB_NORMALIZE_IMAGE)

        (didFindCorners, corners) = cv2.findChessboardCorners(
            self.img, self.opts['size'], flags=flags
        )
        if didFindCorners:
            # further refine corners; corners is updated in place
            cv2.cornerSubPix(self.img, corners, (11, 11), (-1, -1),
                             # termination criteria for corner estimation for
                             # chessboard method
                             (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                              30, 0.001)
                             )  # returns None
        return didFindCorners, corners
Source file: optflow_utils.py (project: pybot, author: spillai)
def sparse_optical_flow(im1, im2, pts, fb_threshold=-1, 
                        window_size=15, max_level=2, 
                        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)): 

    # Forward flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(im1, im2, pts, None, 
                                           winSize=(window_size, window_size), 
                                           maxLevel=max_level, criteria=criteria )

    # Backward flow
    if fb_threshold > 0:     
        p0r, st0, err = cv2.calcOpticalFlowPyrLK(im2, im1, p1, None, 
                                           winSize=(window_size, window_size), 
                                           maxLevel=max_level, criteria=criteria)
        p0r[st0 == 0] = np.nan

        # Keep only points whose backward-tracked position agrees with the input pts
        fb_good = (np.fabs(p0r - pts) < fb_threshold).all(axis=1)

        p1[~fb_good] = np.nan
        st = np.bitwise_and(st, st0)
        err[~fb_good] = np.nan

    return p1, st, err
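A hedged usage sketch with forward-only tracking (the default fb_threshold=-1); the file names are placeholders and the points come from cv2.goodFeaturesToTrack:

import cv2
import numpy as np

im1 = cv2.imread('frame0.png', cv2.IMREAD_GRAYSCALE)  # placeholder paths
im2 = cv2.imread('frame1.png', cv2.IMREAD_GRAYSCALE)

pts = cv2.goodFeaturesToTrack(im1, maxCorners=500, qualityLevel=0.01,
                              minDistance=7)
p1, st, err = sparse_optical_flow(im1, im2, np.float32(pts))
good_new = p1[st.ravel() == 1]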
Source file: screencp.py (project: OpenAI_Challenges, author: AlwaysLearningDeeper)
def k(screen):
        Z = screen.reshape((-1,3))

        # convert to np.float32
        Z = np.float32(Z)

        # define criteria, number of clusters(K) and apply kmeans()
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        K = 2
        ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

        # Now convert back into uint8, and make original image
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((screen.shape))
        return res2
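The center[label.flatten()] indexing replaces every pixel's cluster id with that cluster's center color; a tiny worked example of the mechanism:

import numpy as np

center = np.uint8([[10, 10, 10], [200, 200, 200]])  # K=2 BGR centers
label = np.array([[0], [1], [1], [0]])              # one cluster id per pixel
res = center[label.flatten()]
# res -> [[ 10  10  10]
#         [200 200 200]
#         [200 200 200]
#         [ 10  10  10]]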
Source file: color_quantization.py (project: Machine-Learning, author: Jegathis)
def color_quant(input,K,output):
    img = cv2.imread(input)
    Z = img.reshape((-1,3))
    # convert to np.float32
    Z = np.float32(Z)
    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)

    ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))

    cv2.imshow('res2',res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows()
Source file: common.py (project: prototype, author: chutsu)
def draw_chessboard_corners(image):
    # Find the chess board corners
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray_image, (9, 6), None)

    # Refine and draw the corners when the full pattern was found;
    # otherwise return the input image unchanged
    img = image
    if ret is True:
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                    30,
                    0.001)
        corners2 = cv2.cornerSubPix(gray_image,
                                    corners,
                                    (11, 11),
                                    (-1, -1),
                                    criteria)
        img = cv2.drawChessboardCorners(image,
                                        (9, 6),
                                        corners2,
                                        ret)

    return img
Source file: vo.py (project: prototype, author: chutsu)
def feature_tracking(image_ref, image_cur, px_ref):
    """Feature Tracking

    Parameters
    ----------
    image_ref : np.array
        Reference image
    image_cur : np.array
        Current image
    px_ref :
        Reference pixels

    Returns
    -------
    (kp1, kp2) : (list of Keypoints, list of Keypoints)

    """
    # Setup
    win_size = (21, 21)
    max_level = 3
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01)

    # Perform LK-tracking
    lk_params = {"winSize": win_size,
                 "maxLevel": max_level,
                 "criteria": criteria}
    kp2, st, err = cv2.calcOpticalFlowPyrLK(image_ref,
                                            image_cur,
                                            px_ref,
                                            None,
                                            **lk_params)

    # Post-process
    st = st.reshape(st.shape[0])
    kp1 = px_ref[st == 1]
    kp2 = kp2[st == 1]

    return kp1, kp2
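A hedged usage sketch; here px_ref comes from a FAST detector (any float32 array of (x, y) pixel coordinates works), and the file names are placeholders:

import cv2
import numpy as np

image_ref = cv2.imread('frame0.png', cv2.IMREAD_GRAYSCALE)  # placeholders
image_cur = cv2.imread('frame1.png', cv2.IMREAD_GRAYSCALE)

fast = cv2.FastFeatureDetector_create(threshold=25)
keypoints = fast.detect(image_ref, None)
px_ref = np.float32([kp.pt for kp in keypoints])

kp1, kp2 = feature_tracking(image_ref, image_cur, px_ref)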
Source file: marker.py (project: BAR4Py, author: bxtkezhan)
def calculateCorners(self, gray, points=None):
        '''
        gray is OpenCV gray image,
        points is Marker.points
        >>> marker.calculateCorners(gray)
        >>> print(marker.corners)
        '''
        if points is None: points = self.points
        if points is None: raise TypeError('calculateCorners needs a points value')
        '''
        rotations = 0 -> 0,1,2,3
        rotations = 1 -> 3,0,1,2
        rotations = 2 -> 2,3,0,1
        rotations = 3 -> 1,2,3,0
        => A: 1,0,3,2; B: 0,3,2,1; C: 2,1,0,3; D: 3,2,1,0
        '''
        i = self.rotations
        A = (1,0,3,2)[i]; B = (0,3,2,1)[i]; C = (2,1,0,3)[i]; D = (3,2,1,0)[i]
        corners = np.float32([points[A], points[B], points[C], points[D]])
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        self.corners = cv2.cornerSubPix(gray, corners, (5,5), (-1,-1), criteria)
Source file: utils.py (project: kaggle-dstl, author: lopuhin)
def _get_alignment(im_ref, im_to_align, key):
    if key is not None:
        cached_path = Path('align_cache').joinpath('{}.alignment'.format(key))
        if cached_path.exists():
            with cached_path.open('rb') as f:
                return pickle.load(f)
    logger.info('Getting alignment for {}'.format(key))
    warp_mode = cv2.MOTION_TRANSLATION
    warp_matrix = np.eye(2, 3, dtype=np.float32)
    criteria = (
        cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000,  1e-8)
    cc, warp_matrix = cv2.findTransformECC(
        im_ref, im_to_align, warp_matrix, warp_mode, criteria)
    if key is not None:
        with cached_path.open('wb') as f:
            pickle.dump((cc, warp_matrix), f)
    logger.info('Got alignment for {} with cc {:.3f}: {}'
                .format(key, cc, str(warp_matrix).replace('\n', '')))
    return cc, warp_matrix
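A hedged sketch of applying the result: for MOTION_TRANSLATION the 2x3 warp_matrix can be fed to cv2.warpAffine with WARP_INVERSE_MAP (im_ref and im_to_align are assumed to be single-channel images already loaded; the key is a placeholder):

import cv2

cc, warp_matrix = _get_alignment(im_ref, im_to_align, key='scene-01')
h, w = im_ref.shape[:2]
aligned = cv2.warpAffine(im_to_align, warp_matrix, (w, h),
                         flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)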
Source file: matrix_encoder.py (project: action-recoginze, author: WeiruZ)
def k_means(self, a_frame, K=2):
        """
        :param a_frame: single-channel frame (np.ndarray)
        :param K: number of clusters
        :return: np.ndarray, the frame redrawn using the K cluster centers
        """
        Z = a_frame.reshape((-1, 1))
        Z = np.float32(Z)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        # OpenCV 2-style call; OpenCV 3+ inserts a bestLabels argument:
        # cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        ret, label, center = cv2.kmeans(Z, K, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((a_frame.shape))

        return res2
Source file: k-means_video.py (project: action-recoginze, author: WeiruZ)
def cluster(frame_matrix):
    new_frame_matrix = []
    i = 0
    for frame in frame_matrix:
        print "reader {} frame".format(i)
        i += 1
        Z = frame.reshape((-1, 1))
        Z = np.float32(Z)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        K = 2

        ret, label, center = cv2.kmeans(Z, K, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((frame.shape))

        new_frame_matrix.append(res2)
        cv2.imshow('res2', res2)
        cv2.waitKey(1)
    cv2.destroyAllWindows()
Source file: descriptors.py (project: object-classification, author: HenrYxZ)
def gen_codebook(dataset, descriptors, k = 64):
    """
    Generate a codebook of k codewords for the dataset.

    Args:
        dataset (Dataset object): An object that stores information about the dataset.
        descriptors (list of integer arrays): The descriptors for every class.
        k (integer): The number of clusters that are going to be calculated.

    Returns:
        list of integer arrays: The k codewords for the dataset.
    """
    iterations = 10
    epsilon = 1.0
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, iterations, epsilon)
    compactness, labels, centers = cv2.kmeans(descriptors, k, criteria, iterations, cv2.KMEANS_RANDOM_CENTERS)
    return centers
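A hedged sketch of using the codebook afterwards; this nearest-codeword quantizer is illustrative NumPy, not part of the original project:

import numpy as np

def quantize(descriptors, codebook):
    """Map each descriptor to the index of its nearest codeword."""
    # (N, 1, D) - (1, K, D) -> (N, K) pairwise distances
    dists = np.linalg.norm(
        descriptors[:, None, :] - codebook[None, :, :], axis=2)
    return np.argmin(dists, axis=1)

# words = quantize(np.float32(image_descriptors), centers)
# hist, _ = np.histogram(words, bins=np.arange(len(centers) + 1))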
Source file: Q3Support.py (project: Recognition, author: thautwarm)
def deal(self, frame):
        frame = frame.copy()
        track_window = self.track_window
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        roi_hist = self.roi_hist
        # back-project channel 0; the [0, 180] range suggests a hue histogram,
        # i.e. the frame is presumably already HSV when this method is called
        dst = cv2.calcBackProject([frame], [0], roi_hist, [0, 180], 1)
        if self.m == 'm':
            # mean shift keeps the window size fixed
            ret, track_window_r = cv2.meanShift(dst, track_window, term_crit)
            x, y, w, h = track_window_r
            img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
        elif self.m == 'c':
            # CamShift returns a rotated rect, drawn as a polygon
            ret, track_window_r = cv2.CamShift(dst, track_window, term_crit)
            pts = cv2.boxPoints(ret)
            pts = np.int0(pts)
            img2 = cv2.polylines(frame, [pts], True, 255, 2)

        # draw the displacement of the window center
        center1 = (track_window[0] + track_window[2] // 2,
                   track_window[1] + track_window[3] // 2)
        center2 = (track_window_r[0] + track_window_r[2] // 2,
                   track_window_r[1] + track_window_r[3] // 2)
        img2 = cv2.line(img2, center1, center2, color=0)

        cv2.imshow('img2', img2)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        return track_window_r
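self.roi_hist and self.track_window are prepared elsewhere in the class; a hedged sketch of the standard setup they presumably follow (the initial window, first_frame, and the tracker instance are placeholders):

import cv2
import numpy as np

x, y, w, h = 200, 150, 80, 80           # placeholder initial window
hsv = cv2.cvtColor(first_frame, cv2.COLOR_BGR2HSV)
roi = hsv[y:y + h, x:x + w]
mask = cv2.inRange(roi, np.array((0., 60., 32.)),
                   np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

tracker.track_window = (x, y, w, h)     # hypothetical instance wiring
tracker.roi_hist = roi_hist
# subsequent frames would be converted to HSV before tracker.deal(frame)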
Source file: markerfunctions.py (project: ArkwoodAR, author: rdmilligan)
def get_vectors(image, points, mtx, dist):

    # order points
    points = _order_points(points)

    # set up criteria, image, points and axis
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    imgp = np.array(points, dtype='float32')

    objp = np.array([[0.,0.,0.],[1.,0.,0.],
                        [1.,1.,0.],[0.,1.,0.]], dtype='float32')  

    # calculate rotation and translation vectors
    cv2.cornerSubPix(gray, imgp, (11, 11), (-1, -1), criteria)
    # OpenCV 2.4 signature; OpenCV 3+ returns (retval, rvec, tvec, inliers)
    rvecs, tvecs, _ = cv2.solvePnPRansac(objp, imgp, mtx, dist)

    return rvecs, tvecs
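A hedged sketch of consuming the returned vectors, e.g. projecting illustrative 3D axes back onto the image with cv2.projectPoints (the axis points and the use of points[0] as the origin are assumptions):

import cv2
import numpy as np

axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, -1]])  # illustrative unit axes

rvecs, tvecs = get_vectors(image, points, mtx, dist)
imgpts, _ = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)

origin = tuple(np.int32(points[0]))  # assumed first ordered corner
for pt in np.int32(imgpts).reshape(-1, 2):
    cv2.line(image, origin, tuple(pt), (0, 255, 0), 2)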
Source file: MultiClientMain.py (project: Camera2TCP, author: kevinkit)
def affine(self):
        warp_mode = cv2.MOTION_HOMOGRAPHY
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000,  1e-10)
        warp_matrix = np.eye(3, 3, dtype=np.float32)

        while True:
            try:
                if self.ret[0] is not None and self.client[0].img is not None:
                    master_cam_grey = cv2.cvtColor(self.client[0].img, cv2.COLOR_BGR2GRAY)
                else:
                    print("Image was none!")
                for i in range(1,self.cams):
                    if self.ret[i] is not None:
                        print("Trying to calibrate")
                        slave_cam = cv2.cvtColor(self.client[i].img, cv2.COLOR_BGR2GRAY)
                        try:
                            (cc, warp_matrix) = cv2.findTransformECC(self.get_gradient(master_cam_grey), self.get_gradient(slave_cam), warp_matrix, warp_mode, criteria)
                        except Exception as e:
                            print(e)
                        print(warp_matrix)
                    else:
                        print("Image was none")
                        ti.sleep(5)
            except Exception:
                ti.sleep(1)
Source file: sudoku_steps.py (project: pyku, author: dubvulture)
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a hough transform.
        :param grid: binary image
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° degree for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)
        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape(lines.size // 2, 2)  # // keeps the shape integral on Python 3
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta in [-pi, pi] and negatives rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            if self.debug:
                self.save_hough(lines, clmap)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (sin > 0.99 allows roughly ±8°)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Source file: sudoku.py (project: pyku, author: dubvulture)
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a hough transform.
        :param grid: binary image
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° degree for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)

        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape(lines.size // 2, 2)  # // keeps the shape integral on Python 3
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta in [-pi, pi] and negatives rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (sin > 0.99 allows roughly ±8°)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Source file: feature_detection.py (project: pybot, author: spillai)
def subpixel_pts(self, im, pts): 
        """Perform subpixel refinement"""
        term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
        cv2.cornerSubPix(im, pts, (10, 10), (-1, -1), term)
        return
Source file: calibrator.py (project: camera_calibration_frontend, author: groundmelon)
def cal_fromcorners(self, good):
        # Perform monocular calibrations
        lcorners = [(l, b) for (l, r, b) in good]
        rcorners = [(r, b) for (l, r, b) in good]
        self.l.cal_fromcorners(lcorners)
        self.r.cal_fromcorners(rcorners)

        lipts = [ l for (l, _, _) in good ]
        ripts = [ r for (_, r, _) in good ]
        boards = [ b for (_, _, b) in good ]

        opts = self.mk_object_points(boards, True)

        flags = cv2.CALIB_FIX_INTRINSIC

        self.T = numpy.zeros((3, 1), dtype=numpy.float64)
        self.R = numpy.eye(3, dtype=numpy.float64)
        if LooseVersion(cv2.__version__).version[0] == 2:
            cv2.stereoCalibrate(opts, lipts, ripts, self.size,
                               self.l.intrinsics, self.l.distortion,
                               self.r.intrinsics, self.r.distortion,
                               self.R,                            # R
                               self.T,                            # T
                               criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                               flags = flags)
        else:
            cv2.stereoCalibrate(opts, lipts, ripts,
                               self.l.intrinsics, self.l.distortion,
                               self.r.intrinsics, self.r.distortion,
                               self.size,
                               self.R,                            # R
                               self.T,                            # T
                               criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                               flags = flags)

        self.set_alpha(0.0)
Source file: optical_flow.py (project: self-driving, author: BoltzmannBrain)
def __init__(self, videoSource, featurePtMask=None, verbosity=0):
    # cap the length of optical flow tracks
    self.maxTrackLength = 10

    # detect feature points in intervals of frames; adds robustness for
    # when feature points disappear.
    self.detectionInterval = 5

    # Params for Shi-Tomasi corner (feature point) detection
    self.featureParams = dict(
        maxCorners=500,
        qualityLevel=0.3,
        minDistance=7,
        blockSize=7
    )
    # Params for Lucas-Kanade optical flow
    self.lkParams = dict(
        winSize=(15, 15),
        maxLevel=2,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
    )
    # # Alternatively use a fast feature detector
    # self.fast = cv2.FastFeatureDetector_create(500)

    self.verbosity = verbosity

    (self.videoStream,
     self.width,
     self.height,
     self.featurePtMask) = self._initializeCamera(videoSource)
Source file: test_monkey.py (project: ATX, author: NetEaseGame)
def test_features():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    # r, h, c, w = 200, 100, 200, 100
    # track_window = (c, r, w, h)
    # oldimg = cv2.imread('base1.png')
    # roi = oldimg[r:r+h, c:c+w]
    # hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv_roi, 0, 255)
    # roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
    # cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    # term_cirt = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,  10, 1)


    while True:
        try:
            w, h = d._screen.shape[:2]
            img = cv2.resize(d._screen, (h // 2, w // 2))  # resize needs integer sizes
            cv2.imshow('preview', img)

            hist = cv2.calcHist([img], [0], None, [256], [0,256])
            plt.plot(plt.hist(hist.ravel(), 256))
            plt.show()
            # if img.shape == oldimg.shape:
            #     # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            #     # ret, track_window = cv2.meanShift(hsv, track_window, term_cirt)
            #     # x, y, w, h = track_window
            #     cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            #     cv2.imshow('preview', img)
            # # cv2.imshow('preview', img)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break

    cv2.destroyWindow('preview')
Source file: test_monkey.py (project: ATX, author: NetEaseGame)
def test_kmeans(img):
    # K-means color quantization
    z = img.reshape((-1, 3))
    z = np.float32(z)
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, label, center = cv2.kmeans(z, 20, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))
    cv2.imshow('preview', res2)
    cv2.waitKey()
Source file: alignment.py (project: car-detection, author: mmetcalfe)
def find_label_clusters(kitti_base, kittiLabels, shape, num_clusters, descriptors=None):
    if descriptors is None:
        progressbar = ProgressBar('Computing descriptors', max=len(kittiLabels))
        descriptors = []
        for label in kittiLabels:
            progressbar.next()
            img = getCroppedSampleFromLabel(kitti_base, label)
            # img = cv2.resize(img, (shape[1], shape[0]), interpolation=cv2.INTER_AREA)
            img = resizeSample(img, shape, label)
            hist = get_hog(img)
            descriptors.append(hist)
        progressbar.finish()
    else:
        print 'find_label_clusters,', 'Using supplied descriptors.'
        print len(kittiLabels), len(descriptors)
        assert(len(kittiLabels) == len(descriptors))

    # X = np.random.randint(25,50,(25,2))
    # Y = np.random.randint(60,85,(25,2))
    # Z = np.vstack((X,Y))

    # convert to np.float32
    Z = np.float32(descriptors)

    # define criteria and apply kmeans()
    K = num_clusters
    print 'find_label_clusters,', 'kmeans:', K
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    ret,label,center=cv2.kmeans(Z,K,None,criteria,attempts,cv2.KMEANS_RANDOM_CENTERS)
    # ret,label,center=cv2.kmeans(Z,2,criteria,attempts,cv2.KMEANS_PP_CENTERS)

    print 'ret:', ret
    # print 'label:', label
    # print 'center:', center

    # # Now separate the data, Note the flatten()
    # A = Z[label.ravel()==0]
    # B = Z[label.ravel()==1]

    clusters = partition(kittiLabels, label)
    return clusters
    # # Plot the data
    # from matplotlib import pyplot as plt
    # plt.scatter(A[:,0],A[:,1])
    # plt.scatter(B[:,0],B[:,1],c = 'r')
    # plt.scatter(center[:,0],center[:,1],s = 80,c = 'y', marker = 's')
    # plt.xlabel('Height'),plt.ylabel('Weight')
    # plt.show()
Source file: alignment.py (project: car-detection, author: mmetcalfe)
def find_sample_clusters(pos_reg_generator, window_dims, hog, num_clusters):
    regions = list(pos_reg_generator)
    descriptors = trainhog.compute_hog_descriptors(hog, regions, window_dims, 1)

    # convert to np.float32
    descriptors = [rd.descriptor for rd in descriptors]
    Z = np.float32(descriptors)

    # define criteria and apply kmeans()
    K = num_clusters
    print 'find_label_clusters,', 'kmeans:', K
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    ret,label,center=cv2.kmeans(Z,K,None,criteria,attempts,cv2.KMEANS_RANDOM_CENTERS)
    # ret,label,center=cv2.kmeans(Z,2,criteria,attempts,cv2.KMEANS_PP_CENTERS)

    print 'ret:', ret
    # print 'label:', label
    # print 'center:', center

    # # Now separate the data, Note the flatten()
    # A = Z[label.ravel()==0]
    # B = Z[label.ravel()==1]

    clusters = partition(regions, label)
    return clusters

