Python cv2.drawContours() usage examples

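cv2.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) draws contour outlines, or fills them when thickness is negative. Before the project examples, here is a minimal, self-contained sketch of the usual findContours/drawContours round trip; the file name and threshold are illustrative assumptions, not taken from any of the projects below.

import cv2

img = cv2.imread("shapes.png")                        # assumed input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
# OpenCV 2.x/4.x return (contours, hierarchy); 3.x prepends the image,
# so taking the last two elements works on all versions.
contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]
if contours:
    cv2.drawContours(img, contours, -1, (0, 255, 0), 2)   # outline every contour
    cv2.drawContours(img, contours, 0, (0, 0, 255), -1)   # fill the first contour
cv2.imwrite("shapes_contours.png", img)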
detect.py (project: object-detection-python-opencv, author: hasanaliqureshi)
def find_biggest_contour(image):
    # Work on a copy so the caller's image is not modified
    image = image.copy()
    # findContours returns every contour; CHAIN_APPROX_SIMPLE compresses
    # horizontal, vertical, and diagonal segments and keeps only their end
    # points (an upright rectangular contour is encoded with just 4 points).
    # The hierarchy output describes the image topology, one entry per
    # contour; it is not needed here.
    # Note: the three-value unpacking below matches the OpenCV 3.x API.
    _, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    # Isolate largest contour
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
    biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]

    mask = np.zeros(image.shape, np.uint8)
    cv2.drawContours(mask, [biggest_contour], -1, 255, -1)
    return biggest_contour, mask
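A hypothetical usage sketch for find_biggest_contour, assuming an OpenCV 3.x runtime to match the three-value findContours unpacking above; the input file and threshold are illustrative. Note that the function expects a single-channel binary image.

import cv2

img = cv2.imread("coins.jpg", cv2.IMREAD_GRAYSCALE)   # assumed input image
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
biggest, mask = find_biggest_contour(binary)
cv2.imwrite("biggest_contour_mask.png", mask)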
triangle-detect.py (project: illumeme, author: josmcg)
def find_triangles(filename):
    FIRST = 0
    RED = (0, 0, 255)
    THICKNESS = 3
    copy = img = cv2.imread(filename)
    grey_img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    ret, thresh = cv2.threshold(grey_img, 127, 255, 1)
    contours, h = cv2.findContours(thresh, 1, 2)
    largest = None
    for contour in contours:
        approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)
        if len(approx) == 3:
            #triangle found
            if largest is None or cv2.contourArea(contour) > cv2.contourArea(largest):
                largest = contour

    #write file
    cv2.drawContours(copy, [largest], FIRST, RED, THICKNESS)
    cv2.imwrite(filename +"_result", copy)
border_removal.py (project: idmatch, author: maddevsio)
def remove_borders(image):
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = resize(image, height=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)
    _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cv2.imshow('edged', edged)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        print(len(approx) == 4)
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is not None and len(screenCnt) > 0:
        cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
        return four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    return orig
dushu.py (project: dust_repos, author: taozhijiang)
def img_contour_select(ctrs, im):
    # Select candidate rectangle-like contours
    cand_rect = []
    for item in ctrs:
        epsilon = 0.02*cv2.arcLength(item, True)
        approx = cv2.approxPolyDP(item, epsilon, True)  
        if len(approx) <= 8:
            rect = cv2.minAreaRect(item)
            if rect[1][0] < 20 or rect[1][1] < 20:
                continue
            if rect[1][0] > 150 or rect[1][1] > 150:
                continue        
            #ratio = (rect[1][1]+0.00001) / rect[1][0]
            #if ratio > 1 or ratio < 0.9:
            #    continue
            box = cv2.boxPoints(rect)
            box_d = np.int0(box)
            cv2.drawContours(im, [box_d], 0, (0,255,0), 3)
            cand_rect.append(box)
    img_show_hook("candidate rectangles", im)
    return cand_rect
vision.py (project: Vision2016, author: Team3309)
def profile_score(contour, binary):
    """
    Calculate a score based on the "profile" of the target, i.e. how closely
    its geometry matches the expected geometry of the goal.
    :param contour:
    :param binary:
    :return:
    """
    bounding = cv2.boundingRect(contour)
    pixels = np.zeros((binary.shape[0], binary.shape[1]))
    cv2.drawContours(pixels, [contour], -1, 255, -1)
    col_averages = np.mean(pixels, axis=0)[bounding[0]:bounding[0] + bounding[2]]
    row_averages = np.mean(pixels, axis=1)[bounding[1]:bounding[1] + bounding[3]]
    # normalize to between 0 and 1
    col_averages *= 1.0 / col_averages.max()
    row_averages *= 1.0 / row_averages.max()

    col_diff = np.subtract(col_averages, col_profile(col_averages.shape[0], bounding[2]))
    row_diff = np.subtract(row_averages, row_profile(row_averages.shape[0], bounding[3]))

    # average difference should be close to 0
    avg_diff = np.mean([np.mean(col_diff), np.mean(row_diff)])
    return 100 - (avg_diff * 50)
image_processor.py (project: 2017-robot, author: frc1418)
def find_contours(self, img):

        thresh_img = self.threshold(img)

        _, contours, _ = cv2.findContours(thresh_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        result = []
        for cnt in contours:
            approx = cv2.approxPolyDP(cnt, 0.01*cv2.arcLength(cnt, True), True)

            if self.draw_approx:
                cv2.drawContours(self.out, [approx], -1, self.BLUE, 2, lineType=8)

            if len(approx) > 3 and len(approx) < 15:
                _, _, w, h = cv2.boundingRect(approx)
                if h > self.min_height and w > self.min_width:
                    hull = cv2.convexHull(cnt)
                    approx2 = cv2.approxPolyDP(hull, 0.01*cv2.arcLength(hull, True), True)

                    if self.draw_approx2:
                        cv2.drawContours(self.out, [approx2], -1, self.GREEN, 2, lineType=8)

                    result.append(approx2)
        return result
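The snippets on this page target different OpenCV releases: 2.4 and 4.x return (contours, hierarchy) from cv2.findContours, while 3.x returns (image, contours, hierarchy), which is why some examples unpack two values and others three. A small version-agnostic wrapper, offered as a sketch rather than as part of any of the projects:

import cv2

def find_contours_compat(binary, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE):
    """Return (contours, hierarchy) regardless of the installed OpenCV version."""
    result = cv2.findContours(binary, mode, method)
    # The last two elements are always (contours, hierarchy).
    return result[-2], result[-1]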
idcard.py (project: dust_repos, author: taozhijiang)
def img_contour_select(ctrs, im):
    # Select candidate rectangle-like contours
    cand_rect = []
    for item in ctrs:
        epsilon = 0.02*cv2.arcLength(item, True)
        approx = cv2.approxPolyDP(item, epsilon, True)  
        if len(approx) <= 8:
            rect = cv2.minAreaRect(item)
            # skip heavily rotated rectangles (angle between -10 and -80 degrees)
            if rect[2] < -10 and rect[2] > -80:
                continue
            if rect[1][0] < 10 or rect[1][1] < 10:
                continue
            #ratio = (rect[1][1]+0.00001) / rect[1][0]
            #if ratio > 1 or ratio < 0.9:
            #    continue
            box = cv2.boxPoints(rect)
            box_d = np.int0(box)
            cv2.drawContours(im, [box_d], 0, (0,255,0), 3)
            cand_rect.append(box)
    img_show_hook("candidate rectangles", im)
    return cand_rect
10-PiStorms_icontracker.py (project: PiStorms, author: mindsensors)
def findSquare( self,frame ):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (7, 7), 0)
        edged = cv2.Canny(blurred, 60, 60)
        # find contours in the edge map
        (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # loop over our contours to find hexagon
        cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:50]
        screenCnt = None
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.004 * peri, True)
            # if our approximated contour has four or more points, then
            # we can assume that we have found our square

            if len(approx) >= 4:
                screenCnt = approx
                x, y, w, h = cv2.boundingRect(c)
                cv2.drawContours(frame, [approx], -1, (0, 0, 255), 1)
                #cv2.imshow("Screen", frame)
                # create the mask and remove the rest of the background
                mask = np.zeros(frame.shape[:2], dtype="uint8")
                cv2.drawContours(mask, [screenCnt], -1, 255, -1)
                masked = cv2.bitwise_and(frame, frame, mask=mask)
                #cv2.imshow("Masked", masked)
                # crop the masked image so it can be compared to the reference image
                cropped = masked[y:y+h, x:x+w]
                # scale the crop to a fixed size matching the reference image
                cropped = cv2.resize(cropped, (200, 200), interpolation=cv2.INTER_AREA)

                return cropped
sudoku_steps.py (project: pyku, author: dubvulture)
def extract_corners(self, image):
        """
        Find the 4 corners of a binary image
        :param image: binary image
        :return: 4 main vertices or None
        """
        cnts, _ = cv2.findContours(image.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cnt = cnts[0]
        _, _, h, w = cv2.boundingRect(cnt)
        epsilon = min(h, w) * 0.5
        o_vertices = cv2.approxPolyDP(cnt, epsilon, True)
        vertices = cv2.convexHull(o_vertices, clockwise=True)
        vertices = self.correct_vertices(vertices)

        if self.debug:
            temp = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
            cv2.drawContours(temp, cnts, -1, (0, 255, 0), 10)
            cv2.drawContours(temp, o_vertices, -1, (255, 0, 0), 30)
            cv2.drawContours(temp, vertices, -1, (0, 0, 255), 20)
            self.save2image(temp)

        return vertices
data_preprocessing_autoencoder.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def visualize(frame, coordinates_list, alpha = 0.80, color=[255, 255, 255]):
    """
    Args:
        1. frame:               OpenCV's image which has to be visualized.
        2. coordinates_list:    List of coordinates which will be visualized in the given `frame`
        3. alpha, color:        Some parameters which help in visualizing properly. 
                                A convex hull will be shown for each element in the `coordinates_list` 
    """
    layer = frame.copy()
    output = frame.copy()

    for coordinates in coordinates_list:
        c_hull = cv2.convexHull(coordinates)
        cv2.drawContours(layer, [c_hull], -1, color, -1)

    cv2.addWeighted(layer, alpha, output, 1 - alpha, 0, output)
    cv2.imshow("Output", output)
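A hypothetical call to visualize (the image file and landmark coordinates are illustrative, not from the project); each entry of coordinates_list is an Nx2 integer array of points whose convex hull is blended onto the frame:

import cv2
import numpy as np

frame = cv2.imread("face.jpg")                        # assumed input image
mouth = np.array([[120, 200], [140, 190], [160, 200],
                  [150, 215], [130, 215]], dtype=np.int32)
visualize(frame, [mouth], alpha=0.6, color=[0, 255, 0])
cv2.waitKey(0)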
gesture_hci.py (project: CE264-Computer_Vision, author: RobinCPC)
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
        cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
        crop_res = img_src[Rymin: Rymax, Rxmin:Rxmax]
        grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)

        _, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        cv2.imshow('Thresh', thresh1)
        contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # draw contour on threshold image
        if len(contours) > 0:
            cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)

        return contours, crop_res


# Check ConvexHull  and Convexity Defects
screencp.py (project: OpenAI_Challenges, author: AlwaysLearningDeeper)
def process_img(img):
    original_image=img
    processed_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (3,3), 0 )
    copy=processed_img
    vertices = np.array([[30, 240], [30, 100], [195, 100], [195, 240]])
    processed_img = roi(processed_img, np.int32([vertices]))
    verticesP = np.array([[30, 270], [30, 230], [197, 230], [197, 270]])
    platform = roi(copy, np.int32([verticesP]))
    #                       edges
    #lines = cv2.HoughLinesP(platform, 1, np.pi/180, 180,np.array([]), 3, 2)
    #draw_lines(processed_img,lines)
    #draw_lines(original_image,lines)

    #Platform lines
    #imgray = cv2.cvtColor(platform,cv2.COLOR_BGR2GRAY)
    ret,thresh = cv2.threshold(platform,127,255,0)
    im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(original_image, contours, -1, (0,255,0), 3)
    try:
        platformpos=contours[0][0][0]
    except:
        platformpos=[[0]]
    circles = cv2.HoughCircles(processed_img, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=90, param2=5, minRadius=1, maxRadius=3)

    ballpos=draw_circles(original_image,circles=circles)

    return processed_img,original_image,platform,platformpos,ballpos
piwall.py (project: piwall-cvtools, author: infinnovation)
def gimpMarkup(self, hints = gimpContours, image = "2x2-red-1.jpg", feature = "top-left-monitor"):
        r = Rectangle(*hints[image][feature])
        contour = r.asContour()
        cv2.drawContours(self.img, [contour], -1, (0, 255, 0), 5 )
        title = self.tgen.next(feature)
        if self.show: ImageViewer(self.img).show(window=title, destroy = self.destroy, info = self.info, thumbnailfn = title)
        roi = r.getRoi(self.img)
        self.rois[feature] = roi
        # Histogram the ROI to get the spread of intensities, in each channel and grayscale
        title = '%s-roi.jpg' % feature
        if self.show: ImageViewer(roi).show(window=title, destroy = self.destroy, info = self.info, thumbnailfn = title)
        colors = ('b','g','r')
        for i,col in enumerate(colors):
            hist = cv2.calcHist([roi], [i], None, [256], [0,256])
            plt.plot(hist, color = col)
            plt.xlim([0,256])
            #plt.hist(roi.ravel(), 256, [0,256])
        plt.show()
        cmap = ColorMapper(roi)
        cmap.mapit(1)
        title = self.tgen.next('colourMapping')
        if self.show: ImageViewer(self.img).show(window=title, destroy = self.destroy, info = self.info, thumbnailfn = title)
        cv2.waitKey()
piwall.py (project: piwall-cvtools, author: infinnovation)
def locate(self, all = False, show = False, outimg = None):
        for (transition, mask) in self.transitions:
            if transition == 1:
                sfv3 = SquareFinderV3(mask, cos_limit = 0.5)
                squares = sfv3.find(self.mode)
                if show:
                    SquaresOverlayV4(mask, squares, all = all)
                    SquaresOverlayV4(mask, squares, all = False)
                else:
                    square_contours = [square.contour for square in squares]
                    best_contours_tuples = classify_multi_monitors_contour_set(square_contours)
                    found = mask.copy()
                    self.best_contours = [contour.astype('int32') for (contour, index) in best_contours_tuples]
                    cv2.drawContours( found, self.best_contours, -1, (0,0,255),3)
                    if outimg:
                        cv2.imwrite(outimg, found)
                return self.best_contours
getPMatrix.py (project: AR-BXT-AR4Python, author: GeekLiB)
def drawBox(self, img):
        axis = np.float32([[0,0,0], [0,1,0], [1,1,0], [1,0,0],
                          [0,0,-1],[0,1,-1],[1,1,-1],[1,0,-1] ])
        imgpts, jac = cv2.projectPoints(axis, self.RVEC, self.TVEC, self.MTX, self.DIST)
        imgpts = np.int32(imgpts).reshape(-1,2)

        # draw pillars in blue color
        for i,j in zip(range(4),range(4,8)):
            img2 = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255,0,0),3)

        # draw top layer in red color
        outImg = cv2.drawContours(img2, [imgpts[4:]],-1,(0,0,255),3)

        return outImg

# Debug Code.
crop.py (project: cervix-roi-segmentation-by-unet, author: scottykwok)
def cropCircle(img, resize=None):
    if resize:
        if (img.shape[0] > img.shape[1]):
            tile_size = (int(img.shape[1] * resize / img.shape[0]), resize)
        else:
            tile_size = (resize, int(img.shape[0] * resize / img.shape[1]))
        img = cv2.resize(img, dsize=tile_size, interpolation=cv2.INTER_CUBIC)
    else:
        tile_size = img.shape

    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    _, thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

    _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    main_contour = sorted(contours, key=cv2.contourArea, reverse=True)[0]

    ff = np.zeros((gray.shape[0], gray.shape[1]), 'uint8')
    cv2.drawContours(ff, main_contour, -1, 1, 15)
    ff_mask = np.zeros((gray.shape[0] + 2, gray.shape[1] + 2), 'uint8')
    cv2.floodFill(ff, ff_mask, (int(gray.shape[1] / 2), int(gray.shape[0] / 2)), 1)

    rect = maxRect(ff)
    rectangle = [min(rect[0], rect[2]), max(rect[0], rect[2]), min(rect[1], rect[3]), max(rect[1], rect[3])]
    img_crop = img[rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
    cv2.rectangle(ff, (min(rect[1], rect[3]), min(rect[0], rect[2])), (max(rect[1], rect[3]), max(rect[0], rect[2])), 3,
                  2)

    return [img_crop, rectangle, tile_size]
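A hypothetical call to cropCircle, assuming an OpenCV 3.x runtime and the project's maxRect helper being importable (the file name is illustrative); it returns the cropped image, the crop rectangle, and the working tile size:

import cv2

img = cv2.imread("sample.jpg")                        # assumed input image
img_crop, rectangle, tile_size = cropCircle(img, resize=256)
cv2.imwrite("sample_cropped.jpg", img_crop)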
preprocessing.py (project: pycolor_detection, author: parth1993)
def findSignificantContours(img, sobel_8u, sobel):
    image, contours, hierarchy = cv2.findContours(sobel_8u, \
                                                  cv2.RETR_EXTERNAL, \
                                                  cv2.CHAIN_APPROX_SIMPLE)
    mask = np.ones(image.shape[:2], dtype="uint8") * 255

    level1 = []
    for i, tupl in enumerate(hierarchy[0]):

        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    significant = []
    tooSmall = sobel_8u.size * 10 / 100
    for tupl in level1:
        contour = contours[tupl[0]];
        area = cv2.contourArea(contour)
        if area > tooSmall:
            cv2.drawContours(mask, \
                             [contour], 0, (0, 255, 0), \
                             2, cv2.LINE_AA, maxLevel=1)
            significant.append([contour, area])
    significant.sort(key=lambda x: x[1])
    significant = [x[0] for x in significant];
    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.02 * peri, True)
    mask = sobel.copy()
    mask[mask > 0] = 0
    cv2.fillPoly(mask, significant, 255, 0)
    mask = np.logical_not(mask)
    img[mask] = 0;

    return img
navigation.py (project: srcsim2017, author: ZarjRobotics)
def side_intersect(self, image, contours, row, markup=True):
        """ Find intersections to both sides along a row """
        if markup:
            cv2.line(image, (0, row), (image.shape[1], row), (0, 0, 255), 1)

        cnt_l, col_l = self.find_intersect(image, contours, row, -1)
        if markup and cnt_l is not None:
            cv2.drawContours(image, [contours[cnt_l]], -1, (0, 255, 255), -1)
            cv2.circle(image, (col_l, row), 4, (0, 255, 0), 2)

        cnt_r, col_r = self.find_intersect(image, contours, row, 1)
        if markup and cnt_r is not None:
            cv2.drawContours(image, [contours[cnt_r]], -1, (255, 255, 0), -1)
            cv2.circle(image, (col_r, row), 4, (0, 255, 0), 2)

        return (cnt_l, col_l), (cnt_r, col_r)
find_rect_and_transform.py (project: quadrilaterals-rectifier, author: michal2229)
def extract_rect(im):
    imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)

    ret,thresh = cv2.threshold(imgray, 127, 255, 0)

    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # finding contour with max area
    largest = None
    for cnt in contours:
        if largest is None or cv2.contourArea(cnt) > cv2.contourArea(largest):
            largest = cnt

    peri = cv2.arcLength(largest, True)
    appr = cv2.approxPolyDP(largest, 0.02 * peri, True)

    #cv2.drawContours(im, appr, -1, (0,255,0), 3)
    points_list = [[i[0][0], i[0][1]] for i in appr] 

    left  = sorted(points_list, key = lambda p: p[0])[0:2]
    right = sorted(points_list, key = lambda p: p[0])[2:4]

    print("l " + str(left))
    print("r " + str(right))

    lu = sorted(left, key = lambda p: p[1])[0]
    ld = sorted(left, key = lambda p: p[1])[1]

    ru = sorted(right, key = lambda p: p[1])[0]
    rd = sorted(right, key = lambda p: p[1])[1]

    print("lu " + str(lu))
    print("ld " + str(ld))
    print("ru " + str(ru))
    print("rd " + str(rd))

    # use integer division so the final crop slice gets integer indices
    lu_ = [ (lu[0] + ld[0])//2, (lu[1] + ru[1])//2 ]
    ld_ = [ (lu[0] + ld[0])//2, (ld[1] + rd[1])//2 ]
    ru_ = [ (ru[0] + rd[0])//2, (lu[1] + ru[1])//2 ]
    rd_ = [ (ru[0] + rd[0])//2, (ld[1] + rd[1])//2 ]

    print("lu_ " + str(lu_))
    print("ld_ " + str(ld_))
    print("ru_ " + str(ru_))
    print("rd_ " + str(rd_))

    src_pts = np.float32(np.array([lu, ru, rd, ld]))
    dst_pts = np.float32(np.array([lu_, ru_, rd_, ld_]))

    h,w,b = im.shape
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    print("H" + str(H))

    imw =  cv2.warpPerspective(im, H, (w, h))

    return imw[lu_[1]:rd_[1], lu_[0]:rd_[0]] # cropping image
crop.py (project: idmatch, author: maddevsio)
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.boxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)

    return np.minimum(c_im, ary)
PseudoSquareROI.py (project: dataArtist, author: radjkarl)
def getMask(self, shape):

        p=self.state['pos']
        s=self.state['size']
        center=p + s / 2
        a=self.state['angle']
        # opencv convention:
        shape = (shape[1], shape[0])
        arr1 = np.zeros(shape, dtype=np.uint8)
        arr2 = np.zeros(shape, dtype=np.uint8)

        # draw rotated rectangle:
        vertices = np.int0(cv2.boxPoints((center, s, a)))
        cv2.drawContours(arr1, [vertices], 0, color=1, thickness=-1)
        # draw ellipse:
        cv2.ellipse(arr2, (int(center[0]), int(center[1])), (int(s[0] / 2 * self._ratioEllispeRectangle),
                     int(s[1] / 2 * self._ratioEllispeRectangle)), int(a),
                    startAngle=0, endAngle=360, color=1, thickness=-1)
        # bring both together:
        return np.logical_and(arr1, arr2).T
GridROI.py (project: dataArtist, author: radjkarl)
def getMask(self, shape):

        p = self.state['pos']
        s = self.state['size']
        center = p + s / 2
        a = self.state['angle']
        # opencv convention:
        shape = (shape[1], shape[0])
        arr = np.zeros(shape, dtype=np.uint8)
        # draw rotated rectangle:
        vertices = np.int0(cv2.boxPoints((center, s, a)))
        cv2.drawContours(arr, [vertices],
                         0,
                         color=1,
                         thickness=-1)
        return arr.astype(bool).T
dip.py (project: OpenCV2, author: SarathM1)
def lipSegment(self, img):
        # self.t1 = cv2.getTickCount()
        lipHull = self.dlib_obj.get_landmarks(img)
        cv2.drawContours(img, lipHull, -1, (255, 0, 0), 2)
        (x, y), (MA, ma), angle = cv2.fitEllipse(lipHull)
        a = ma/2
        b = MA/2

        eccentricity = sqrt(pow(a, 2)-pow(b, 2))
        eccentricity = round(eccentricity/a, 2)

        cv2.putText(img, 'E = '+str(round(eccentricity, 3)), (10, 350),
                    self.font, 1, (255, 0, 0), 1)

        if(eccentricity < 0.9):
            self.flags.cmd = 'b'
        else:
            self.flags.cmd = 'f'

        if angle < 80:
            self.flags.cmd = 'l'
        elif angle > 100:
            self.flags.cmd = 'r'

        cv2.putText(img, 'Cmd = ' + self.flags.cmd, (10, 300),  self.font,  1,
                    (0, 0, 255), 1, 16)
        # self.t2 = cv2.getTickCount()
        # print "Time = ", (self.t2-self.t1)/cv2.getTickFrequency()
        return img
Artificial-potential-without-controller.py (project: Artificial-Potential-Field, author: vampcoder)
def classify(img):
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)

    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)

    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #cv2.imshow('image', image)
    #k = cv2.waitKey(0)
    for i in range(len(contours)):
        cnt = contours[i]
        # assumed area range; the original condition (> 35000 and < 15000) can never hold
        if 15000 < cv2.contourArea(cnt) < 35000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    cv2.imshow('arr', arr)
    k = cv2.waitKey(0)
    return arr
Artificial-potential-controller.py (project: Artificial-Potential-Field, author: vampcoder)
def classify(img):
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)

    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)

    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #cv2.imshow('image', image)
    #k = cv2.waitKey(0)
    for i in range(len(contours)):
        cnt = contours[i]
        if cv2.contourArea(cnt) > 3600 and cv2.contourArea(cnt) < 25000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    cv2.imshow('arr', arr)
    k = cv2.waitKey(0)
    return arr
page_dewarp.py (project: page_dewarp, author: mzucker)
def visualize_contours(name, small, cinfo_list):

    regions = np.zeros_like(small)

    for j, cinfo in enumerate(cinfo_list):

        cv2.drawContours(regions, [cinfo.contour], 0,
                         CCOLORS[j % len(CCOLORS)], -1)

    mask = (regions.max(axis=2) != 0)

    display = small.copy()
    display[mask] = (display[mask]/2) + (regions[mask]/2)

    for j, cinfo in enumerate(cinfo_list):
        color = CCOLORS[j % len(CCOLORS)]
        color = tuple([c/4 for c in color])

        cv2.circle(display, fltp(cinfo.center), 3,
                   (255, 255, 255), 1, cv2.LINE_AA)

        cv2.line(display, fltp(cinfo.point0), fltp(cinfo.point1),
                 (255, 255, 255), 1, cv2.LINE_AA)

    debug_show(name, 1, 'contours', display)
page_dewarp.py (project: page_dewarp, author: mzucker)
def visualize_spans(name, small, pagemask, spans):

    regions = np.zeros_like(small)

    for i, span in enumerate(spans):
        contours = [cinfo.contour for cinfo in span]
        cv2.drawContours(regions, contours, -1,
                         CCOLORS[i*3 % len(CCOLORS)], -1)

    mask = (regions.max(axis=2) != 0)

    display = small.copy()
    display[mask] = (display[mask]/2) + (regions[mask]/2)
    display[pagemask == 0] //= 4

    debug_show(name, 2, 'spans', display)
api.py (project: histonets-cv, author: sul-cidr)
def remove_blobs(image, min_area=0, max_area=sys.maxsize, threshold=128,
                 method='8-connected', return_mask=False):
    """Binarize image using threshold, and remove (turn into black)
    blobs of connected pixels of white of size bigger or equal than
    min_area but smaller or equal than max_area from the original image,
    returning it afterward."""
    method = method.lower()
    if method == '4-connected':
        method = cv2.LINE_4
    elif method in ('16-connected', 'antialiased'):
        method = cv2.LINE_AA
    else:  # 8-connected
        method = cv2.LINE_8
    mono_image = binarize_image(image, method='boolean', threshold=threshold)
    _, all_contours, _ = cv2.findContours(mono_image, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_SIMPLE)
    contours = np.array([contour for contour in all_contours
                         if min_area <= cv2.contourArea(contour) <= max_area])
    mask = np.ones(mono_image.shape, np.uint8)
    cv2.drawContours(mask, contours, -1, 0, -1, lineType=method)
    return image, 255 * mask
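A hypothetical call to remove_blobs, assuming the surrounding histonets helpers (binarize_image) are importable and the thresholds suit the scan; the file name and parameters are illustrative:

import cv2

page = cv2.imread("scan.png")                         # assumed input image
cleaned, mask = remove_blobs(page, min_area=2, max_area=50, threshold=160)
cv2.imwrite("scan_blob_mask.png", mask)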
crop_morphology.py (project: PAN-Card-OCR, author: dilippuri)
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)

    return np.minimum(c_im, ary)
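This variant calls cv2.cv.BoxPoints, the OpenCV 2.x name; the idmatch version of remove_border above uses cv2.boxPoints, which is the OpenCV 3+ spelling of the same function. A small compatibility shim, offered as a sketch:

import cv2

def box_points(rect):
    """Corner points of a rotated rect on both OpenCV 2.x and 3+/4.x."""
    if hasattr(cv2, "boxPoints"):       # OpenCV 3.x / 4.x
        return cv2.boxPoints(rect)
    return cv2.cv.BoxPoints(rect)       # OpenCV 2.x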

