Python examples of cv2.RETR_TREE usage

mask_GMM.py (project: intel-cervical-cancer, author: wangg12)
def cropCircle(img):
    '''
    Many of the input images were thresholded when captured, so the content
    appears as a circle surrounded by black. This function finds the largest
    inscribed rectangle in the thresholded image and then crops the image to
    that rectangle.

    input: img - the image as a cv2/NumPy array

    return: img_crop, rectangle, tile_size
    '''
    if(img.shape[0] > img.shape[1]):
        tile_size = (int(img.shape[1]*256/img.shape[0]),256)
    else:
        tile_size = (256, int(img.shape[0]*256/img.shape[1]))

    img = cv2.resize(img, dsize=tile_size)

    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    _, thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

    _, contours, _ = cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)

    main_contour = sorted(contours, key = cv2.contourArea, reverse = True)[0]

    ff = np.zeros((gray.shape[0],gray.shape[1]), 'uint8')
    cv2.drawContours(ff, main_contour, -1, 1, 15)
    ff_mask = np.zeros((gray.shape[0]+2,gray.shape[1]+2), 'uint8')
    cv2.floodFill(ff, ff_mask, (int(gray.shape[1]/2), int(gray.shape[0]/2)), 1)

    rect = maxRect(ff)
    rectangle = [min(rect[0],rect[2]), max(rect[0],rect[2]), min(rect[1],rect[3]), max(rect[1],rect[3])]
    img_crop = img[rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
    cv2.rectangle(ff,(min(rect[1],rect[3]),min(rect[0],rect[2])),(max(rect[1],rect[3]),max(rect[0],rect[2])),3,2)

    return [img_crop, rectangle, tile_size]
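Note: every snippet on this page unpacks three values from cv2.findContours(), which is the OpenCV 3.x signature; OpenCV 2.x and 4.x return only (contours, hierarchy). A minimal compatibility wrapper, sketched here under that assumption (the helper name is mine, not from any of the projects listed), keeps the examples portable:

import cv2

def find_contours_compat(image, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE):
    # Normalise cv2.findContours() across OpenCV versions:
    # 3.x returns (image, contours, hierarchy); 2.x and 4.x return (contours, hierarchy).
    result = cv2.findContours(image, mode, method)
    if len(result) == 3:
        _, contours, hierarchy = result
    else:
        contours, hierarchy = result
    return contours, hierarchy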
segmentation_GMM.py (project: intel-cervical-cancer, author: wangg12)
(cropCircle() here is identical, line for line, to the version from mask_GMM.py above, so it is not repeated.)
contour_tools.py (project: Robo-Plot, author: JackBuck)
def extract_black_contours(img):
    """
    Extract contours for black objects on a white background.

    Args:
        img (np.ndarray): the (black and white?) image

    Returns:
        list[np.ndarray]: the contours
    """
    img_inverted = 255 - img
    _, contours, _ = cv2.findContours(img_inverted, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)
    return contours


# TODO: A ContourGroups class would make this cleaner
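A minimal usage sketch for extract_black_contours(); the file name and threshold below are illustrative assumptions, not part of the Robo-Plot project:

import cv2

page = cv2.imread('page.png', cv2.IMREAD_GRAYSCALE)            # hypothetical input image
_, page_bw = cv2.threshold(page, 127, 255, cv2.THRESH_BINARY)  # black objects on white
contours = extract_black_contours(page_bw)
print("found {} black contours".format(len(contours)))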
digital_display_ocr.py (project: digital-display-character-rec, author: upupnaway)
def find_display_contour(edge_img_arr):
  display_contour = None
  edge_copy = edge_img_arr.copy()
  contours,hierarchy = cv2.findContours(edge_copy, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
  top_cntrs = sorted(contours, key = cv2.contourArea, reverse = True)[:10]

  for cntr in top_cntrs:
    peri = cv2.arcLength(cntr,True)
    approx = cv2.approxPolyDP(cntr, 0.02 * peri, True)

    if len(approx) == 4:
      display_contour = approx
      break

  return display_contour
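A hedged usage sketch for find_display_contour(); the input path, blur kernel and Canny thresholds are illustrative choices, not values taken from digital_display_ocr.py:

import cv2

frame = cv2.imread('display_photo.jpg')                        # hypothetical photo of a display
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 50, 150)  # edge image fed to the function
quad = find_display_contour(edges)
if quad is not None:
    cv2.drawContours(frame, [quad], -1, (0, 255, 0), 2)        # outline the detected display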
DetectField__old_and_not_used.py (project: TableSoccerCV, author: StudentCV)
def FindField(self):
        # Field: hue between 60 and 100
        LowerGreen = np.array([40,0,0])
        UpperGreen = np.array([90,255,150])
        mask = cv2.inRange(self.ImgHSV,LowerGreen,UpperGreen)

#        plt.figure()
#        plt.imshow(mask,cmap='gray')

        mask = self.SmoothFieldMask(mask)
#        plt.figure()
#        plt.imshow(mask.copy(),cmap='gray')

        im2, contours, hierarchy = cv2.findContours(mask.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        if(len(contours) <= 0):
            return
        contours_sorted = sorted(contours, key = cv2.contourArea, reverse=True)[:10]

        peri = cv2.arcLength(contours_sorted[0],True)
        approx = cv2.approxPolyDP(contours_sorted[0], 0.02*peri, True)

        if len(approx) > -1:  # originally: == 4
            self.FieldContours = approx
            cv2.rectangle(mask,(((self.FieldContours[0])[0])[0],((self.FieldContours[0])[0])[1]),(((self.FieldContours[2])[0])[0],((self.FieldContours[2])[0])[1]),(128,128,128),3)
  #          plt.imshow(mask, cmap="gray")
  #          plt.show()
bounding_box.py (project: DAVIS-2016-Chanllege-Solution, author: tangyuhao)
def create_bounding_box(img_path, outfile_name, random_noise = True):
    img = cv2.imread(img_path)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, binary_img = cv2.threshold(img_gray,127,255,cv2.THRESH_BINARY)


    # show the binary_image 
    # plt.subplot(1,1,1), plt.imshow(binary_img,'gray')
    # plt.show()
    image_cnt, contours, _ = cv2.findContours(binary_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    x,y,w,h = cv2.boundingRect(contours[0])
    if random_noise:
        w_org = w
        h_org = h
        x = int(np.random.normal(x, w_org * 0.15, 1))
        y = int(np.random.normal(y, h_org * 0.15, 1))
        w = int(np.random.normal(w, w_org * 0.15, 1))
        h = int(np.random.normal(h, h_org * 0.15, 1))
    height, width, channels = img.shape
    out_img = np.zeros((height,width,1), np.uint8)
    cv2.rectangle(out_img,(x,y),(x+w,y+h),(255,0,0),-1)
    cv2.imwrite(outfile_name, out_img)
picam.py (project: PiCamNN, author: PiSimo)
def movement(mat_1,mat_2):
    mat_1_gray     = cv2.cvtColor(mat_1.copy(),cv2.COLOR_BGR2GRAY)
    mat_1_gray     = cv2.blur(mat_1_gray,(blur1,blur1))
    _,mat_1_gray   = cv2.threshold(mat_1_gray,100,255,0)
    mat_2_gray     = cv2.cvtColor(mat_2.copy(),cv2.COLOR_BGR2GRAY)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur1,blur1))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,100,255,0)
    mat_2_gray     = cv2.bitwise_xor(mat_1_gray,mat_2_gray)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur2,blur2))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,70,255,0)
    mat_2_gray     = cv2.erode(mat_2_gray,np.ones((erodeval,erodeval)))
    mat_2_gray     = cv2.dilate(mat_2_gray,np.ones((4,4)))
    _, contours,__ = cv2.findContours(mat_2_gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        return True   # there was movement between the two frames
    return False      # no movement


#Pedestrian Recognition Thread
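A sketch of how movement() might be driven from a capture loop; blur1, blur2 and erodeval are module-level constants in picam.py, so the values below are placeholders:

import cv2
import numpy as np              # needed by movement() if this lives in the same module

blur1, blur2, erodeval = 5, 9, 7        # placeholder values; picam.py defines its own

cap = cv2.VideoCapture(0)
ok, prev = cap.read()
while ok:
    ok, frame = cap.read()
    if not ok:
        break
    if movement(prev, frame):           # True when the two frames differ enough
        print("motion detected")
    prev = frame
cap.release()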
motionDetect.py (project: Image-Processing-and-Steganogrphy, author: motkeg)
def Q2():
    cap = cv2.VideoCapture(0)

    while(cap.isOpened() ):
        ret = cap.set(3,320)
        ret = cap.set(4,240)
        ret, frame = cap.read()

        if ret==True:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            x,thresh = cv2.threshold(gray,137,255,1)
            contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
            cv2.drawContours(frame, contours,-1, (0,255,0), 3)
            cv2.imshow('Image with contours',frame)    
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break

    cap.release()
    cv2.destroyAllWindows()
Falafel.py (project: Millennium-Eye, author: Elysium1937)
def sizeFiltering(contours):
    #this function filters out the smaller retroreflector (as well as any noise) by size
    #_, contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contours, -1, (255, 255, 255))
    cv2.imshow("imagia", blank_image)
    cv2.waitKey()"""
    if len(contours) == 0:
        print "errorrrrr"
        return 0
    big = contours[0]
    for c in contours:
        if isinstance(c, np.ndarray) and isinstance(big, np.ndarray):
            if cv2.contourArea(c) > cv2.contourArea(big):
                big = c
        else:
            print("error: expected numpy arrays, got", type(c), "and", type(big))
            return 0
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, big, -1, (255, 255, 255))
    cv2.imshow("imagia", blank_image)
    cv2.waitKey()"""
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, big, -1, (255, 255, 255))"""
    x,y,w,h = cv2.boundingRect(big)
    """cv2.rectangle(blank_image, (x,y), (x+w, y+h), (255,255,255))
    cv2.imshow("rect", blank_image)
    cv2.waitKey()"""
    return big
Falafel.py (project: Millennium-Eye, author: Elysium1937)
def shapeFiltering(img):
    contours = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]  # [-2] is the contour list in OpenCV 2.x, 3.x and 4.x
    if len(contours) == 0:
        return "yoopsie"
    #else:
        #print contours
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contours, -1, (255, 255, 255))
    cv2.imshow("imagiae", blank_image)
    cv2.waitKey()"""
    good_shape = []
    for c in contours:
        x,y,w,h = cv2.boundingRect(c)
        """rect = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        w = """
        #if h == 0:
        #    continue
        ratio = w / h
        ratio_grade = ratio / (TMw / TMh)
        if 0.2 < ratio_grade < 1.8:
            good_shape.append(c)
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, good_shape, -1, (255, 255, 255))
    cv2.imshow("imagia", blank_image)
    cv2.waitKey()"""
    return good_shape
handdetector.py (project: deep-prior, author: moberweger)
def track(self, com, size=(250, 250, 250), dsize=(128, 128), doHandSize=True):
        """
        Detect the hand as closest object to camera
        :param size: bounding box size
        :return: center of mass of hand
        """

        # calculate boundaries
        xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)

        # crop patch from source
        cropped = self.getCrop(self.dpt, xstart, xend, ystart, yend, zstart, zend)

        # predict movement of CoM
        if self.refineNet is not None and self.importer is not None:
            rz = self.resizeCrop(cropped, dsize)
            newCom3D = self.refineCoM(rz, size, com) + self.importer.jointImgTo3D(com)
            com = self.importer.joint3DToImg(newCom3D)
            if numpy.allclose(com, 0.):
                com[2] = cropped[cropped.shape[0]//2, cropped.shape[1]//2]
        else:
            raise RuntimeError("Need refineNet for this")

        if doHandSize is True:
            # refined contour for size estimation
            zstart = com[2] - size[2] / 2.
            zend = com[2] + size[2] / 2.
            part_ref = self.dpt.copy()
            part_ref[part_ref < zstart] = 0
            part_ref[part_ref > zend] = 0
            part_ref[part_ref != 0] = 10  # set to something
            ret, thresh_ref = cv2.threshold(part_ref, 1, 255, cv2.THRESH_BINARY)
            contours_ref, _ = cv2.findContours(thresh_ref.astype(dtype=numpy.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            # find the largest contour
            areas = [cv2.contourArea(cc) for cc in contours_ref]
            c_max = numpy.argmax(areas)

            # final result
            return com, self.estimateHandsize(contours_ref[c_max], com, size)
        else:
            return com, size
circle_detector.py (project: esys-pbi, author: fsxfreak)
def find_concetric_circles(gray_img,min_ring_count=3, visual_debug=False):

    # get threshold image used to get crisp-clean edges using blur to remove small features
    edges = cv2.adaptiveThreshold(cv2.blur(gray_img,(3,3)), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 5, 11)
    _, contours, hierarchy = cv2.findContours(edges,
                                    mode=cv2.RETR_TREE,
                                    method=cv2.CHAIN_APPROX_NONE,offset=(0,0)) #TC89_KCOS
    if visual_debug is not False:
        cv2.drawContours(visual_debug,contours,-1,(200,0,0))
    if contours is None or hierarchy is None:
        return []
    clusters = get_nested_clusters(contours,hierarchy[0],min_nested_count=min_ring_count)
    concentric_cirlce_clusters = []

    #speed up code by caching computed ellipses
    ellipses = {}

    # for each cluster, fit ellipses and cull members that don't have a good ellipse fit
    for cluster in clusters:
        if visual_debug is not False:
            cv2.drawContours(visual_debug, [contours[i] for i in cluster],-1, (0,0,255))
        candidate_ellipses = []
        for i in cluster:
            c = contours[i]
            if len(c)>5:
                if not i in ellipses:
                    e = cv2.fitEllipse(c)
                    fit = max(dist_pts_ellipse(e,c))
                    ellipses[i] = e,fit
                else:
                    e,fit = ellipses[i]
                a,b = e[1][0]/2.,e[1][1]/2.
                if fit<max(2,max(e[1])/20):
                    candidate_ellipses.append(e)
                    if visual_debug is not False:
                        cv2.ellipse(visual_debug, e, (0,255,0),1)

        if candidate_ellipses:
            cluster_center = np.mean(np.array([e[0] for e in candidate_ellipses]),axis=0)
            candidate_ellipses = [e for e in candidate_ellipses if np.linalg.norm(e[0]-cluster_center)<max(3,min(e[1])/20) ]
            if len(candidate_ellipses) >= min_ring_count:
                concentric_cirlce_clusters.append(candidate_ellipses)
                if visual_debug is not False:
                    cv2.ellipse(visual_debug, candidate_ellipses[-1], (0,255,255),4)

    # return clusters sorted by size of the outermost circle, biggest first
    return sorted(concentric_cirlce_clusters,key=lambda e:-max(e[-1][1]))
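A short usage sketch for find_concetric_circles(); it assumes the repo's helpers (get_nested_clusters, dist_pts_ellipse) are importable alongside the function, and the image path is illustrative:

import cv2

gray = cv2.imread('marker_frame.jpg', cv2.IMREAD_GRAYSCALE)   # hypothetical grayscale frame
clusters = find_concetric_circles(gray, min_ring_count=3)
print("{} concentric-circle clusters found".format(len(clusters)))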
gesture_hci.py (project: CE264-Computer_Vision, author: RobinCPC)
def single_finger_check(self, cnt):
        # use single finger image to check current fame has single finger
        grey_fin1 = cv2.cvtColor(self.fin1, cv2.COLOR_BGR2GRAY)
        _, thresh_fin1 = cv2.threshold(grey_fin1, 127, 255, 0)
        contour_fin1, hierarchy = cv2.findContours(thresh_fin1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cnt1 = contour_fin1[0]
        ret1 = cv2.matchShapes(cnt, cnt1, 1, 0)

        grey_fin2 = cv2.cvtColor(self.fin2, cv2.COLOR_BGR2GRAY)
        _, thresh_fin2 = cv2.threshold(grey_fin2, 127, 255, 0)
        contour_fin2, hierarchy = cv2.findContours(thresh_fin2.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cnt2 = contour_fin2[0]
        ret2 = cv2.matchShapes(cnt, cnt2, 1, 0)

        grey_fin3 = cv2.cvtColor(self.fin3, cv2.COLOR_BGR2GRAY)
        _, thresh_fin3 = cv2.threshold(grey_fin3, 127, 255, 0)
        contour_fin3, hierarchy = cv2.findContours(thresh_fin3.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cnt3 = contour_fin3[0]
        ret3 = cv2.matchShapes(cnt, cnt3, 1, 0)
        reta = (ret1 + ret2 + ret3)/3
        if reta <= 0.3:
            return 5        # set as one-finger module
        else:
            return 0        # not detect, still 0


# Use PyAutoGUI to control mouse event
piwall.py (project: piwall-cvtools, author: infinnovation)
def find(self, mode):
        self.mode = mode
        if mode == 1:
            self.modeDesc = 'Run gaussianBlur(), then cannyThresholding'
            self.gaussianBlur()
            self.cannyThresholding()
        elif mode == 2:
            # Check Gimp Hints
            self.gimpMarkup()
            #self.cannyThresholding()
            self.modeDesc = 'Run gimpMarkup'
        elif mode == 3:
            # Massively mask red as a precursor phase
            self.gaussianBlur()
            self.colourMapping()
            self.solidRedFilter()
            #self.cannyThresholding()
            self.modeDesc = 'Run gaussianBlur(), colourMapping(), solidRedFilter(), #cannyThresholding'
        elif mode == 4:
            self.modeDesc = 'Run gaussianBlur(), then cannyThresholding with RETR_EXTERNAL contour removal mode'
            self.gaussianBlur()
            self.cannyThresholding(cv2.RETR_EXTERNAL)
        elif mode == 5:
            self.modeDesc = 'Run gaussianBlur(), then cannyThresholding with RETR_TREE contour removal mode'
            self.gaussianBlur()
            self.cannyThresholding(cv2.RETR_TREE)
        # Apply heuristics to filter out false positives
        self.squares = filterContoursRemove(self.img, self.squares)
        return self.squares
piwall.py (project: piwall-cvtools, author: infinnovation)
def binaryContoursNestingFilterHeuristic(img, cnts, *args, **kwargs):
    '''
    Concept  : Use the found contours, with binary drawn contours to extract hierarchy and hence filter on nesting.
    Critique : WIP
    '''
    # Set the image to black (0): 
    img[:,:] = (0,0,0)
    # Draw all of the contours on the image in white
    contours = [c.contour for c in cnts]
    cv2.drawContours( img, contours, -1, (255, 255, 255), 1 )
    iv = ImageViewer(img)
    iv.windowShow()
    # Now extract any channel
    gray = cv2.split(img)[0]
    iv = ImageViewer(gray)
    iv.windowShow()
    retval, bin = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    iv = ImageViewer(bin)
    iv.windowShow()
    # Now find the contours again, but this time we care about hierarchy (hence _TREE) - we get back next, previous, first_child, parent
    bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    iv = ImageViewer(bin)
    iv.windowShow()
    # Alternative flags : only take the external contours
    bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    iv = ImageViewer(bin)
    iv.windowShow()
    return cnts
piwall.py (project: piwall-cvtools, author: infinnovation)
def usage():
    print('''
    piwall.py 
     --vssdemo|-v rotating    : iterate the VideoSquareSearch over rotating video, and output located data in piwall-search-mono.avi
     --vssdemo|-v album       : iterate the VideoSquareSearch over sequence of images, and output located data in album.avi
     --sfv3mode|-s [mode 1-3] : run the SquareLocatorV3 algorithm : set the mode 1-3     < default image 2x2-red-1.jpg >
                               : 1 => call gaussianBlur(); cannyThresholding()
                               : 2 => call gimpMarkup()
                               : 3 => call gaussianBlur(); colourMapping(); solidRedFilter(); [#cannyThresholding]
                               : 4 => as 1, but with cv2.RETR_EXTERNAL as contour_retrieval_mode
                               : 5 => as 1, but with cv2.RETR_TREE as contour_retrieval_mode, then filter only outermost contours
                               : 6 => new model which takes a series of images which have transitions that identify the monitors.
     --sfv3img|-i [image path]: run the SquareFinderV3 algorithm  : set the input image  < default mode 1>
     --sfv4glob|-g [image glob pattern] : set the series of input images to be pattern-[%03d].jpg
    ''')
find_bibs.py (project: bib-tagger, author: KateRita)
def find_contours(image):
  #return cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
  #return cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
  return cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
predict.py (project: cervix-roi-segmentation-by-unet, author: scottykwok)
def find_bbox(mask_binary, margin_factor=None):
    ret, thresh = cv2.threshold(mask_binary, 127, 255, 0)
    _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Find the index of the largest contour
    areas = [cv2.contourArea(c) for c in contours]
    if len(areas) == 0:
        return (0, 0, mask_binary.shape[0], mask_binary.shape[1], False)
    else:
        max_index = np.argmax(areas)
        cnt = contours[max_index]

        x, y, w, h = cv2.boundingRect(cnt)

        if margin_factor is not None and margin_factor > 0:
            wm = w * margin_factor
            hm = h * margin_factor
            x -= wm
            y -= hm
            w += 2 * wm
            h += 2 * hm
            x = max(0, x)
            y = max(0, y)
            X = min(x + w, mask_binary.shape[1])
            Y = min(y + h, mask_binary.shape[0])
            w = X - x
            h = Y - y
        return (int(x), int(y), int(w), int(h), True)
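A short usage sketch for find_bbox(); the mask and image paths and the 10% margin are illustrative assumptions:

import cv2

mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)   # hypothetical binary mask
x, y, w, h, ok = find_bbox(mask, margin_factor=0.1)
if ok:
    img = cv2.imread('image.png')                      # the image the mask belongs to
    roi = img[y:y + h, x:x + w]                        # crop the region of interest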
app.py (project: slide_captcha_cracker, author: chxj1992)
def get_operator(path, url=False, expand=False):
    if url:
        req = requests.get(path)
        arr = np.asarray(bytearray(req.content), dtype=np.uint8)
        shape = cv2.resize(cv2.imdecode(arr, -1), (69, 69))
    else:
        shape = cv2.resize(cv2.imread('shape.png'), (69, 69))

    shape_gray = cv2.cvtColor(shape, cv2.COLOR_BGR2GRAY)

    _, shape_binary = cv2.threshold(shape_gray, 127, 255, cv2.THRESH_BINARY)

    _, contours, hierarchy = cv2.findContours(shape_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    contour = contours[0]

    operator = np.zeros((69, 69))

    for point in contour:
        operator[point[0][0]][point[0][1]] = 1
        if expand:
            if point[0][0] > 0:
                operator[point[0][0] - 1][point[0][1]] = 1
            if point[0][0] < 68:
                operator[point[0][0] + 1][point[0][1]] = 1
            if point[0][1] > 0:
                operator[point[0][0]][point[0][1] - 1] = 1
            if point[0][1] < 68:
                operator[point[0][0]][point[0][1] + 1] = 1

    return operator

