Example source code for Python cv2.RETR_EXTERNAL

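All of the snippets below pass cv2.RETR_EXTERNAL to cv2.findContours, but they unpack the return value in different ways because the function's signature changed between OpenCV releases (2.x and 4.x return two values, 3.x returns three). The following is a minimal sketch, not taken from any of the projects below, of a version-agnostic call; the file name and helper name are chosen purely for illustration.

import cv2

def external_contours(binary):
    # cv2.findContours returns (contours, hierarchy) on OpenCV 2.x/4.x and
    # (image, contours, hierarchy) on 3.x; keeping the last two items works on all.
    contours, hierarchy = cv2.findContours(binary.copy(),
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)[-2:]
    return contours

# hypothetical usage on a thresholded image
img = cv2.imread('example.png', cv2.IMREAD_GRAYSCALE)
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
outer = external_contours(binary)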
sudoku.py source file, project: pyku, author: dubvulture
def extract_corners(self, image):
        """
        Find the 4 corners of a binary image
        :param image: binary image
        :return: 4 main vertices or None
        """
        cnts, _ = cv2.findContours(image.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cnt = cnts[0]
        _, _, h, w = cv2.boundingRect(cnt)
        epsilon = min(h, w) * 0.5
        vertices = cv2.approxPolyDP(cnt, epsilon, True)
        vertices = cv2.convexHull(vertices, clockwise=True)
        vertices = self.correct_vertices(vertices)

        return vertices
tracking.py source file, project: APEX, author: ymollard
def find_center(self, name, frame, mask, min_radius):
        if name not in self.pts:
            self.pts[name] = deque(maxlen=self.params['tracking']['buffer_size'])

        # find contours in the mask and initialize the current (x, y) center of the ball
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use it to compute the minimum enclosing circle and centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            center = (int(x), int(y))

            # only proceed if the radius meets a minimum size
            if radius > min_radius:
                # draw the circle and centroid on the frame, then update the list of tracked points
                cv2.circle(frame, center, int(radius), (0, 255, 255), 2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)
                self.pts[name].appendleft(center)
                smooth_points = 8
                return (int(np.mean([self.pts[name][i][0] for i in range(min(smooth_points, len(self.pts[name])))])),
                        int(np.mean([self.pts[name][i][1] for i in range(min(smooth_points, len(self.pts[name])))]))), radius
        return None, None
10-PiStorms_icontracker.py source file, project: PiStorms, author: mindsensors
def findSquare( self,frame ):
        image = frame
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (7, 7), 0)
        edged = cv2.Canny(blurred, 60, 60)
        # find contours in the edge map
        (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # loop over our contours to find hexagon
        cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:50]
        screenCnt = None
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.004 * peri, True)
            # if our approximated contour has four points, then
            # we can assume that we have found our square

            if len(approx) >= 4:
                screenCnt = approx
                x,y,w,h = cv2.boundingRect(c)
                cv2.drawContours(image, [approx], -1, (0, 0, 255), 1)
                #cv2.imshow("Screen", image)
                #create the mask and remove rest of the background
                mask = np.zeros(image.shape[:2], dtype = "uint8")
                cv2.drawContours(mask, [screenCnt], -1, 255, -1)
                masked = cv2.bitwise_and(image, image, mask = mask)
                #cv2.imshow("Masked",masked  )
                #crop the masked image to be compared to the reference image
                cropped = masked[y:y+h,x:x+w]
                #scale the image to a fixed size matching the reference image
                cropped = cv2.resize(cropped, (200,200), interpolation =cv2.INTER_AREA)

                return cropped
sudoku_steps.py source file, project: pyku, author: dubvulture
def extract_corners(self, image):
        """
        Find the 4 corners of a binary image
        :param image: binary image
        :return: 4 main vertices or None
        """
        cnts, _ = cv2.findContours(image.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cnt = cnts[0]
        _, _, h, w = cv2.boundingRect(cnt)
        epsilon = min(h, w) * 0.5
        o_vertices = cv2.approxPolyDP(cnt, epsilon, True)
        vertices = cv2.convexHull(o_vertices, clockwise=True)
        vertices = self.correct_vertices(vertices)

        if self.debug:
            temp = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
            cv2.drawContours(temp, cnts, -1, (0, 255, 0), 10)
            cv2.drawContours(temp, o_vertices, -1, (255, 0, 0), 30)
            cv2.drawContours(temp, vertices, -1, (0, 0, 255), 20)
            self.save2image(temp)

        return vertices
piwall.py source file, project: piwall-cvtools, author: infinnovation
def cannyThresholding(self, contour_retrieval_mode = cv2.RETR_LIST):
        '''
        contour_retrieval_mode is passed through as second argument to cv2.findContours
        '''

        # Attempt to match edges found in blue, green or red channels : collect all
        channel = 0
        for gray in cv2.split(self.img):
            channel += 1
            print('channel %d ' % channel)
            title = self.tgen.next('channel-%d' % channel)
            if self.show: ImageViewer(gray).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
            found = {}
            for thrs in xrange(0, 255, 26):
                print('Using threshold %d' % thrs)
                if thrs == 0:
                    print('First step')
                    bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                    title = self.tgen.next('canny-%d' % channel)
                    if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                    bin = cv2.dilate(bin, None)
                    title = self.tgen.next('canny-dilate-%d' % channel)
                    if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                else:
                    retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
                    title = self.tgen.next('channel-%d-threshold-%d' % (channel, thrs))
                    if self.show: ImageViewer(bin).show(window='Next threshold (n to continue)', destroy = self.destroy, info = self.info, thumbnailfn = title)
                bin, contours, hierarchy = cv2.findContours(bin, contour_retrieval_mode, cv2.CHAIN_APPROX_SIMPLE)
                title = self.tgen.next('channel-%d-threshold-%d-contours' % (channel, thrs))
                if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                if contour_retrieval_mode == cv2.RETR_LIST or contour_retrieval_mode == cv2.RETR_EXTERNAL:
                    filteredContours = contours
                else:
                    filteredContours = []
                    h = hierarchy[0]
                    for component in zip(contours, h):
                        currentContour = component[0]
                        currentHierarchy = component[1]
                        if currentHierarchy[3] < 0:
                            # Found the outermost parent component
                            filteredContours.append(currentContour)
                    print('Contours filtered.   Input %d  Output %d' % (len(contours), len(filteredContours)))
                    time.sleep(5)
                for cnt in filteredContours:
                    cnt_len = cv2.arcLength(cnt, True)
                    cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                    cnt_len = len(cnt)
                    cnt_area = cv2.contourArea(cnt)
                    cnt_isConvex = cv2.isContourConvex(cnt)
                    if cnt_len == 4 and (cnt_area > self.area_min and cnt_area < self.area_max)  and cnt_isConvex:
                        cnt = cnt.reshape(-1, 2)
                        max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                        if max_cos < self.cos_limit :
                            sq = Square(cnt, cnt_area, cnt_isConvex, max_cos)
                            self.squares.append(sq)
                        else:
                            #print('dropped a square with max_cos %f' % max_cos)
                            pass
                found[thrs] = len(self.squares)
                print('Found %d quadrilaterals with threshold %d' % (len(self.squares), thrs))
frying.py source file, project: DeepFryBot, author: asdvek
def find_chars(img):
    gray = np.array(img.convert("L"))
    ret, mask = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
    image_final = cv2.bitwise_and(gray, gray, mask=mask)
    ret, new_img = cv2.threshold(image_final, 180, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(new_img, kernel, iterations=1)
    # Image.fromarray(dilated).save('out.png') # for debugging
    _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    coords = []
    for contour in contours:
        # get rectangle bounding contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # ignore large chars (probably not chars)
        if w > 70 and h > 70:
            continue
        coords.append((x, y, w, h))
    return coords
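A small usage sketch for find_chars above (file names are hypothetical): it expects a PIL image, since it calls img.convert("L"), and returns a list of (x, y, w, h) boxes that can be drawn back onto the image.

import cv2
import numpy as np
from PIL import Image

img = Image.open('sample.png')                       # hypothetical input
canvas = cv2.cvtColor(np.array(img.convert('RGB')), cv2.COLOR_RGB2BGR)
for x, y, w, h in find_chars(img):
    # draw each detected character box in green
    cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.imwrite('chars_boxed.png', canvas)               # hypothetical output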


# find list of eye coordinates in image
test_nox.py source file, project: Yugioh-bot, author: will7200
def test_initial_pass_through_compare(self):
        original = cv2.imread(os.path.join(self.provider.assets, "start_screen.png"))
        against = self.provider.get_img_from_screen_shot()
        wrong = cv2.imread(os.path.join(self.provider.assets, "battle.png"))

        # convert the images to grayscale
        original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
        against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
        wrong = mask_image([127], [255], cv2.cvtColor(wrong, cv2.COLOR_BGR2GRAY), True)
        # initialize the figure
        (score, diff) = compare_ssim(original, against, full=True)
        diff = (diff * 255).astype("uint8")
        self.assertTrue(score > .90, 'If this is less than .90 the initial compare of the app will fail')
        (score, nothing) = compare_ssim(original, wrong, full=True)
        self.assertTrue(score < .90)
        if self.__debug_pictures__:
            # threshold the difference image, followed by finding contours to
            # obtain the regions of the two input images that differ
            thresh = cv2.threshold(diff, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0]
            # loop over the contours
            for c in cnts:
                # compute the bounding box of the contour and then draw the
                # bounding box on both input images to represent where the two
                # images differ
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.rectangle(against, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # show the output images
            diffs = ("Original", original), ("Modified", against), ("Diff", diff), ("Thresh", thresh)
            images = ("Original", original), ("Against", against), ("Wrong", wrong)
            self.setup_compare_images(diffs)
            self.setup_compare_images(images)
pyfrp_zstack_module.py source file, project: PyFRAP, author: alexblaessle
def getContours(img,kernel=(10,10)):

    #Define kernel
    kernel = np.ones(kernel, np.uint8)

    #Open to erode small patches
    thresh = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)

    #Close little holes
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE,kernel, iterations=4)

    #Find contours
    #contours=skimsr.find_contours(thresh,0)

    thresh=thresh.astype('uint8')
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)

    areas=[]
    for c in contours:
        areas.append(cv2.contourArea(c))

    return contours,thresh,areas
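A possible way to consume the three return values of getContours (the name binary_img is illustrative): the areas list is parallel to contours, so the largest region can be picked with argmax.

import numpy as np

# binary_img is assumed to be a single-channel image suitable for findContours
contours, thresh, areas = getContours(binary_img, kernel=(10, 10))
if len(areas) > 0:
    biggest = contours[int(np.argmax(areas))]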
preprocessing.py source file, project: pycolor_detection, author: parth1993
def findSignificantContours(img, sobel_8u, sobel):
    image, contours, heirarchy = cv2.findContours(sobel_8u, \
                                                  cv2.RETR_EXTERNAL, \
                                                  cv2.CHAIN_APPROX_SIMPLE)
    mask = np.ones(image.shape[:2], dtype="uint8") * 255

    level1 = []
    for i, tupl in enumerate(heirarchy[0]):

        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    significant = []
    tooSmall = sobel_8u.size * 10 / 100
    for tupl in level1:
        contour = contours[tupl[0]];
        area = cv2.contourArea(contour)
        if area > tooSmall:
            cv2.drawContours(mask, \
                             [contour], 0, (0, 255, 0), \
                             2, cv2.LINE_AA, maxLevel=1)
            significant.append([contour, area])
    significant.sort(key=lambda x: x[1])
    significant = [x[0] for x in significant];
    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.02 * peri, True)
    mask = sobel.copy()
    mask[mask > 0] = 0
    cv2.fillPoly(mask, significant, 255, 0)
    mask = np.logical_not(mask)
    img[mask] = 0;

    return img
navigation.py source file, project: srcsim2017, author: ZarjRobotics
def find_contours(mask, smooth_factor=0.005):
        """ Find the contours in a given mask """
        border = 5
        # Canny detection breaks down with the edge of the image
        my_mask = cv2.copyMakeBorder(mask, border, border, border, border,
                                     cv2.BORDER_CONSTANT, value=(0, 0, 0))

        my_mask = cv2.cvtColor(my_mask, cv2.COLOR_BGR2GRAY)

        if is_cv2():
            contours, _ = cv2.findContours(my_mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, _ = cv2.findContours(my_mask, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)

        # shift the contours back down
        for contour in contours:
            for pnt in contour:
                if pnt[0][1] > border:
                    pnt[0][1] = pnt[0][1] - border
                else:
                    pnt[0][1] = 0
                if pnt[0][0] > border:
                    pnt[0][0] = pnt[0][0] - border
                else:
                    pnt[0][0] = 0

        closed_contours = []
        for contour in contours:
            epsilon = smooth_factor*cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, epsilon, True)
            area = cv2.contourArea(approx)
            # if they are too small they are not edges
            if area < 200:
                continue
            closed_contours.append(approx)

        return closed_contours
cv_utils.py source file, project: vision, author: SouthEugeneRoboticsTeam
def get_largest(im, n):
    # Find contours of the shape
    major = cv2.__version__.split('.')[0]
    if major == '3':
        _, contours, _ = cv2.findContours(im.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, _ = cv2.findContours(im.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Cycle through contours and add area to array
    areas = []
    for c in contours:
        areas.append(cv2.contourArea(c))

    # Sort array of areas by size
    sorted_areas = sorted(zip(areas, contours), key=lambda x: x[0], reverse=True)

    if sorted_areas and len(sorted_areas) >= n:
        # Find nth largest using data[n-1][1]
        return sorted_areas[n - 1][1]
    else:
        return None
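A short usage sketch for get_largest (the mask name is illustrative, and cv2 is assumed to be imported): request the single largest contour and, if one exists, take its bounding box.

largest = get_largest(binary_mask, 1)
if largest is not None:
    x, y, w, h = cv2.boundingRect(largest)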
Minimap.py source file, project: osrmacro, author: jjvilm
def findFishingIcon():
    #fish color
    low = np.array([93,119,84])
    high = np.array([121,255,255])
    mask, mm_x, mm_y = get_mini_map_mask(low, high)

    _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        (x, y, w, h) = cv2.boundingRect(c)
        x += mm_x
        y += mm_y
        x2 = x + w
        y2 = y + h
        Mouse.randMove(x,y,x2,y2,1)
        run= 0
        RandTime.randTime(1,0,0,1,9,9)
        return 0
    return 1
fish_guild.py source file, project: osrmacro, author: jjvilm
def findBankIcon(self):
        # bank color
        low = np.array([26,160,176])
        high = np.array([27,244,228])
        mask, mm_x, mm_y = self.mini_map_mask(low, high)

        cv2.imshow('mask', mask)
        cv2.waitKey(0)

        _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        for c in contours:
            (x, y, w, h) = cv2.boundingRect(c)
            x += 568 
            y += 36
            x2 = x + w 
            y2 = y + h 
            Mouse.randMove(x,y,x2,y2,1)
            run= 0 
            time.sleep(1)
            return
main.py source file, project: document-layout-analysis, author: rbaguila
def process_letter(thresh,output):  
    # assign the kernel size    
    kernel = np.ones((2,1), np.uint8) # vertical
    # use closing morph operation then erode to narrow the image    
    temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel,iterations=3)
    # temp_img = cv2.erode(thresh,kernel,iterations=2)      
    letter_img = cv2.erode(temp_img,kernel,iterations=1)

    # find contours 
    (contours, _) = cv2.findContours(letter_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    # loop in all the contour areas
    for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)

    return output   


#processing letter by letter boxing
main.py source file, project: document-layout-analysis, author: rbaguila
def process_word(thresh,output):    
    # assign 2 rectangle kernel size 1 vertical and the other will be horizontal    
    kernel = np.ones((2,1), np.uint8)
    kernel2 = np.ones((1,4), np.uint8)
    # use closing morph operation but fewer iterations than the letter then erode to narrow the image   
    temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel,iterations=2)
    #temp_img = cv2.erode(thresh,kernel,iterations=2)   
    word_img = cv2.dilate(temp_img,kernel2,iterations=1)

    (contours, _) = cv2.findContours(word_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)

    return output   

#processing line by line boxing
main.py source file, project: document-layout-analysis, author: rbaguila
def process_line(thresh,output):    
    # assign a rectangle kernel size    1 vertical and the other will be horizontal
    kernel = np.ones((1,5), np.uint8)
    kernel2 = np.ones((2,4), np.uint8)  
    # use closing morph operation but fewer iterations than the letter then erode to narrow the image   
    temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel2,iterations=2)
    #temp_img = cv2.erode(thresh,kernel,iterations=2)   
    line_img = cv2.dilate(temp_img,kernel,iterations=5)

    (contours, _) = cv2.findContours(line_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)

    return output   

#processing par by par boxing
vision.py source file, project: Vision2016, author: Team3309
def fix_target_perspective(contour, bin_shape):
    """
    Fixes the perspective so it always looks as if we are viewing it head-on
    :param contour:
    :param bin_shape: numpy shape of the binary image matrix
    :return: a new version of contour with corrected perspective, a new binary image to test against,
    """
    before_warp = np.zeros(bin_shape, np.uint8)
    cv2.drawContours(before_warp, [contour], -1, 255, -1)

    try:
        corners = get_corners(contour)

        # get a perspective transformation so that the target is warped as if it was viewed head on
        shape = (400, 280)
        dest_corners = np.array([(0, 0), (shape[0], 0), (0, shape[1]), (shape[0], shape[1])], np.float32)
        warp = cv2.getPerspectiveTransform(corners, dest_corners)
        fixed_perspective = cv2.warpPerspective(before_warp, warp, shape)
        fixed_perspective = fixed_perspective.astype(np.uint8)

        if int(cv2.__version__.split('.')[0]) >= 3:
            _, contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        new_contour = contours[0]

        return new_contour, fixed_perspective

    except ValueError:
        raise ValueError('Failed to detect rectangle')
piwall.py source file, project: piwall-cvtools, author: infinnovation
def find(self, mode):
        self.mode = mode
        if mode == 1:
            self.modeDesc = 'Run gaussianBlur(), then cannyThresholding'
            self.gaussianBlur()
            self.cannyThresholding()
        elif mode == 2:
            # Check Gimp Hints
            self.gimpMarkup()
            #self.cannyThresholding()
            self.modeDesc = 'Run gimpMarkup'
        elif mode == 3:
            # Massively mask red as a precursor phase
            self.gaussianBlur()
            self.colourMapping()
            self.solidRedFilter()
            #self.cannyThresholding()
            self.modeDesc = 'Run gaussianBlur(), colourMapping(), solidRedFilter(), #cannyThresholding'
        elif mode == 4:
            self.modeDesc = 'Run gaussianBlur(), then cannyThresholding with RETR_EXTERNAL contour removal mode'
            self.gaussianBlur()
            self.cannyThresholding(cv2.RETR_EXTERNAL)
        elif mode == 5:
            self.modeDesc = 'Run gaussianBlur(), then cannyThresholding with RETR_TREE contour removal mode'
            self.gaussianBlur()
            self.cannyThresholding(cv2.RETR_TREE)
        # Apply heuristics to filter out false positives
        self.squares = filterContoursRemove(self.img, self.squares)
        return self.squares
piwall.py source file, project: piwall-cvtools, author: infinnovation
def binaryContoursNestingFilterHeuristic(img, cnts, *args, **kwargs):
    '''
    Concept  : Use the found contours, with binary drawn contours to extract hierarchy and hence filter on nesting.
    Critique : WIP
    '''
    # Set the image to black (0): 
    img[:,:] = (0,0,0)
    # Draw all of the contours on the image in white
    contours = [c.contour for c in cnts]
    cv2.drawContours( img, contours, -1, (255, 255, 255), 1 )
    iv = ImageViewer(img)
    iv.windowShow()
    # Now extract any channel
    gray = cv2.split(img)[0]
    iv = ImageViewer(gray)
    iv.windowShow()
    retval, bin = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    iv = ImageViewer(bin)
    iv.windowShow()
    # Now find the contours again, but this time we care about hierarchy (hence _TREE) - we get back next, previous, first_child, parent
    bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    iv = ImageViewer(bin)
    iv.windowShow()
    # Alternative flags : only take the external contours
    bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    iv = ImageViewer(bin)
    iv.windowShow()
    return cnts
piwall.py source file, project: piwall-cvtools, author: infinnovation
def usage():
    print('''
    piwall.py 
     --vssdemo|-v rotating    : iterate the VideoSquareSearch over rotating video, and output located data in piwall-search-mono.avi
     --vssdemo|-v album       : iterate the VideoSquareSearch over sequence of images, and output located data in album.avi
     --sfv3mode|-s [mode 1-3] : run the SquareLocatorV3 algorithm : set the mode 1-3     < default image 2x2-red-1.jpg >
                               : 1 => call gaussianBlur(); cannyThresholding()
                               : 2 => call gimpMarkup()
                               : 3 => call gaussianBlur(); colourMapping(); solidRedFilter(); [#cannyThresholding]
                               : 4 => as 1, but with cv2.RETR_EXTERNAL as contour_retrieval_mode
                               : 5 => as 1, but with cv2.RETR_TREE as contour_retrieval_mode, then filter only outermost contours
                               : 6 => new model which takes a series of images which have transitions that identify the monitors.
     --sfv3img|-i [image path]: run the SquareFinderV3 algorithm  : set the input image  < default mode 1>
     --sfv4glob|-g [image glob pattern] : set the series of input images to be pattern-[%03d].jpg
    ''')
find_bibs.py source file, project: bib-tagger, author: KateRita
def find_contours(image):
  #return cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE);
  #return cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);
  return cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE);
ColoredObjectDetector.py source file, project: robot-camera-platform, author: danionescu0
def find(self, image):
        hsv_frame = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_frame, self.__hsv_bounds[0], self.__hsv_bounds[1])
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        if len(contours) == 0:
            return (False, False)
        largest_contour = max(contours, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(largest_contour)
        M = cv2.moments(largest_contour)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        return (center, radius)
dreammarket.py source file, project: dminer, author: infosecanon
def process_captcha(self, image):
        """
        TODO: DOC
        """
        cv2_img = cv2.cvtColor(numpy.array(image), cv2.COLOR_BGR2GRAY)

        # Find the threshold of the image so that we can identify contours.
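        # Note: cv2.threshold expects a THRESH_* flag here; cv2.ADAPTIVE_THRESH_GAUSSIAN_C
        # has the integer value 1, the same as cv2.THRESH_BINARY_INV, so this call
        # effectively produces an inverted binary image rather than an adaptive threshold.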
        ret, thresh = cv2.threshold(
            cv2_img,
            127,
            255,
            cv2.ADAPTIVE_THRESH_GAUSSIAN_C
        )
        # Find the contours of the image
        _, contours, hierarchy = cv2.findContours(
            thresh,
            cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE
        )

        # Find the largest contour in the image with 4 points. This is the
        # rectangle that is required to crop to for the captcha.
        largest_contour = None
        for contour in contours:
            if (len(cv2.approxPolyDP(contour, 0.1*cv2.arcLength(contour, True), True)) == 4) and (2500 < cv2.contourArea(contour) < 4000):
                if isinstance(largest_contour, type(None)):
                    largest_contour = contour
                    continue
                if cv2.contourArea(contour) > cv2.contourArea(largest_contour):
                    largest_contour = contour
        # If we don't have a matching contour, don't try to crop and such
        if isinstance(largest_contour, type(None)):
            return None

        # If we do have a matching contour, build the rectangle
        crop_x, crop_y, crop_width, crop_height = cv2.boundingRect(
            largest_contour
        )
        # Crop down to the contour rectangle
        image = image.crop(
            (
                crop_x,
                crop_y,
                crop_x + crop_width,
                crop_y + crop_height
            )
        )
        return image
segmentation.py source file, project: pyceratOpsRecs, author: USCSoftwareEngineeringClub
def segment(im):
    """
    :param im:
        Image to detect digits and operations in

    """

    gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) #grayscale
    blur = cv2.GaussianBlur(gray,(5,5),0) #smooth image to reduce noise
    #adaptive thresholding for different lighting conditions
    thresh = cv2.adaptiveThreshold(blur,255,1,1,11,2)

    #################     Now finding Contours     ###################
    image,contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    samples =  np.empty((0,100))
    keys = [i for i in range(48,58)]

    for cnt in contours:
        if cv2.contourArea(cnt) > 20:
            [x,y,w,h] = cv2.boundingRect(cnt)

            #Draw bounding box for it, then resize to 10x10, and store its pixel values in an array
            if  h>1:
                cv2.rectangle(im,(x,y),(x+w,y+h),(0,0,255),2)
                roi = thresh[y:y+h,x:x+w]
                roismall = cv2.resize(roi,(10,10))
                cv2.imshow('detecting',im)
                key = cv2.waitKey(0)

                if key == 27:  # (escape to quit)
                    sys.exit()
                else: #press any key to continue
                    sample = roismall.reshape((1,100))
                    samples = np.append(samples,sample,0)

    print "segmentation complete"

    cv2.imwrite('data/seg_result.png',im)
    np.savetxt('data/generalsamples.data',samples)
things.py source file, project: srcsim2017, author: ZarjRobotics
def is_detector(self, img):
        """ This uses color to determine if we have a detector, and if so, returns where
            the big screen and smaller screen is in the subimage """
        lower = np.array([190, 190, 0], dtype = "uint8")
        upper = np.array([255, 255, 100], dtype = "uint8")
        mask = cv2.inRange(img, lower, upper)

        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]
        if len(contours) == 0:
            return None, False

        sorted_contours = sorted(contours, cmp=lambda a,b: int(cv2.contourArea(b)) - int(cv2.contourArea(a)))
        center, radius = cv2.minEnclosingCircle(sorted_contours[0])
        up = True
        if len(contours) > 1:
            center2, radius = cv2.minEnclosingCircle(sorted_contours[1])
            if center2[1] < center[1]:
                up = False

        if self.debug:
            debug_img = img.copy()
            cv2.drawContours(debug_img, [sorted_contours[0]], -1, (0, 255, 0), 2)
            cv2.imshow("cont", debug_img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        return center, up
things.py source file, project: srcsim2017, author: ZarjRobotics
def is_repair_tool(self, img):
        """ This uses color to determine if we have a repair tool, and if so, returns where
            the button is located within the provided subimage """
        lower = np.array([190, 0, 0], dtype = "uint8")
        upper = np.array([255, 125, 100], dtype = "uint8")
        mask = cv2.inRange(img, lower, upper)

        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]
        if len(contours) == 0:
            return None, False

        sorted_contours = sorted(contours, cmp=lambda a,b: int(cv2.contourArea(b)) - int(cv2.contourArea(a)))
        center, radius = cv2.minEnclosingCircle(sorted_contours[0])
        up = True
        if center[1] > (img.shape[0] / 2):
            up = False

        if self.debug:
            debug_img = img.copy()
            cv2.drawContours(debug_img, [sorted_contours[0]], -1, (0, 255, 0), 2)
            cv2.imshow("cont", debug_img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        return center, up
things.py source file, project: srcsim2017, author: ZarjRobotics
def _find_power_plug_thing(self):
        """ Find the power plug at the solar array box """

        """ This uses color to determine if we have a choke """
        lower = np.array([100, 40, 0], dtype = "uint8")
        upper = np.array([255, 255, 20], dtype = "uint8")
        mask = cv2.inRange(self.img, lower, upper)

        blurred = cv2.GaussianBlur(mask, (5, 5), 0)
        thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

        contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]

        sorted_contours = sorted(contours, cmp=lambda a,b: int(cv2.contourArea(b)) - int(cv2.contourArea(a)))

        if len(sorted_contours) > 0:
            plug = self._find_a_thing(sorted_contours[0], 0, 0.06, 0, 0.06, 99.0)

            if plug is not None:
                plug.set_power_plug()
                self.things.append(plug)
                self.power_plug = plug

                if self.debug:
                    debug_img = self.img.copy()
                    for c in sorted_contours:
                        cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)
                    cv2.imshow("plug picture", debug_img)
                    cv2.setMouseCallback("plug picture", self.handle_mouse)
                    cv2.waitKey(0)
                    cv2.destroyAllWindows()
training_eyes.py source file, project: srcsim2017, author: ZarjRobotics
def process_image(self, cv_image, header, tag):
        """ process the image """
        hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

        # mask for color range
        if self.color_range:
            mask = cv2.inRange(hsv, self.color_range[0], self.color_range[1])
            count = cv2.countNonZero(mask)
            if count:
                kernel = np.ones((5, 5), np.uint8)
                mask = cv2.dilate(mask, kernel, iterations=2)
                contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)

                for i, c in enumerate(contours):
                    x, y, w, h = cv2.boundingRect(c)
                    if self.prefix is not None:
                        name = '{0}{1}_{2}_{3}.png'.format(self.prefix,
                                                           tag,
                                                           header.seq, i)
                        print name
                        roi = cv_image[y:y+h, x:x+w]
                        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                        gray = cv2.equalizeHist(gray)
                        cv2.imwrite(name, gray)

                for c in contours:
                    x, y, w, h = cv2.boundingRect(c)
                    cv2.rectangle(cv_image, (x, y), (x+w, y+h), (0, 255, 0))
            elif self.prefix is not None:
                name = '{0}Negative_{1}_{2}.png'.format(self.prefix, tag,
                                                        header.seq, )
                cv2.imwrite(name, cv_image)

        cv2.namedWindow(tag, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(tag, 600, 600)
        cv2.imshow(tag, cv_image)
        cv2.waitKey(1)
retrotape_old.py source file, project: StormCV2017, author: 2729StormRobotics
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
retrotape.py source file, project: StormCV2017, author: 2729StormRobotics
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours

