Python usage examples of cv2.THRESH_BINARY_INV
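cv2.THRESH_BINARY_INV is the inverted binary threshold type: destination pixels are set to the given maximum value where the source pixel is at or below the threshold, and to 0 where it is above. The snippets below are collected from various open-source projects. As a quick orientation, here is a minimal, self-contained sketch (the input file name is a placeholder) contrasting the plain and inverted variants, with and without Otsu's automatic threshold selection:

import cv2

img = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input image

# THRESH_BINARY:     dst = 255 where img > 127, else 0
# THRESH_BINARY_INV: dst = 0   where img > 127, else 255
_, bw = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
_, bw_inv = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)

# combined with THRESH_OTSU, the passed threshold (0 here) is ignored and chosen automatically
otsu_value, bw_otsu_inv = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
print('Otsu chose threshold', otsu_value)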

scanchars.py (project: handfontgen, author: nixeneko)
def getmarkerboundingrect(img, mkpos, mksize):
    buffer = int(mksize * 0.15)
    x = mkpos[0] - buffer
    y = mkpos[1] - buffer
    w = mksize + buffer*2
    h = mksize + buffer*2
    roi = img[y:y+h, x:x+w]

    grayroi = getgrayimage(roi)
    ret, binimage = cv2.threshold(grayroi,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binimage)
    # stats[0], centroids[0] are for the background label. ignore
    # cv2.CC_STAT_LEFT, cv2.CC_STAT_TOP, cv2.CC_STAT_WIDTH, cv2.CC_STAT_HEIGHT
    lblareas = stats[1:,cv2.CC_STAT_AREA]
    imax = max(enumerate(lblareas), key=(lambda x: x[1]))[0] + 1
    boundingrect = Rect(stats[imax, cv2.CC_STAT_LEFT],
                        stats[imax, cv2.CC_STAT_TOP], 
                        stats[imax, cv2.CC_STAT_WIDTH], 
                        stats[imax, cv2.CC_STAT_HEIGHT])
    return boundingrect.addoffset((x,y))
slantcorrection.py (project: handfontgen, author: nixeneko)
def getmarkercenter(image, pos):
    mkradius = getapproxmarkerradius(image)
    buffer = int(mkradius * 0.15)
    roisize = mkradius + buffer # half of the height or width
    x = pos[0] - roisize
    y = pos[1] - roisize
    w = 2 * roisize
    h = 2 * roisize
    roi = image[y:y+h, x:x+w]

    grayroi = getgrayimage(roi)
    ret, binimage = cv2.threshold(grayroi,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binimage)
    # stats[0], centroids[0] are for the background label. ignore
    lblareas = stats[1:,cv2.CC_STAT_AREA]

    ave = np.average(centroids[1:], axis=0, weights=lblareas)
    return tuple(np.array([x, y]) + ave) # weighted average pos of centroids
helpers.py (project: opencv-helpers, author: abarrak)
def adaptive_threshold(image, above_thresh_assigned=255, kind='mean', cell_size=35, c_param=17,
                       thresh_style=cv.THRESH_BINARY_INV):
  '''
  :param kind: the adaptive method, either 'mean' or 'gaussian'.
  :param cell_size: n for the region size (n x n).
  :param c_param: subtraction constant.
  :return: a binary version of the input image.
  '''
  if kind == 'mean':
    method = cv.ADAPTIVE_THRESH_MEAN_C
  elif kind == 'gaussian':
    method = cv.ADAPTIVE_THRESH_GAUSSIAN_C
  else:
    raise ValueError('Unknown adaptive threshold method.')

  return cv.adaptiveThreshold(image, above_thresh_assigned, method, thresh_style, cell_size, c_param)
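A brief usage sketch for the helper above (the input path is an assumption; cv.adaptiveThreshold requires a single-channel 8-bit image, so the source is loaded as grayscale):

import cv2 as cv

gray = cv.imread('document.png', cv.IMREAD_GRAYSCALE)  # hypothetical grayscale input
binary = adaptive_threshold(gray, kind='gaussian')      # dark text becomes white on a black background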
forest.py (project: checkmymeat, author: kendricktan)
def predict(url):
    global model      
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)    

    # Use otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    features = describe(image, mask)

    state = le.inverse_transform(model.predict([features]))[0]
    return {'type': state}
frying.py (project: DeepFryBot, author: asdvek)
def find_chars(img):
    gray = np.array(img.convert("L"))
    ret, mask = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
    image_final = cv2.bitwise_and(gray, gray, mask=mask)
    ret, new_img = cv2.threshold(image_final, 180, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(new_img, kernel, iterations=1)
    # Image.fromarray(dilated).save('out.png') # for debugging
    _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    coords = []
    for contour in contours:
        # get rectangle bounding contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # ignore large chars (probably not chars)
        if w > 70 and h > 70:
            continue
        coords.append((x, y, w, h))
    return coords
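A brief usage sketch for find_chars (the input file name is an assumption): it expects a PIL image, since it calls img.convert("L"), and returns (x, y, w, h) bounding boxes:

from PIL import Image

boxes = find_chars(Image.open('meme.png'))  # hypothetical input image
for x, y, w, h in boxes:
    print(x, y, w, h)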


# find list of eye coordinates in image
test_nox.py (project: Yugioh-bot, author: will7200)
def test_initial_pass_through_compare(self):
        original = cv2.imread(os.path.join(self.provider.assets, "start_screen.png"))
        against = self.provider.get_img_from_screen_shot()
        wrong = cv2.imread(os.path.join(self.provider.assets, "battle.png"))

        # convert the images to grayscale
        original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
        against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
        wrong = mask_image([127], [255], cv2.cvtColor(wrong, cv2.COLOR_BGR2GRAY), True)
        # initialize the figure
        (score, diff) = compare_ssim(original, against, full=True)
        diff = (diff * 255).astype("uint8")
        self.assertTrue(score > .90, 'If this is less than .90 the initial compare of the app will fail')
        (score, nothing) = compare_ssim(original, wrong, full=True)
        self.assertTrue(score < .90)
        if self.__debug_pictures__:
            # threshold the difference image, followed by finding contours to
            # obtain the regions of the two input images that differ
            thresh = cv2.threshold(diff, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0]
            # loop over the contours
            for c in cnts:
                # compute the bounding box of the contour and then draw the
                # bounding box on both input images to represent where the two
                # images differ
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.rectangle(against, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # show the output images
            diffs = ("Original", original), ("Modified", against), ("Diff", diff), ("Thresh", thresh)
            images = ("Original", original), ("Against", against), ("Wrong", wrong)
            self.setup_compare_images(diffs)
            self.setup_compare_images(images)
page.py (project: skastic, author: mypalmike)
def load(self, filename, analyze_only):
    # Load image, then do various conversions and thresholding.
    self.img_orig = cv2.imread(filename, cv2.IMREAD_COLOR)

    if self.img_orig is None:
      raise CompilerException("File '{}' not found".format(filename))

    self.img_grey = cv2.cvtColor(self.img_orig, cv2.COLOR_BGR2GRAY)
    _, self.img_contour = cv2.threshold(self.img_grey, 250, 255, cv2.THRESH_BINARY_INV)
    _, self.img_text = cv2.threshold(self.img_grey, 150, 255, cv2.THRESH_BINARY)
    self.root_node = None

    self.contours = self.find_contours()

    self.contour_lines, self.contour_nodes = self.categorize_contours()

    self.build_graph()
    self.build_parse_tree()

    self.parse_nodes()

    if not analyze_only:
      self.python_ast = self.root_node.to_python_ast()
AnalizeFrame.py (project: serbian-alpr, author: golubaca)
def foreground(self, image, smooth=False, grayscale=False):
        """
        Extract foreground from background
        :param image:
        :param smooth:
        :param grayscale:
        :return:
        """
        if smooth and grayscale:
            image = self.toGrayscale(image)
            image = self.smooth(image)
        elif smooth:
            image = self.smooth(image)
        elif grayscale:
            image = self.toGrayscale(image)
        fgmask = self.fgbg.apply(image)
        ret, mask = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY_INV)
        mask_inv = cv2.bitwise_not(mask)
        return mask_inv
extract_color.py (project: python-image-processing, author: karaage0703)
def extract_color( src, h_th_low, h_th_up, s_th, v_th ):
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    if h_th_low > h_th_up:
        ret, h_dst_1 = cv2.threshold(h, h_th_low, 255, cv2.THRESH_BINARY) 
        ret, h_dst_2 = cv2.threshold(h, h_th_up,  255, cv2.THRESH_BINARY_INV)

        dst = cv2.bitwise_or(h_dst_1, h_dst_2)
    else:
        ret, dst = cv2.threshold(h,   h_th_low, 255, cv2.THRESH_TOZERO) 
        ret, dst = cv2.threshold(dst, h_th_up,  255, cv2.THRESH_TOZERO_INV)
        ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY)

    ret, s_dst = cv2.threshold(s, s_th, 255, cv2.THRESH_BINARY)
    ret, v_dst = cv2.threshold(v, v_th, 255, cv2.THRESH_BINARY)
    dst = cv2.bitwise_and(dst, s_dst)
    dst = cv2.bitwise_and(dst, v_dst)
    return dst
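The first branch above handles hue ranges that wrap around the hue axis (OpenCV stores hue as 0-179 for 8-bit images), which is the usual case when extracting red. A short usage sketch with assumed threshold values and input:

import cv2

frame = cv2.imread('frame.png')  # hypothetical BGR input
# red wraps around hue 0, so the lower bound (170) is above the upper bound (10)
red_mask = extract_color(frame, 170, 10, 80, 80)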
main_function.py (project: edison_developing, author: vincentchung)
def camera_gesture_trigger():
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),0)
    ret,thresh1 = cv2.threshold(blur,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

    contours, hierarchy = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    max_area=0

    for i in range(len(contours)):
        cnt=contours[i]
        area = cv2.contourArea(cnt)
        if(area>max_area):
            max_area=area
            ci=i
    cnt=contours[ci]
    hull = cv2.convexHull(cnt)
    moments = cv2.moments(cnt)

    cnt = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
    hull = cv2.convexHull(cnt,returnPoints = False)

    defects = cv2.convexityDefects(cnt,hull)                    

    if defects is not None:         
        if defects.shape[0] >= 5:
            return 1

    return 0
preprocessor_eval.py (project: HandwritingRecognition, author: eng-tsmith)
def thresholding(img_grey):
    """
    This function creates binary images using thresholding
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)

    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret3, img_binary = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)

    # invert black = 255
    ret, thresh1 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)

    return thresh1
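For reference, the Otsu pass plus the fixed inversion above can be collapsed into a single call by combining THRESH_BINARY_INV with THRESH_OTSU, as several other snippets on this page do. A minimal equivalent sketch (the function name is hypothetical):

import cv2 as cv

def thresholding_one_step(img_grey):
    # Otsu picks the threshold automatically; the INV flag produces the inverted result directly
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    _, binary_inv = cv.threshold(blur, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
    return binary_inv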
preprocessor.py (project: HandwritingRecognition, author: eng-tsmith)
def thresholding(img_grey):
    """
    This function creates binary images using thresholding
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Global
    # ret1, thresh1 = cv.threshold(img_grey, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh1)
    #
    # # Adaptive Mean
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
    # ret2, thresh2 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh2)
    #
    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
    # ret3, thresh3 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh3)

    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret4, img_otsu = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    ret4, thresh4 = cv.threshold(img_otsu, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh4)

    return thresh4
athreshold_plg.py (project: opencv-plgs, author: Image-Py)
def run(self, ips, snap, img, para = None):
        med = cv2.ADAPTIVE_THRESH_MEAN_C if para['med']=='mean' else cv2.ADAPTIVE_THRESH_GAUSSIAN_C
        mtype = cv2.THRESH_BINARY_INV if para['inv'] else cv2.THRESH_BINARY
        # mtype carries the THRESH_BINARY / THRESH_BINARY_INV choice computed above
        cv2.adaptiveThreshold(snap, para['max'], med, mtype, para['size'], para['offset'], dst=img)
ch4.py (project: diagnose-heart, author: woshialex)
def process_data():
    all_data = []
    img_size = 256
    contour_path= os.path.join(c.data_manual, 'manual_contours_ch4', 'contours')
    image_path = os.path.join(c.data_manual, 'manual_contours_ch4', 'images')
    for fn in [f for f in os.listdir(contour_path) if 'jpg' in f]:
        if not os.path.exists(os.path.join(image_path, fn)):
            continue
        img = cv2.imread(os.path.join(image_path, fn), 0)
        img = cv2.resize(img, (img_size,img_size)).reshape(1,1,img_size,img_size)
        label = cv2.imread(os.path.join(contour_path, fn), 0)
        label = cv2.resize(label, (img_size,img_size))
        _,label = cv2.threshold(label, 127,255,cv2.THRESH_BINARY_INV)
        label = label.reshape(1,1,img_size,img_size)/255
        all_data.append([img,label])
    np.random.shuffle(all_data)
    all_imgs = np.concatenate([a[0] for a in all_data], axis=0)
    all_labels = np.concatenate([a[1] for a in all_data], axis=0)
    n = all_imgs.shape[0]
    destpath = os.path.join(c.data_intermediate, 'ch4_{}.hdf5'.format(img_size))
    if os.path.exists(destpath):
        os.remove(destpath)
    u.save_hd5py({'images': all_imgs, 'labels': all_labels}, destpath, 5)
morphology_utils.py (project: Shoe-Shape-Classifier, author: jrzaurin)
def threshold_img(img):
    """
    Simple wrapper function for cv2.threshold()
    """

    is_color = len(img.shape) == 3
    is_grey  = len(img.shape) == 2

    t = threshold_value(img)

    if is_color:
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    elif is_grey:
        gray = img.copy()

    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    (_, thresh) = cv2.threshold(blurred, t*255, 1, cv2.THRESH_BINARY_INV)

    return thresh
split.py (project: captcha-python-test, author: hanc00l)
def clean_bg(filename):
    image = cv2.imread(filename,0)
    new_image = np.zeros(image.shape, np.uint8)
    height,width= image.shape

    for i in range(height):
        for j in range(width):
            new_image[i,j] = image[i,j]#max(image[i,j][0],image[i,j][1],image[i,j][2])

    ret,new_image = cv2.threshold(new_image,180,255,cv2.THRESH_BINARY_INV)
    border_width = 2
    new_image = new_image[border_width:height-border_width,border_width:width-border_width]
    #cv2.imshow('invImage',new_image)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

    return new_image
vis_tools.py (project: baxter, author: destrygomorphous)
def make_mask(limb, filename):

    """
    Given a limb (right or left) and a name to save to
    (in the baxter_tools/share/images/ directory),
    create a mask of any dark objects in the image from the camera
    and save it.
    """

    image_sub = rospy.Subscriber(
        '/cameras/' + limb + '_hand_camera/image',Image,callback)


    try:
        bridge = CvBridge()
        cv_image = bridge.imgmsg_to_cv2(img, "bgr8")
    except CvBridgeError, e:
        print e

    # 'img' is presumably set by the subscriber callback above;
    # cv2.threshold returns (retval, mask), so keep only the mask
    _, msk = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY_INV)
    return msk
tslsr.py (project: Speedy-TSLSR, author: talhaHavadar)
def recognizeDigit(digit, method = REC_METHOD_TEMPLATE_MATCHING, threshold= 55):
    """
        Finds the best match for the given digit(RGB or gray color scheme). And returns the result and percentage as an integer.
        @threshold percentage of similarity
    """
    __readDigitTemplates()
    digit = digit.copy()
    if digit.shape[2] == 3:
        digit = cv2.cvtColor(digit, cv2.COLOR_RGB2GRAY)
    ret, digit = cv2.threshold(digit, 90, 255, cv2.THRESH_BINARY_INV)
    bestDigit = -1
    if method == REC_METHOD_TEMPLATE_MATCHING:
        bestMatch = None
        for i in range(len(__DIGIT_TEMPLATES)):
            template = __DIGIT_TEMPLATES[i].copy()

            if digit.shape[1] < template.shape[1]:
                template = cv2.resize(template, (digit.shape[1], digit.shape[0]))
            else:
                digit = cv2.resize(digit, (template.shape[1], template.shape[0]))

            result = cv2.matchTemplate(digit, template, cv2.TM_CCORR_NORMED)#cv2.TM_CCOEFF_NORMED)
            (_, max_val, _, max_loc) = cv2.minMaxLoc(result)
            if bestMatch is None or max_val > bestMatch:
                bestMatch = max_val
                bestDigit = i
                print("New Best Match:", bestMatch, bestDigit)

    if (bestMatch * 100) >= threshold:
        return (bestDigit, bestMatch * 100)

    return (-1, 0)
imgproc_RotOtsu.py (project: CE264-Computer_Vision, author: RobinCPC)
def parse_arg(argv):
    '''
    parsing cli arguments
    '''
    parser = argparse.ArgumentParser(description='image processing: rotation and binarization.')
    parser.add_argument('-i', '--inpf', default='IMG_0531-2.jpg', help='input image file')
    parser.add_argument('-r', '--rotate', type=float, default=0, help='the angle (deg) of rotation (CCW).')
    parser.add_argument('-b', '--binarize', type=int, default=0, help='binarization method: 0->THRESH_BINARY, 1->THRESH_BINARY_INV')
    return parser.parse_args(argv[1:])
forest.py (project: checkmymeat, author: kendricktan)
def predict(url):
    global model, COOKED_PHRASES, RAW_PHRASES   
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)    

    # Use otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    # Get features
    features = describe(image, mask)

    # Predict it
    result = model.predict([features])
    probability = model.predict_proba([features])[0][result][0]        
    state = le.inverse_transform(result)[0]

    phrase = ''

    if 'cook' in state:
        phrase = COOKED_PHRASES[int(random.random()*len(COOKED_PHRASES))]
    elif 'raw' in state:
        phrase = RAW_PHRASES[int(random.random()*len(RAW_PHRASES))]

    return {'type': state, 'confidence': probability, 'phrase': phrase}
ogrid_node.py (project: lqRRT, author: jnez71)
def make_image(self, image_path):
        img = cv2.imread(image_path, 0)
        if img is None:
            print "Image not found at '{}'".format(image_path)
            return

        img = cv2.resize(img, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
        _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
        self.img = np.clip(img, -1, 100)
grabcut.py (project: svm-street-detector, author: morris-frank)
def grabcuthm(im, hm):
    size = hm.shape

    bright = np.amax(hm)

    ret,fgd = cv2.threshold(hm, FGD_BOUND * bright, 1 * bright, cv2.THRESH_BINARY)
    fgd[1:size[0]/2] = 0
    fgd[1:size[0], 1:size[1]/4] = 0
    fgd[1:size[0], size[1]*3/4:size[1]] = 0

    ret,pr_fgd = cv2.threshold(hm, FGD_BGD_SEP * bright, 1 * bright, cv2.THRESH_BINARY)
    pr_fgd -= fgd

    ret, bgd = cv2.threshold(hm, BGD_BOUND * bright, 1 * bright, cv2.THRESH_BINARY_INV)
    bgd[size[0]/3:size[0]] = 0

    ret,pr_bgd = cv2.threshold(hm, FGD_BGD_SEP * bright, 1 * bright, cv2.THRESH_BINARY_INV)
    pr_bgd -= bgd

    mask = cv2.GC_BGD * bgd + cv2.GC_FGD * fgd + cv2.GC_PR_BGD * pr_bgd + cv2.GC_PR_FGD * pr_fgd
    mask = mask.astype(np.uint8, copy=False)

    bgdModel = np.zeros((1,65),np.float64)
    fgdModel = np.zeros((1,65),np.float64)

    rect = (0, im.shape[:2][0]/2, im.shape[:2][1], im.shape[:2][0])

    cv2.grabCut(im, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
    mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')

    return mask2
testing.py (project: retinal-exudates-detection, author: getsanjeev)
def extract_bv(image):          
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    f4 = cv2.subtract(R3,contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # noise removal: drop very small contours based on their area
    ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)            
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)            
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)   

    # remove microaneurysm blobs and other unwanted chunks: unlike blood vessels they are not
    # elongated, and their area falls within a certain range
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)    
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"    
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)   

    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)  
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
training.py (project: retinal-exudates-detection, author: getsanjeev)
def extract_bv(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    f4 = cv2.subtract(R3,contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # noise removal: drop very small contours based on their area
    ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)            
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)            
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)   

    # remove microaneurysm blobs and other unwanted chunks: unlike blood vessels they are not
    # elongated, and their area falls within a certain range
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)    
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"    
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)   

    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)  
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
shadow_removal.py (project: vehicle_detection, author: AuzanMuh)
def hsvPassShadowRemoval(src, shadowThreshold):
    blurLevel = 3
    height, width = src.shape[:2]
    imgHSV = cv2.cvtColor(src, cv2.COLOR_RGB2HSV)
    gaussianBlur = cv2.GaussianBlur(imgHSV, (blurLevel, blurLevel), 0)
    hueImg, satImg, valImg = cv2.split(gaussianBlur)

    NSVDI = np.zeros((height, width, 1), np.uint8)
    count = height * width
    with np.errstate(divide='ignore'):
        # for i in range(0, height):
        #    for j in range(0, width):
        #       sat = int(satImg[i, j])
        #       val = int(valImg[i, j])
        #       NSVDI[i, j] = (satImg[i, j] - valImg[i, j]) / ((satImg[i, j] + valImg[i, j]) * 1.0)
        # NSVDI = (S - V) / (S + V), as in the per-pixel loop above, computed in floating point
        NSVDI = (satImg.astype(np.float32) - valImg) / ((satImg.astype(np.float32) + valImg) * 1.0)
    thresh = np.sum(NSVDI)
    avg = thresh / (count * 1.0)

    # for i in range(0, height):
    #    for j in range(0, width):
    #       if NSVDI[i, j] >= 0.25:
    #           hueImg[i, j] = 255
    #           satImg[i, j] = 255
    #           valImg[i, j] = 255
    #       else:
    #           hueImg[i, j] = 0
    #           satImg[i, j] = 0
    #           valImg[i, j] = 0

    # fall back to the computed average unless an explicit threshold was given
    if shadowThreshold is not None:
        avg = shadowThreshold

    # binarize: pixels at or below avg become 255, the rest 0
    _, threshold = cv2.threshold(NSVDI, avg, 255, cv2.THRESH_BINARY_INV)

    output = threshold
    return output
page_dewarp.py (project: page_dewarp, author: mzucker)
def get_mask(name, small, pagemask, masktype):

    sgray = cv2.cvtColor(small, cv2.COLOR_RGB2GRAY)

    if masktype == 'text':

        mask = cv2.adaptiveThreshold(sgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY_INV,
                                     ADAPTIVE_WINSZ,
                                     25)

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.1, 'thresholded', mask)

        mask = cv2.dilate(mask, box(9, 1))

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.2, 'dilated', mask)

        mask = cv2.erode(mask, box(1, 3))

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.3, 'eroded', mask)

    else:

        mask = cv2.adaptiveThreshold(sgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY_INV,
                                     ADAPTIVE_WINSZ,
                                     7)

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.4, 'thresholded', mask)

        mask = cv2.erode(mask, box(3, 1), iterations=3)

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.5, 'eroded', mask)

        mask = cv2.dilate(mask, box(8, 2))

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.6, 'dilated', mask)

    return np.minimum(mask, pagemask)
eclipse_renderer.py (project: eclipse2017, author: google)
def find_contours(self, image):
        image = qimage_to_numpy(image)
        gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        #_,thresh = cv2.threshold(gray,150,255,cv2.THRESH_BINARY_INV)
        # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
        # dilated = cv2.dilate(gray,kernel,iterations = 13)
        contours, hierarchy = cv2.findContours(gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        return contours
helpers.py (project: opencv-helpers, author: abarrak)
def fixed_threshold(image, thresh_value=120, above_thresh_assigned=255, thresh_style=cv.THRESH_BINARY_INV):
  '''
  :param thresh_value: the threshold constant.
  :param thresh_style: can be any of the following.
                      cv.THRESH_BINARY
                      cv.THRESH_BINARY_INV
                      cv.THRESH_TRUNC
                      cv.THRESH_TOZERO
                      cv.THRESH_TOZERO_INV
  '''
  ret, thresholded = cv.threshold(image, thresh_value, above_thresh_assigned, thresh_style)
  return thresholded
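A short usage sketch for fixed_threshold (file name assumed); with the default THRESH_BINARY_INV style, pixels at or below thresh_value become 255:

import cv2 as cv

gray = cv.imread('scan.png', cv.IMREAD_GRAYSCALE)  # hypothetical grayscale input
mask = fixed_threshold(gray, thresh_value=120)      # dark regions -> 255, light regions -> 0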
helpers.py (project: opencv-helpers, author: abarrak)
def otsu_threshold(image, above_thresh_assigned=255, thresh_style=cv.THRESH_BINARY_INV):
  ''' apply otsu's binarization algorithm to find optimal threshold value. '''
  ret, thresholded = cv.threshold(image, 0, above_thresh_assigned, thresh_style  + cv.THRESH_OTSU)
  return { 'otsu_thresh': ret, 'image': thresholded }
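Since the helper returns both the threshold Otsu selected and the binarized image, a usage sketch (input path assumed) looks like:

import cv2 as cv

gray = cv.imread('scan.png', cv.IMREAD_GRAYSCALE)  # hypothetical grayscale input
result = otsu_threshold(gray)
print('Otsu threshold:', result['otsu_thresh'])
binary_inv = result['image']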

