Python cv2.THRESH_OTSU example source code

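All of the snippets below pass the cv2.THRESH_OTSU flag to cv2.threshold, which tells OpenCV to ignore the supplied threshold value and compute one automatically from the image histogram (Otsu's method). As a quick orientation, here is a minimal, self-contained sketch of the usual pattern; the file name "sample.png" is a placeholder and is not taken from any of the projects listed here.

import cv2

# Read an image and convert it to grayscale; Otsu's method expects
# a single-channel 8-bit image.
image = cv2.imread("sample.png")   # placeholder path
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# A light Gaussian blur usually stabilises the histogram before Otsu.
blurred = cv2.GaussianBlur(gray, (5, 5), 0)

# With THRESH_OTSU the threshold argument (0 here) is ignored; the value
# OpenCV actually picks is returned as otsu_value, and binary is the result.
otsu_value, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print("Otsu threshold:", otsu_value)
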
find_bibs.py (project: bib-tagger, author: KateRita)
def find_bibs(image):
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  binary = cv2.GaussianBlur(gray,(5,5),0)
  ret,binary = cv2.threshold(binary, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
  #binary = cv2.adaptiveThreshold(binary, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
  #ret,binary = cv2.threshold(binary, 190, 255, cv2.THRESH_BINARY);

  #lapl = cv2.Laplacian(image,cv2.CV_64F)
  #gray = cv2.cvtColor(lapl, cv2.COLOR_BGR2GRAY);
  #blurred = cv2.GaussianBlur(lapl,(5,5),0)
  #ret,binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #cv2.imwrite("lapl.jpg", lapl)

  edges = cv2.Canny(image,175,200)
  cv2.imwrite("edges.jpg", edges)
  binary = edges

  cv2.imwrite("binary.jpg", binary)
  contours,hierarchy = find_contours(binary)

  return get_rectangles(contours)
gesture_hci.py (project: CE264-Computer_Vision, author: RobinCPC)
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
        cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
        crop_res = img_src[Rymin: Rymax, Rxmin:Rxmax]
        grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)

        _, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        cv2.imshow('Thresh', thresh1)
        contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # draw contour on threshold image
        if len(contours) > 0:
            cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)

        return contours, crop_res


# Check ConvexHull  and Convexity Defects
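
The comment above refers to a convex hull / convexity defect check that is not part of this excerpt. The sketch below shows how such a check is commonly written against the contours returned by find_contour; the function name and the bare defect count are illustrative assumptions, not code from the CE264-Computer_Vision project.

import cv2

def count_convexity_defects(contour):
    # convexHull with returnPoints=False returns indices into the contour,
    # which is the form cv2.convexityDefects expects.
    hull = cv2.convexHull(contour, returnPoints=False)
    if hull is None or len(hull) < 3:
        return 0
    defects = cv2.convexityDefects(contour, hull)
    if defects is None:
        return 0
    # Each defect row holds (start index, end index, farthest point index, distance).
    return defects.shape[0]
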
model.py (project: ConditionalGAN, author: seungjooli)
def detect_edges(images):
        def blur(image):
            return cv2.GaussianBlur(image, (5, 5), 0)

        def canny_otsu(image):
            scale_factor = 255
            scaled_image = np.uint8(image * scale_factor)

            otsu_threshold = cv2.threshold(
                cv2.cvtColor(scaled_image, cv2.COLOR_RGB2GRAY), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[0]
            lower_threshold = max(0, int(otsu_threshold * 0.5))
            upper_threshold = min(255, int(otsu_threshold))
            edges = cv2.Canny(scaled_image, lower_threshold, upper_threshold)
            edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)

            return np.float32(edges) * (1 / scale_factor)

        blurred = [blur(image) for image in images]
        canny_applied = [canny_otsu(image) for image in blurred]

        return canny_applied
find_bibs.py (project: bib-tagger, author: KateRita)
def find_bib(image):
  height, width, depth = image.shape  # numpy image shape is (rows, cols, channels)

  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  #gray = cv2.equalizeHist(gray)
  blurred = cv2.GaussianBlur(gray,(5,5),0)

  debug_output("find_bib_blurred", blurred)
  #binary = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blockSize=25, C=0);
  ret,binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
  #ret,binary = cv2.threshold(blurred, 170, 255, cv2.THRESH_BINARY);
  debug_output("find_bib_binary", binary)
  threshold_contours,hierarchy = find_contours(binary)

  debug_output("find_bib_threshold", binary)

  edges = cv2.Canny(gray, 175, 200, apertureSize=3)
  edge_contours,hierarchy = find_contours(edges)

  debug_output("find_bib_edges", edges)

  contours = threshold_contours + edge_contours
  debug_output_contours("find_bib_threshold_contours", image, contours)

  rectangles = get_rectangles(contours)

  debug_output_contours("find_bib_rectangles", image, rectangles)

  potential_bibs = [rect for rect in rectangles if is_potential_bib(rect, width*height)]

  debug_output_contours("find_bib_potential_bibs", image, potential_bibs)

  ideal_aspect_ratio = 1.0
  potential_bibs = sorted(potential_bibs, key = lambda bib: abs(aspect_ratio(bib) - ideal_aspect_ratio))

  return potential_bibs[0] if len(potential_bibs) > 0 else np.array([[(0,0)],[(0,0)],[(0,0)],[(0,0)]])

#
# Checks that the size and aspect ratio of the contour is appropriate for a bib.
#
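The is_potential_bib helper used above is not included in this excerpt. Below is a hedged sketch of what a size and aspect-ratio filter of this kind might look like; the area fractions and ratio bounds are illustrative assumptions, not the bib-tagger project's actual values.

import cv2

def is_potential_bib(candidate, image_area,
                     min_area_fraction=0.005, max_area_fraction=0.25,
                     min_aspect=0.5, max_aspect=2.0):
    # Bounding box of the candidate rectangle contour.
    x, y, w, h = cv2.boundingRect(candidate)
    area = w * h
    # Reject regions that are far too small or far too large for a bib.
    if not (min_area_fraction * image_area <= area <= max_area_fraction * image_area):
        return False
    # Keep roughly square shapes (a bib is close to a 1.0 aspect ratio).
    aspect = w / float(h)
    return min_aspect <= aspect <= max_aspect
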
test_nox.py (project: Yugioh-bot, author: will7200)
def test_initial_pass_through_compare(self):
        original = cv2.imread(os.path.join(self.provider.assets, "start_screen.png"))
        against = self.provider.get_img_from_screen_shot()
        wrong = cv2.imread(os.path.join(self.provider.assets, "battle.png"))

        # convert the images to grayscale
        original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
        against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
        wrong = mask_image([127], [255], cv2.cvtColor(wrong, cv2.COLOR_BGR2GRAY), True)
        # initialize the figure
        (score, diff) = compare_ssim(original, against, full=True)
        diff = (diff * 255).astype("uint8")
        self.assertTrue(score > .90, 'If this is less than .90 the initial compare of the app will fail')
        (score, nothing) = compare_ssim(original, wrong, full=True)
        self.assertTrue(score < .90)
        if self.__debug_pictures__:
            # threshold the difference image, followed by finding contours to
            # obtain the regions of the two input images that differ
            thresh = cv2.threshold(diff, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0]
            # loop over the contours
            for c in cnts:
                # compute the bounding box of the contour and then draw the
                # bounding box on both input images to represent where the two
                # images differ
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.rectangle(against, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # show the output images
            diffs = ("Original", original), ("Modified", against), ("Diff", diff), ("Thresh", thresh)
            images = ("Original", original), ("Against", against), ("Wrong", wrong)
            self.setup_compare_images(diffs)
            self.setup_compare_images(images)
idcardocr.py (project: idmatch, author: maddevsio)
def recognize_text(original):
    idcard = original
    gray = cv2.cvtColor(idcard, cv2.COLOR_BGR2GRAY)

    # Morphological gradient:
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    opening = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, kernel)

    # Binarization
    ret, binarization = cv2.threshold(opening, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # Connected horizontally oriented regions
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
    connected = cv2.morphologyEx(binarization, cv2.MORPH_CLOSE, kernel)

    # find countours
    _, contours, hierarchy = cv2.findContours(
        connected, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE
    )
    return contours, hierarchy
preprocessor_eval.py (project: HandwritingRecognition, author: eng-tsmith)
def thresholding(img_grey):
    """
    This function creates a binary image using thresholding
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)

    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret3, img_binary = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)

    # invert black = 255
    ret, thresh1 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)

    return thresh1
preprocessor.py (project: HandwritingRecognition, author: eng-tsmith)
def thresholding(img_grey):
    """
    This function creates a binary image using thresholding
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Global
    # ret1, thresh1 = cv.threshold(img_grey, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh1)
    #
    # # Adaptive Mean
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
    # ret2, thresh2 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh2)
    #
    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
    # ret3, thresh3 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh3)

    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret4, img_otsu = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    ret4, thresh4 = cv.threshold(img_otsu, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh4)

    return thresh4
FaceFinder.py (project: FaceDetect, author: PCJohn)
def localize(img,model_path):
    sess = tfac.start_sess()
    y,x_hold,y_hold,keep_prob = build_net(sess)
    saver = tf.train.Saver()
    saver.restore(sess,model_path)

    #Run all detection at a fixed size
    img = cv2.resize(img,DET_SIZE)
    mask = np.zeros(img.shape)
    #Run sliding windows of different sizes
    for bx in range(WIN_MIN,WIN_MAX,WIN_STRIDE):
        by = bx
        for i in xrange(0, img.shape[1]-bx, X_STEP):
            for j in xrange(0, img.shape[0]-by, Y_STEP):
                sub_img = cv2.resize(img[i:i+bx,j:j+by],face_ds.IN_SIZE)
                X = sub_img.reshape((1,tfac.dim_prod(face_ds.IN_SIZE)))
                out = y.eval(session=sess,feed_dict={x_hold:X,keep_prob:1})[0]
                if out[0] >= CONF_THRESH:
                    mask[i:i+bx,j:j+by] = mask[i:i+bx,j:j+by]+1

    sess.close()
    mask = np.uint8(255*mask/np.max(mask))
    faces = img*(cv2.threshold(cv2.blur(mask,BLUR_DIM),0,255,cv2.THRESH_OTSU)[1]/255)
    return (faces,mask)
cvmgr.py (project: sia-cog, author: deepakkumar1984)
def extracttext(imgpath, preprocess):
    if imgpath.startswith('http://') or imgpath.startswith('https://') or imgpath.startswith('ftp://'):
        image = url_to_image(imgpath)
    else:
        image = cv2.imread(imgpath)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if preprocess == "thresh":
        gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    elif preprocess == "blur":
        gray = cv2.medianBlur(gray, 3)

    filename = "{}.png".format(os.getpid())
    cv2.imwrite(filename, gray)
    text = pytesseract.image_to_string(Image.open(filename))

    os.remove(filename)
    return {"text": text}
image_analysis.py (project: Robo-Plot, author: JackBuck)
def process_image(image):
    """

    Args:
        image: The image to process

    Returns:
        sub_image: The rotated and extracted sub-image.

    """

    # Convert image to black and white - we cannot take the photos in black and white as we
    # must first search for the red triangle.

    if len(image.shape) == 3:
        processed_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        processed_img = image

    if config.real_hardware:
        num_iterations = 8
    else:
        num_iterations = 8

        processed_img = cv2.GaussianBlur(processed_img, (21, 21), 0)
    _, processed_img = cv2.threshold(processed_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Put a border around the image to stop the edges of the images creating artifacts.
    padded_image = np.zeros((processed_img.shape[0] + 10, processed_img.shape[1] + 10), np.uint8)
    padded_image[5:processed_img.shape[0]+5, 5:processed_img.shape[1]+5] = processed_img

    kernel = np.array([[0, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 0]], np.uint8)

    padded_image = cv2.erode(padded_image, kernel, iterations=num_iterations)
    processed_img = padded_image[25:padded_image.shape[0] - 25, 25:padded_image.shape[1] - 25]

    #cv2.imshow('Padded Image', padded_image)
    #cv2.imshow('Processed image', processed_img)
    #cv2.waitKey(0)



    # Debugging code - useful to show the images are being eroded correctly.
    #spacer = processed_img[:, 0:2].copy()
    #spacer.fill(100)
    #combined_image = np.concatenate((processed_img, spacer), axis=1)
    #combined_image = np.concatenate((combined_image, image), axis=1)
    #cv2.imshow('PreProcessed and Processed Image', combined_image)
    #cv2.waitKey(0)

    # Save sub_image to debug folder if required.
    if __debug__:
        iadebug.save_processed_image(processed_img)

    return processed_img
Tshirt.py (project: virtual-dressing-room, author: akash0x53)
def detect_shirt(self):


        #self.dst=cv2.inRange(self.norm_rgb,np.array([self.lb,self.lg,self.lr],np.uint8),np.array([self.b,self.g,self.r],np.uint8))
        self.dst=cv2.inRange(self.norm_rgb,np.array([20,20,20],np.uint8),np.array([255,110,80],np.uint8))
        # Otsu-threshold the colour mask before the morphology steps
        _, self.dst = cv2.threshold(self.dst, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
        fg=cv2.erode(self.dst,None,iterations=2)
        #cv2.imshow("fore",fg)  
        bg=cv2.dilate(self.dst,None,iterations=3)
        _,bg=cv2.threshold(bg, 1,128,1)
        #cv2.imshow("back",bg)

        mark=cv2.add(fg,bg)
        mark32=np.int32(mark)
        cv2.watershed(self.norm_rgb,mark32)
        self.m=cv2.convertScaleAbs(mark32)
        _,self.m=cv2.threshold(self.m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        #cv2.imshow("final_tshirt",self.m)

        cntr,h=cv2.findContours(self.m,cv2.cv.CV_RETR_EXTERNAL,cv2.cv.CV_CHAIN_APPROX_SIMPLE)

        return self.m,cntr
digit_classifier.py (project: pyku, author: dubvulture)
def create_model(self, train_folder):
        """
        Return the training set, its labels and the trained model
        :param train_folder: folder where to retrieve data
        :return: (train_set, train_labels, trained_model)
        """
        digits = []
        labels = []
        for n in range(1, 10):
            folder = train_folder + str(n)
            samples = [pic for pic in os.listdir(folder)
                       if os.path.isfile(os.path.join(folder, pic))]

            for sample in samples:
                image = cv2.imread(os.path.join(folder, sample))
                # Expecting black on white
                image = 255 - cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                _, image = cv2.threshold(image, 0, 255,
                                         cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                feat = self.feature(image)
                digits.append(feat)
                labels.append(n)

        digits = np.array(digits, np.float32)
        labels = np.array(labels, np.float32)
        if cv2.__version__[0] == '2':
            model = cv2.KNearest()
            model.train(digits, labels)
        else:
            model = cv2.ml.KNearest_create()
            model.train(digits, cv2.ml.ROW_SAMPLE, labels)
        return digits, labels, model
monkey.py (project: ATX, author: NetEaseGame)
def do_touch(self):
        width, height = 1080, 1920

        screen = self.device.screenshot_cv2()
        h, w = screen.shape[:2]
        img = cv2.resize(screen, (w/2, h/2))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        edges = cv2.Canny(gray, 80, 200)
        _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_OTSU)
        contours, _ = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
        contours.sort(key=lambda cnt: len(cnt), reverse=True)

        rects = []
        for cnt in contours:
            hull = cv2.convexHull(cnt)
            hull_area = cv2.contourArea(hull)
            x,y,w,h = cv2.boundingRect(cnt)
            rect_area = float(w*h)
            if w<20 or h<20 or rect_area<100:
                continue
            if hull_area/rect_area < 0.50:
                continue
            rects.append((x, y, x+w, y+h))
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)

        if not rects:
            x, y = randint(1, width), randint(1, height)
        else:
            x1, y1, x2, y2 = choice(rects)
            x, y = randint(x1, x2), randint(y1, y2)
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

        x, y = self.device.screen2touch(x*2, y*2)
        self.device.touch(x, y)
        cv2.imshow('img', img)
        cv2.waitKey(1)
ocr_identidade.py (project: identidade_ocr, author: Ronald-TR)
def binarize(img):  # binarization of the cropped regions
    a = np.asarray(img)
    a = cv2.cvtColor(a, cv2.COLOR_RGB2GRAY)
    ret, imbin = cv2.threshold(a, 127, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)
    return Image.fromarray(imbin)
predict.py (project: cervix-roi-segmentation-by-unet, author: scottykwok)
def to_binary_mask(mask, t=0.00001):
    mask = inverse_preprocessing(mask)

    ### Threshold the RGB image - this step increases sensitivity
    mask[mask > t] = 255
    mask[mask <= t] = 0

    ### To grayscale and normalize
    mask_gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    mask_gray = cv2.normalize(src=mask_gray, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)

    ### Auto binary threshold
    (thresh, mask_binary) = cv2.threshold(mask_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    return mask_binary
convert_play.py (project: tefla, author: litan)
def crop_image(fname,target_size):
    print('Processing image: %s' % fname)

    #otsu thresholding
    img = Image.open(fname)
    blurred = img.filter(ImageFilter.BLUR)
    ba = np.array(blurred)
    gray_image = cv2.cvtColor(ba, cv2.COLOR_BGR2GRAY)
    retval2, threshold2 = cv2.threshold(gray_image, 125, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # store the white-pixel count of each row and column in two arrays;
    # these arrays are later used to find the boundaries for cropping the image
    row_white_pixel_count=np.count_nonzero(threshold2,axis=1)
    col_white_pixel_count=np.count_nonzero(threshold2,axis=0)

    #find x,y,w,h for cropping image
    y=find_boundary(row_white_pixel_count,col_white_pixel_count.size)
    h=find_boundary_reverse(row_white_pixel_count,col_white_pixel_count.size)
    x=find_boundary(col_white_pixel_count,row_white_pixel_count.size)
    w=find_boundary_reverse(col_white_pixel_count,row_white_pixel_count.size)
    crop_array = ba[y:h, x:w]

    #resize the image
    crop_img=Image.fromarray(crop_array)
    resized = crop_img.resize([target_size, target_size])


    #uncomment below line to see histogram of both white pixel vs rows and white pixel vs columns
    subplots(threshold2, row_white_pixel_count, col_white_pixel_count, crop_img)
    return resized
monkey.py (project: AutomatorX, author: xiaoyaojjian)
def do_touch(self):
        width, height = 1080, 1920

        screen = self.device.screenshot_cv2()
        h, w = screen.shape[:2]
        img = cv2.resize(screen, (w/2, h/2))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        edges = cv2.Canny(gray, 80, 200)
        _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_OTSU)
        contours, _ = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
        contours.sort(key=lambda cnt: len(cnt), reverse=True)

        rects = []
        for cnt in contours:
            hull = cv2.convexHull(cnt)
            hull_area = cv2.contourArea(hull)
            x,y,w,h = cv2.boundingRect(cnt)
            rect_area = float(w*h)
            if w<20 or h<20 or rect_area<100:
                continue
            if hull_area/rect_area < 0.50:
                continue
            rects.append((x, y, x+w, y+h))
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)

        if not rects:
            x, y = randint(1, width), randint(1, height)
        else:
            x1, y1, x2, y2 = choice(rects)
            x, y = randint(x1, x2), randint(y1, y2)
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

        x, y = self.device.screen2touch(x*2, y*2)
        self.device.touch(x, y)
        cv2.imshow('img', img)
        cv2.waitKey(1)
image_dataset.py (project: Comicolorization, author: DwangoMediaVillage)
def convert_to_linedrawing(self, luminous_image_data):
        linedrawing = cv2.threshold(luminous_image_data, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        return linedrawing
page.py (project: doc2text, author: jlsutherland)
def process_skewed_crop(image):
    theta = compute_skew(estimate_skew(image))
    ret, thresh = cv2.threshold(image.copy(), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    rotated = rotate(thresh, theta)
    return (rotated, theta)
dictionary.py (project: BAR4Py, author: bxtkezhan)
def poolFrame(self, frame, pool_size=(28, 28)):
        '''
        frame is OpenCV image frame
        return pooled frame
        '''
        if len(frame.shape) == 2:
            gray_frame = frame
        else:
            gray_frame = bgr2gray(frame)
        pool_frame = cv2.resize(gray_frame, pool_size)
        _, pool_frame = cv2.threshold(pool_frame, pool_frame.mean(), 1, cv2.THRESH_OTSU)
        return pool_frame
imgproc_funcfile.py (project: ghetto_omr, author: pohzhiee)
def outlining(img):
    #kernel size
    kernel_size=3
    #-------------------------------------------------
    #bilateral filter, sharpen, thresh image
    biblur=cv2.bilateralFilter(img,20,175,175)
    sharp=cv2.addWeighted(img,1.55,biblur,-0.5,0)
    ret1,thresh1 = cv2.threshold(sharp,127,255,cv2.THRESH_OTSU)

    #negative and closed image
    inv=cv2.bitwise_not(thresh1)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    closed = cv2.morphologyEx(inv, cv2.MORPH_CLOSE, kernel)
    return closed
helpers.py (project: opencv-helpers, author: abarrak)
def otsu_threshold(image, above_thresh_assigned=255, thresh_style=cv.THRESH_BINARY_INV):
  ''' apply otsu's binarization algorithm to find optimal threshold value. '''
  ret, thresholded = cv.threshold(image, 0, above_thresh_assigned, thresh_style  + cv.THRESH_OTSU)
  return { 'otsu_thresh': ret, 'image': thresholded }
main.py (project: ImageSteganography, author: AhmedAtef07)
def convert_to_binary_image(img_path, preview):
    img = cv2.imread(img_path)
    if preview: _preview_image("Original Message Image", img, keep_open=True)

    img_gray = cv2.imread(img_path, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    if preview: _preview_image("Gray Scale Message Image", img_gray, keep_open=True)

    (thresh, img_bw) = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    if preview:  _preview_image("Black & White Message Image", img_bw)

    return img_bw
cloud_detection.py (project: pynephoscope, author: neXyon)
def detect(self, image, mask = None):
        floatimage = np.float32(image)

        fb,fg,fr = cv2.split(floatimage)

        # red-to-blue channel operation
        ra = fr + fb
        rb = fr - fb
        rb[ra > 0] /= ra[ra > 0]
        #mi = np.min(rb)
        #ma = np.max(rb)
        #rb = np.uint8((rb - mi) / (ma - mi) * 255)

        # morphology open
        if self.kernel is None or self.kernel.shape[0] != Configuration.background_rect_size:
            self.kernel = np.ones((Configuration.background_rect_size, Configuration.background_rect_size), np.uint8) * 255

        result = cv2.morphologyEx(rb, cv2.MORPH_OPEN, self.kernel)

        # background subtraction
        # homogeneous background image V
        result = rb - result

        mi = np.min(result)
        ma = np.max(result)
        result = np.uint8((result - mi) / (ma - mi) * 255)

        # adaptive threshold T
        T, _ = cv2.threshold(result[mask == 0], 0, 1, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        # V(i, j) > T
        return np.uint8((T - np.float32(result)) <= 0)
remove_noise.py (project: image_text_reader, author: yardstick17)
def image_smoothening(img):
    ret1, th1 = cv2.threshold(img, BINARY_THREHOLD, 255, cv2.THRESH_BINARY)
    ret2, th2 = cv2.threshold(th1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    blur = cv2.GaussianBlur(th2, (1, 1), 0)
    ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return th3
decaptcha.py (project: Simple-deCAPTCHA, author: BLKStone)
def th1(self,img):
        # convert to grayscale, then apply Otsu's threshold
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(img_gray, 0, 255, 
            cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        return thresh

split.py (project: captcha-python-test, author: hanc00l)
def clear_noise(image):
    clear_horizotal_line_noise(image)
    clear_color(image)

    image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    clear_pix_noise(image)
    ret, image = cv2.threshold(image,127,255,cv2.THRESH_BINARY)#+cv2.THRESH_OTSU)

    return image

