Example source code for the Python bilateralFilter() function

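Every snippet below wraps the same OpenCV call. As a quick standalone reference (the file name input.jpg is illustrative):

import cv2

img = cv2.imread("input.jpg")                    # BGR image
smoothed = cv2.bilateralFilter(img, 9, 75, 75)   # d=9, sigmaColor=75, sigmaSpace=75
cv2.imwrite("smoothed.jpg", smoothed)
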
find_circles.py (project: eclipse2017, author: google)
def findCircles(fname, image, circles_directory):
    f = os.path.join(circles_directory, os.path.basename(fname) + ".pkl")
    if os.path.exists(f):
        circles = pickle.load(open(f, "rb"))
        return circles
    image_cols, image_rows, _ = image.shape

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.bilateralFilter(gray, 9, 75, 75)
    gray = cv2.addWeighted(gray, 1.5, blurred, -0.5, 0)
    gray = cv2.bilateralFilter(gray, 9, 75, 75)

    # # detect circles in the image
    dp = 1
    c1 = 100
    c2 = 15
    print "start hough", fname
    circles = cv2.HoughCircles(gray, cv2.cv.CV_HOUGH_GRADIENT, dp, image_cols / 8, param1=c1, param2=c2)
    print "finish hough", fname
    pickle.dump(circles, open(f, "wb"))
    if circles is None or not len(circles):
        return None
    return circles
process_images.py (project: Emotion-Recognition, author: HashCode55)
def process_image(img = list()):
    """
    Extracts faces from the image using haar cascade, resizes and applies filters. 
    :param img: image matrix. Must be grayscale
    ::returns faces:: list containing the cropped face images
    """
    face_cascade = cv2.CascadeClassifier('/Users/mehul/opencv-3.0.0/build/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')   

    faces_location = face_cascade.detectMultiScale(img, 1.3, 5)
    faces = []

    for (x,y,w,h) in faces_location:
        img = img[y:(y+h), x:(x+w)]
        try:
            img = cv2.resize(img, (256, 256))
        except:
            exit(1)
        img = cv2.bilateralFilter(img,15,10,10)
        img = cv2.fastNlMeansDenoising(img,None,4,7,21)
        faces.append(img)

    return faces
find_circles.py (project: eclipse2017, author: google)
def findCircles(image):
    image_cols, image_rows, _ = image.shape

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    first, second = getRescaledDimensions(gray.shape[1], gray.shape[0], HD_MAX_X, HD_MAX_Y)
    gray = cv2.resize(gray, (first, second))
    blurred = cv2.bilateralFilter(gray, 9, 75, 75)
    gray = cv2.addWeighted(gray, 1.5, blurred, -0.5, 0)
    gray = cv2.bilateralFilter(gray, 9, 75, 75)

    # # detect circles in the image
    dp = 1
    c1 = 100
    c2 = 15
    circles = cv2.HoughCircles(gray, cv2.cv.CV_HOUGH_GRADIENT, dp, second / 8, param1=c1, param2=c2)
    if circles is None or not len(circles):
        return None
    return circles[0][0]
BlinkPipeline.py (project: cozmo_beyond, author: PeterMitrano)
def __blur(src, type, radius):
        """Softens an image using one of several filters.
        Args:
            src: The source mat (numpy.ndarray).
            type: The blurType to perform represented as an int.
            radius: The radius for the blur as a float.
        Returns:
            A numpy.ndarray that has been blurred.
        """
        if(type is BlurType.Box_Blur):
            ksize = int(2 * round(radius) + 1)
            return cv2.blur(src, (ksize, ksize))
        elif(type is BlurType.Gaussian_Blur):
            ksize = int(6 * round(radius) + 1)
            return cv2.GaussianBlur(src, (ksize, ksize), round(radius))
        elif(type is BlurType.Median_Filter):
            ksize = int(2 * round(radius) + 1)
            return cv2.medianBlur(src, ksize)
        else:
            return cv2.bilateralFilter(src, -1, round(radius), round(radius))
filter.py (project: PicFilter, author: dhuadaar)
def render(self,frame):
        numDownSamples = 2
        img_rgb = frame
        # number of downscaling steps
        numBilateralFilters = 7
        # number of bilateral filtering steps
        # -- STEP 1 --
        # downsample image using Gaussian pyramid
        img_color = img_rgb
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrDown(img_color)
        # repeatedly apply small bilateral filter instead of applying
        # one large filter
        for _ in xrange(numBilateralFilters):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)

        # upsample image to original size
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrUp(img_color)
        # convert to grayscale and apply median blur
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 7)

        # detect and enhance edges
        img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
        # -- STEP 5 --
        # convert back to color so that it can be bit-ANDed with color image
        img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
        final = cv2.bitwise_and(img_color, img_edge)
        return cv2.medianBlur(final,7)
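For reference, a self-contained sketch of the cartoonizing idea described in the comments above (repeated small bilateral filters on a downsampled image instead of one large filter). The file name photo.jpg is illustrative, and the pyrUp result can differ from the original size by a pixel or two when the dimensions are not divisible by four:

import cv2

img = cv2.imread("photo.jpg")                     # BGR input
small = cv2.pyrDown(cv2.pyrDown(img))             # two Gaussian-pyramid downscaling steps
for _ in range(7):                                # several small bilateral filters instead of one large one
    small = cv2.bilateralFilter(small, 9, 9, 7)
smoothed = cv2.pyrUp(cv2.pyrUp(small))            # upsample back toward the original size
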
filter.py (project: PicFilter, author: dhuadaar)
def render(self,frame):
        canvas = cv2.imread("pen.jpg", cv2.CV_8UC1)
        numDownSamples = 2
        img_rgb = frame
        # number of downscaling steps
        numBilateralFilters = 3
        # number of bilateral filtering steps
        # -- STEP 1 --
        # downsample image using Gaussian pyramid
        img_color = img_rgb
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrDown(img_color)
        # repeatedly apply small bilateral filter instead of applying
        # one large filter
        for _ in xrange(numBilateralFilters):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 3)

        # upsample image to original size
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrUp(img_color)
        # convert to grayscale and apply median blur
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 3)

        # detect and enhance edges
        img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
        return  cv2.multiply(cv2.medianBlur(img_edge,7), canvas, scale=1./256)
preprocess.py (project: Magic-Pixel, author: zhwhong)
def bilateralFilter(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)
    # d = 9: diameter of the pixel neighborhood used during filtering
    # the two 75s are sigmaColor and sigmaSpace (the color-similarity and spatial Gaussian sigmas)
    blur = cv2.bilateralFilter(img,9,75,75)
    # cv2.imwrite(dstpath, blur)
    plt.subplot(1,2,1),plt.imshow(img,'gray')
    plt.subplot(1,2,2),plt.imshow(blur,'gray')
    plt.show()
postprocess.py (project: rec-attend-public, author: renmengye)
def upsample_single(a, size):
  """Upsample single image, with bilateral filtering.
  Args:
    a: [H', W', 3]
    size: [W, H]
  Returns:
    b: [H, W, 3]
  """
  interpolation = cv2.INTER_LINEAR
  b = cv2.resize(a, size, interpolation=interpolation)
  b = cv2.bilateralFilter(b, 5, 10, 10)
  return b
fg_model_eval.py (project: rec-attend-public, author: renmengye)
def upsample_single(self, a, size):
    """Upsample single image, with bilateral filtering.
    Args:
      a: [H', W', 3]
      size: [W, H]
    Returns:
      b: [H, W, 3]
    """
    interpolation = cv2.INTER_LINEAR
    b = cv2.resize(a, size, interpolation=interpolation)
    b = cv2.bilateralFilter(b, 5, 10, 10)
    return b
navigation.py (project: srcsim2017, author: ZarjRobotics)
def build_mask(self, image):
        """ Build the mask to find the path edges """
        kernel = np.ones((3, 3), np.uint8)
        img = cv2.bilateralFilter(image, 9, 75, 75)
        img = cv2.erode(img, kernel, iterations=1)

        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, self.lower_gray, self.upper_gray)

        mask2 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        mask2 = cv2.erode(mask2, kernel)
        mask2 = cv2.dilate(mask2, kernel, iterations=1)

        return mask2
page.py (project: doc2text, author: jlsutherland)
def reduce_noise_raw(im):
    bilat = cv2.bilateralFilter(im, 9, 75, 75)
    blur = cv2.medianBlur(bilat, 5)
    return blur
video.py (project: trackingtermites, author: dmrib)
def apply_filters(self, frame):
        """Apply specified filters to frame.

        Args:
            frame (np.ndarray): frame to be modified.
        Returns:
            n_frame (np.ndarray): modified frame.
        """
        n_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if 'g-blur' in self.filters:
            n_frame = cv2.GaussianBlur(n_frame, (5,5), 0)
        if 'b-filtering' in self.filters:
            n_frame = cv2.bilateralFilter(n_frame, 9, 75, 75)
        if 't_adaptive' in self.filters:
            n_frame = cv2.adaptiveThreshold(n_frame, 255,
                                            cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                            cv2.THRESH_BINARY, 115, 1)
        if 'otsu' in self.filters:
            _, n_frame = cv2.threshold(n_frame, 125, 255,
                                       cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        if 'canny' in self.filters:
            n_frame = cv2.Canny(n_frame, 100, 200)
        if 'b-subtraction' in self.filters:
            n_frame = self.subtractor.apply(frame)

        n_frame = cv2.cvtColor(n_frame, cv2.COLOR_GRAY2BGR)

        return n_frame
api.py (project: histonets-cv, author: sul-cidr)
def smooth_image(image, kernel):
    if (kernel < 0):
        kernel = 0
    elif (kernel > 100):
        kernel = 100
    return cv2.bilateralFilter(image, kernel, kernel, kernel)
imgproc_funcfile.py (project: ghetto_omr, author: pohzhiee)
def outlining(img):
    #kernel size
    kernel_size=3
    #-------------------------------------------------
    #bilateral filter, sharpen, thresh image
    biblur=cv2.bilateralFilter(img,20,175,175)
    sharp=cv2.addWeighted(img,1.55,biblur,-0.5,0)
    ret1,thresh1 = cv2.threshold(sharp,127,255,cv2.THRESH_OTSU)

    #negative and closed image
    inv=cv2.bitwise_not(thresh1)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    closed = cv2.morphologyEx(inv, cv2.MORPH_CLOSE, kernel)
    return closed
solver.py (project: airport, author: cfircohen)
def PrepareImage(image):
  """Converts color image to black and white"""
  # work on gray scale
  bw = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

  # remove noise, preserve edges
  bw = cv2.bilateralFilter(bw, 9, 75, 75)

  # binary threshold
  bw = cv2.adaptiveThreshold(bw, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                             cv2.THRESH_BINARY, 11, 2)
  return bw
DEMInterp.py (project: UAV-and-TrueOrtho, author: LeonChen66)
def Dsm_bilatera(Dsm_arr,mask,n):    
    Dsm_arr = cv2.bilateralFilter(Dsm_arr,mask,n,n)

    return Dsm_arr

#show Dsm
DSM_GEN_BD.py (project: UAV-and-TrueOrtho, author: LeonChen66)
def bilatera(Dsm_arr, mask, n):
    Dsm_arr = cv2.bilateralFilter(Dsm_arr, mask, n, n)

    return Dsm_arr
RegionOfInterest.py (project: DoNotSnap, author: AVGInnovationLabs)
def roiMask(image, boundaries):
    scale = max([1.0, np.average(np.array(image.shape)[0:2] / 400.0)])
    shape = (int(round(image.shape[1] / scale)), int(round(image.shape[0] / scale)))

    small_color = cv2.resize(image, shape, interpolation=cv2.INTER_LINEAR)

    # reduce details and remove noise for better edge detection
    small_color = cv2.bilateralFilter(small_color, 8, 64, 64)
    small_color = cv2.pyrMeanShiftFiltering(small_color, 8, 64, maxLevel=1)
    small = cv2.cvtColor(small_color, cv2.COLOR_BGR2HSV)

    hue = small[::, ::, 0]
    intensity = cv2.cvtColor(small_color, cv2.COLOR_BGR2GRAY)

    edges = extractEdges(hue, intensity)
    roi = roiFromEdges(edges)
    weight_map = weightMap(hue, intensity, edges, roi)

    _, final_mask = cv2.threshold(roi, 5, 255, cv2.THRESH_BINARY)
    small = cv2.bitwise_and(small, small, mask=final_mask)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))

    for (lower, upper) in boundaries:
        lower = np.array([lower, 80, 50], dtype="uint8")
        upper = np.array([upper, 255, 255], dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(small, lower, upper)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=3)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
        final_mask = cv2.bitwise_and(final_mask, mask)

    # blur the mask for better contour extraction
    final_mask = cv2.GaussianBlur(final_mask, (5, 5), 0)
    return (final_mask, weight_map, scale)
squeeze.py (project: EvadeML-Zoo, author: mzweilin)
def bilateral_filter_py(imgs, d, sigmaSpace, sigmaColor):
    """
    :param d: Diameter of each pixel neighborhood that is used during filtering. 
        If it is non-positive, it is computed from sigmaSpace.
    :param sigmaSpace: Filter sigma in the coordinate space. 
        A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor ). 
        When d>0, it specifies the neighborhood size regardless of sigmaSpace. 
        Otherwise, d is proportional to sigmaSpace.
    :param sigmaColor: Filter sigma in the color space. 
        A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color.
    """
    import cv2
    return opencv_wrapper(imgs, cv2.bilateralFilter, [d, sigmaColor, sigmaSpace])
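A minimal direct-call sketch of the d/sigmaColor/sigmaSpace behaviour described in the docstring above (the parameter values and input path are illustrative):

import cv2

img = cv2.imread("input.png")
# d > 0: explicit neighborhood diameter, used regardless of sigmaSpace
fixed_d = cv2.bilateralFilter(img, 9, 75, 75)
# d <= 0: the neighborhood diameter is computed from sigmaSpace
auto_d = cv2.bilateralFilter(img, -1, 75, 5)
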
digital_display_ocr.py (project: digital-display-character-rec, author: upupnaway)
def cnvt_edged_image(img_arr, should_save=False):
  # ratio = img_arr.shape[0] / 300.0
  image = imutils.resize(img_arr,height=300)
  gray_image = cv2.bilateralFilter(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY),11, 17, 17)
  edged_image = cv2.Canny(gray_image, 30, 200)

  if should_save:
    cv2.imwrite('cntr_ocr.jpg', edged_image)

  return edged_image
imgproc.py (project: Cerebrum, author: tyler-cromwell)
def preprocess(frame, width, height, x, y, w, h):
    """
    Preprocesses an image for Face Recognition
    """
    cropped = frame[y: y+h, x: x+w]
    grayed = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(grayed, (width, height))
    equalized = cv2.equalizeHist(resized)
    filtered = cv2.bilateralFilter(equalized, 5, 60, 60)
    return filtered
image_preprocessing.py (project: Notes2ppt, author: gsengupta2810)
def bilateralFilter(img):
  # Bilateral Filtering- highly effective in noise removal while keeping edges sharp
  bilateral= cv2.bilateralFilter(img,9,75,75) 
  return bilateral
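To see the edge-preserving property mentioned in the comment above, a small comparison against a plain Gaussian blur (the input path is illustrative):

import cv2

img = cv2.imread("page.png")
edge_preserving = cv2.bilateralFilter(img, 9, 75, 75)  # smooths flat regions, keeps edges sharp
edge_smearing = cv2.GaussianBlur(img, (9, 9), 0)       # smooths everything, including edges
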
image_preprocessing.py (project: Notes2ppt, author: gsengupta2810)
def smooth_image(img):
  # blur the image to reduce noise 
  dst= median_blur(img)
  dst= gaussian_blur(dst)
  dst= bilateralFilter(dst)
  return dst
# *********************************************************

# ************************** Binarization *****************
unet.py (project: neural-fonts, author: periannath)
def infer(self, source_obj, embedding_ids, model_dir, save_dir, progress_file):
        source_provider = InjectDataProvider(source_obj, None)

        with open(progress_file, 'a') as f:
            f.write("Start")

        if isinstance(embedding_ids, int) or len(embedding_ids) == 1:
            embedding_id = embedding_ids if isinstance(embedding_ids, int) else embedding_ids[0]
            source_iter = source_provider.get_single_embedding_iter(self.batch_size, embedding_id)
        else:
            source_iter = source_provider.get_random_embedding_iter(self.batch_size, embedding_ids)

        tf.global_variables_initializer().run()
        saver = tf.train.Saver(var_list=self.retrieve_generator_vars())
        self.restore_model(saver, model_dir)

        def save_imgs(imgs, count):
            p = os.path.join(save_dir, "inferred_%04d.png" % count)
            save_concat_images(imgs, img_path=p)
#            print("generated images saved at %s" % p)

        def save_sample(imgs, code):
            p = os.path.join(save_dir, "inferred_%s.png" % code)
            save_concat_images(imgs, img_path=p)
#            print("generated images saved at %s" % p)

        count = 0
        batch_buffer = list()
        for labels, codes, source_imgs in source_iter:
            fake_imgs = self.generate_fake_samples(source_imgs, labels)[0]
            for i in range(len(fake_imgs)):
                # Denormalize image
                gray_img = np.uint8(fake_imgs[i][:,:,0]*127.5+127.5)
                pil_img = Image.fromarray(gray_img, 'L')
                # Apply bilateralFilter
                cv_img = np.array(pil_img)
                cv_img = bilateralFilter(cv_img, 5, 10, 10)
                pil_img = Image.fromarray(cv_img)
                # Increase contrast
                enhancer = ImageEnhance.Contrast(pil_img)
                en_img = enhancer.enhance(1.5)
                # Normalize image
                fake_imgs[i][:,:,0] = Image.fromarray(np.array(en_img)/127.5 - 1.)
#                save_sample(fake_imgs[i], codes[i])
            merged_fake_images = merge(scale_back(fake_imgs), [self.batch_size, 1])
            batch_buffer.append(merged_fake_images)
            if len(batch_buffer) == 1:
                save_sample(batch_buffer, codes[0])
                batch_buffer = list()
            count += 1
        if batch_buffer:
            # last batch
            save_imgs(batch_buffer, count)
        with open(progress_file, 'a') as f:
            f.write("Done")
crop.py (project: neural-fonts, author: periannath)
def crop_image_uniform(src_dir, dst_dir):
    f = open("399-uniform.txt", "r")
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    for page in range(1,4):
        img = Image.open( src_dir + "/" + str(page) +"-uniform.png").convert('L')

        width, height = img.size
        cell_width = width/float(cols)
        cell_height = height/float(rows)
        header_offset = height/float(rows) * header_ratio
        width_margin = cell_width * 0.10
        height_margin = cell_height * 0.10

        for j in range(0,rows):
            for i in range(0,cols):
                left = i * cell_width
                upper = j * cell_height + header_offset
                right = left + cell_width
                lower = (j+1) * cell_height

                center_x = (left + right) / 2
                center_y = (upper + lower) / 2

                crop_width = right - left - 2*width_margin
                crop_height = lower - upper - 2*height_margin

                size = 0
                if crop_width > crop_height:
                    size = crop_height/2
                else:
                    size = crop_width/2

                left = center_x - size;
                right = center_x + size;
                upper = center_y - size;
                lower = center_y + size;

                code = f.readline()
                if not code:
                    break
                else:
                    name = dst_dir + "/uni" + code.strip() + ".png"
                    cropped_image = img.crop((left, upper, right, lower))
                    cropped_image = cropped_image.resize((128,128), Image.LANCZOS)
                    # Increase constrast
                    enhancer = ImageEnhance.Contrast(cropped_image)
                    cropped_image = enhancer.enhance(1.5)
                    opencv_image = np.array(cropped_image)
                    opencv_image = bilateralFilter(opencv_image, 9, 30, 30)
                    cropped_image = Image.fromarray(opencv_image)
                    cropped_image.save(name)
        print("Processed uniform page " + str(page))
camera_reader_and_beautify.py (project: Girl-s-Camera, author: SHANEGU56)
def face_beautify(image,cascade, cascade2, processed_image):
    for (x, y, w, h) in processed_image:
        image1 = image[y:y+h, x:x+w]
        image_high = image1
        eyes = cascade2.detectMultiScale(image1)
        for (ex, ey, ew, eh) in eyes:
            center_x = ex + ew * 0.5
            center_y = ey + eh * 0.5
            eyes1 = image1[ey:ey+eh, ex:ex+ew]
            eyes2 = eyes1
            kernel_radius = min(ew, eh) * 0.4
            for r in range(eh):
                for c in range(ew):
                    diff_x = c - ew*0.5
                    diff_y = r - eh*0.5
                    distance = math.sqrt(diff_x * diff_x + diff_y * diff_y)
                    p_x = 0
                    p_y = 0
                    if distance <= kernel_radius:
                        re = (1 - math.cos(distance / kernel_radius * 2 * math.pi)) * 2.5
                        p_x = -diff_x * (re / kernel_radius)
                        p_y = -diff_y * (re / kernel_radius)
                    if p_x < 0 : 
                        p_x  = 0
                    if p_y < 0 : 
                        p_y = 0
                    eyes2[r,c] = eyes1[int(r + p_y),int(c + p_x)]
            image1[ey:ey+eh, ex:ex+ew] = eyes2  
        image_high1 = cv2.bilateralFilter(image_high, 15, 37, 37)
        #image_high2 = image_high1 - image1 + 128 
        image_high3 = cv2.GaussianBlur(image_high1,(1, 1),0)
        #image_high4 = image1 + 2 * image_high3 - 255
        #final = image1 * 0.45 + image_high4 * 0.55
        c_x = x + w * 0.5
        c_y = y + h * 0.5
        radius = min(w, h) * 2
        image_high4 = image_high3
        for row in range(h):
            for col in range(w):
                diff_x = col - w * 0.5
                diff_y = row - h * 0.5
                distance = math.sqrt(square(col - w*0.5) + square(row - h*0.5))
                m_x = 0
                m_y = 0
                if distance <= radius:
                    re = (1 - math.cos(distance / radius * 2 * math.pi)) * 2
                    m_x = -diff_x * (re / radius)
                    m_y = -diff_y * (re / radius)
                if m_x < 0:
                    m_x = 0
                if m_y < 0:
                    m_y = 0
                image_high4[row,col] = image_high3[int(row + m_y), int(col + m_x)]
        image[y:y+h, x:x+w] = image_high4
    return image
camera_reader.py (project: Girl-s-Camera, author: SHANEGU56)
def face_beautify(image,cascade, cascade2, processed_image):
    for (x, y, w, h) in processed_image:
        image1 = image[y:y+h, x:x+w]
        image_high = image1
        eyes = cascade2.detectMultiScale(image1)
        for (ex, ey, ew, eh) in eyes:
            center_x = ex + ew * 0.5
            center_y = ey + eh * 0.5
            eyes1 = image1[ey:ey+eh, ex:ex+ew]
            eyes2 = eyes1
            kernel_radius = min(ew, eh) * 0.4
            for r in range(eh):
                for c in range(ew):
                    diff_x = c - ew*0.5
                    diff_y = r - eh*0.5
                    distance = math.sqrt(diff_x * diff_x + diff_y * diff_y)
                    p_x = 0
                    p_y = 0
                    if distance <= kernel_radius:
                        re = (1 - math.cos(distance / kernel_radius * 2 * math.pi)) * 2.5
                        p_x = -diff_x * (re / kernel_radius)
                        p_y = -diff_y * (re / kernel_radius)
                    if p_x < 0 : 
                        p_x  = 0
                    if p_y < 0 : 
                        p_y = 0
                    eyes2[r,c] = eyes1[int(r + p_y),int(c + p_x)]
            image1[ey:ey+eh, ex:ex+ew] = eyes2  
        image_high1 = cv2.bilateralFilter(image_high, 15, 37, 37)
        #image_high2 = image_high1 - image1 + 128 
        image_high3 = cv2.GaussianBlur(image_high1,(1, 1),0)
        #image_high4 = image1 + 2 * image_high3 - 255
        #final = image1 * 0.45 + image_high4 * 0.55
        c_x = x + w * 0.5
        c_y = y + h * 0.5
        radius = min(w, h) * 2
        image_high4 = image_high3
        for row in range(h):
            for col in range(w):
                diff_x = col - w * 0.5
                diff_y = row - h * 0.5
                distance = math.sqrt(square(col - w*0.5) + square(row - h*0.5))
                m_x = 0
                m_y = 0
                if distance <= radius:
                    re = (1 - math.cos(distance / radius * 2 * math.pi)) * 2
                    m_x = -diff_x * (re / radius)
                    m_y = -diff_y * (re / radius)
                if m_x < 0:
                    m_x = 0
                if m_y < 0:
                    m_y = 0
                image_high4[row,col] = image_high3[int(row + m_y), int(col + m_x)]
        image[y:y+h, x:x+w] = image_high4
    return image
faceRecognize.py (project: Girl-s-Camera, author: SHANEGU56)
def face_beautify(image,cascade, cascade2, processed_image):
    image1 = image[y:y+height, x:x+width]
    image_high = image1
    eyes = cascade2.detectMultiScale(image1)
    for (ex, ey, ew, eh) in eyes:
        center_x = ex + ew * 0.5
        center_y = ey + eh * 0.5
        eyes1 = image1[ey:ey+eh, ex:ex+ew]
        eyes2 = eyes1
        kernel_radius = min(ew, eh) * 0.4
        for r in range(eh):
            for c in range(ew):
                diff_x = c - ew*0.5
                diff_y = r - eh*0.5
                distance = math.sqrt(diff_x * diff_x + diff_y * diff_y)
                p_x = 0
                p_y = 0
                if distance <= kernel_radius:
                    re = (1 - math.cos(distance / kernel_radius * 2 * math.pi)) * 2.5
                    p_x = -diff_x * (re / kernel_radius)
                    p_y = -diff_y * (re / kernel_radius)
                if p_x < 0 : 
                    p_x  = 0
                if p_y < 0 : 
                    p_y = 0
                eyes2[r,c] = eyes1[int(r + p_y),int(c + p_x)]
        image1[ey:ey+eh, ex:ex+ew] = eyes2  
    image_high1 = cv2.bilateralFilter(image_high, 15, 37, 37)
    #image_high2 = image_high1 - image1 + 128 
    image_high3 = cv2.GaussianBlur(image_high1,(1, 1),0)
    #image_high4 = image1 + 2 * image_high3 - 255
    #final = image1 * 0.45 + image_high4 * 0.55
    c_x = x + width * 0.5
    c_y = y + height * 0.5
    radius = min(width, height) * 2
    image_high4 = image_high3
    for row in range(height):
        for col in range(width):
            diff_x = col - width * 0.5
            diff_y = row - height * 0.5
            distance = math.sqrt(square(col - width*0.5) + square(row - height*0.5))
            m_x = 0
            m_y = 0
            if distance <= radius:
                re = (1 - math.cos(distance / radius * 2 * math.pi)) * 2
                m_x = -diff_x * (re / radius)
                m_y = -diff_y * (re / radius)
            if m_x < 0:
                m_x = 0
            if m_y < 0:
                m_y = 0
            image_high4[row,col] = image_high3[int(row + m_y), int(col + m_x)]
    image[y:y+height, x:x+width] = image_high4
    return image
vis_tools.py (project: baxter, author: destrygomorphous)
def find_black_center(cv_img, msk):

    """
    Given an opencv image containing a dark object on a light background
    and a mask of objects to ignore (a gripper, for instance),
    return the coordinates of the centroid of the largest object
    (excluding those touching edges) and its simplified contour.
    If none detected or problem with centroid, return [(-1, -1), False].
    """

    # Convert to black and white
    (rows, cols, _) = cv_img.shape
    grey_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
    grey_img = cv2.bilateralFilter(grey_img, 11, 17, 17)
    _, outlines = cv2.threshold(
        grey_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Subtract gripper
    msk_out = cv2.subtract(cv2.bitwise_not(outlines), msk)

    # Remove objects touching edges
    flood_fill_edges(msk_out, 30)

    # Find contours
    _, contours, _ = cv2.findContours(
        msk_out, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    if len(contours) == 0:
        return [(-1, -1), False]

    # Find largest contour
    max_area = 0
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > max_area:
            contour = cnt
            max_area = area

    # Approximate contour
    epsilon = 0.025 * cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, epsilon, True)

    # Find centroid
    try:
        M = cv2.moments(approx)
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        return [(cx, cy), approx]
    except ZeroDivisionError:
        return [(-1, -1), False]
trackgesture.py (project: CNNGestureRecognizer, author: asingh33)
def binaryMask(frame, x0, y0, width, height ):
    global guessGesture, visualize, mod, lastgesture, saveImg

    cv2.rectangle(frame, (x0,y0),(x0+width,y0+height),(0,255,0),1)
    roi = frame[y0:y0+height, x0:x0+width]

    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),2)
    #blur = cv2.bilateralFilter(roi,9,75,75)

    th3 = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,2)
    ret, res = cv2.threshold(th3, minValue, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    #ret, res = cv2.threshold(blur, minValue, 255, cv2.THRESH_BINARY +cv2.THRESH_OTSU)

    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True:
        retgesture = myNN.guessGesture(mod, res)
        if lastgesture != retgesture :
            lastgesture = retgesture
            #print lastgesture

            ## Checking for only PUNCH gesture here
            ## Run this app in Prediction Mode and keep Chrome browser on focus with Internet Off
            ## And have fun :) with Dino
            if lastgesture == 3:
                jump = ''' osascript -e 'tell application "System Events" to key code 49' '''
                #jump = ''' osascript -e 'tell application "System Events" to key down (49)' '''
                os.system(jump)
                print myNN.output[lastgesture] + "= Dino JUMP!"

            #time.sleep(0.01 )
            #guessGesture = False
    elif visualize == True:
        layer = int(raw_input("Enter which layer to visualize "))
        cv2.waitKey(1)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res

#%%

