Python cv2.filter2D() example source code
The snippets below are excerpts from open-source projects; they generally assume `import cv2` and `import numpy as np` (plus each project's own helpers).

EdgeHistogramComputer.py (source code, project: imgpedia, author: scferrada)
def compute(self, frame):
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        descriptor = []
        dominantGradients = np.zeros_like(frame)
        maxGradient = cv2.filter2D(frame, cv2.CV_32F, self.kernels[0])
        maxGradient = np.absolute(maxGradient)
        for k in range(1,len(self.kernels)):
            kernel = self.kernels[k]
            gradient = cv2.filter2D(frame, cv2.CV_32F, kernel)
            gradient = np.absolute(gradient)
            np.maximum(maxGradient, gradient, maxGradient)
            indices = (maxGradient == gradient)
            dominantGradients[indices] = k

        frameH, frameW = frame.shape
        for row in range(self.rows):
            for col in range(self.cols):
                mask = np.zeros_like(frame)
                mask[((frameH // self.rows) * row):((frameH // self.rows) * (row + 1)), (frameW // self.cols) * col:((frameW // self.cols) * (col + 1))] = 255
                hist = cv2.calcHist([dominantGradients], [0], mask, self.bins, self.range)
                hist = cv2.normalize(hist, None)
                descriptor.append(hist)
        return np.concatenate([x for x in descriptor])
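A self-contained sketch of the same dominant-gradient idea, using a hypothetical set of directional kernels (the class's actual self.kernels, rows, cols, bins and range are defined elsewhere in the imgpedia project, so everything below is illustrative):

import cv2
import numpy as np

# hypothetical directional kernels: vertical, horizontal and the two diagonals
kernels = [
    np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], np.float32),
    np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]], np.float32),
    np.array([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]], np.float32),
    np.array([[1, 1, 0], [1, 0, -1], [0, -1, -1]], np.float32),
]

gray = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)            # hypothetical input image
responses = np.stack([np.abs(cv2.filter2D(gray, cv2.CV_32F, k)) for k in kernels])
dominant = np.argmax(responses, axis=0).astype(np.uint8)        # strongest direction per pixel
hist = cv2.calcHist([dominant], [0], None, [len(kernels)], [0, len(kernels)])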
OrientedGradientsComputer.py (source code, project: imgpedia, author: scferrada)
def compute(self, frame):
        #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        dx = cv2.filter2D(frame, cv2.CV_32F, self.xkernel)
        dy = cv2.filter2D(frame, cv2.CV_32F, self.ykernel)
        # cartToPolar returns the per-pixel magnitude and angle of the (dx, dy) gradient
        magnitudes, orientations = cv2.cartToPolar(dx, dy)
        descriptor = []
        frameH, frameW = frame.shape
        mask_threshold = magnitudes <= self.threshold
        for row in range(self.rows):
            for col in range(self.cols):
                mask = np.zeros_like(frame)
                mask[((frameH // self.rows) * row):((frameH // self.rows) * (row + 1)), (frameW // self.cols) * col:((frameW // self.cols) * (col + 1))] = 1
                mask[mask_threshold] = 0
                a_, b_ = mask.shape
                hist = cv2.calcHist([orientations], self.channel, mask, [self.bins], self.range)
                hist = cv2.normalize(hist, None)
                descriptor.append(hist)
        return np.concatenate([x for x in descriptor])
ImageAugmenter.py (source code, project: tf-cnn-lstm-ocr-captcha, author: Luonic)
def apply_motion_blur(image, kernel_size, strength = 1.0):
    """Applies motion blur on image 
    """
    # generating the kernel
    kernel_motion_blur = np.zeros((kernel_size, kernel_size))
    kernel_motion_blur[int((kernel_size - 1) / 2), :] = np.ones(kernel_size)
    kernel_motion_blur = kernel_motion_blur / kernel_size

    # rotate the line kernel by a random angle (rotate() is assumed to be scipy.ndimage.rotate)
    rotation_kernel = np.random.uniform(0, 360)
    kernel_motion_blur = rotate(kernel_motion_blur, rotation_kernel)
    #cv2.imshow("kernel", cv2.resize(kernel_motion_blur, (100, 100)))
    kernel_motion_blur *= strength

    # applying the kernel to the input image
    output = cv2.filter2D(image, -1, kernel_motion_blur)
    return output
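A possible call site, assuming `rotate` above comes from scipy.ndimage and the input is an ordinary BGR image (the file names are illustrative):

import cv2
from scipy.ndimage import rotate   # assumed source of the rotate() used above

img = cv2.imread("captcha.png")
blurred = apply_motion_blur(img, kernel_size=9, strength=1.0)
cv2.imwrite("captcha_blurred.png", blurred)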
preprocess.py (source code, project: Magic-Pixel, author: zhwhong)
def generalBlur(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)  # read the image directly as grayscale
    img1 = np.float32(img)  # convert to float32
    kernel = np.ones((5,5),np.float32)/25

    dst = cv2.filter2D(img1,-1,kernel)
    # cv2.filter2D(src, ddepth, kernel, anchor=(-1,-1)): ddepth=-1 keeps the source depth,
    # and anchor=(-1,-1) centers the kernel on the pixel being filtered
    plt.figure()
    plt.subplot(1,2,1), plt.imshow(img1,'gray')
    # plt.savefig('test1.jpg')
    plt.subplot(1,2,2), plt.imshow(dst,'gray')
    # plt.savefig('test2.jpg')
    plt.show()

chessboard.py (source code, project: cvcalib, author: Algomorph)
def compute_inital_corner_likelihood(image):
    likelihoods = []
    for prototype in ck.CORNER_KERNEL_PROTOTYPES:
        filter_responses = [cv2.filter2D(image, ddepth=cv2.CV_64F, kernel=kernel) for kernel in prototype]
        fA, fB, fC, fD = filter_responses
        mean_response = (fA + fB + fC + fD) / 4.
        minAB = np.minimum(fA, fB)
        minCD = np.minimum(fC, fD)
        diff1 = minAB - mean_response
        diff2 = minCD - mean_response
        # For an ideal corner, the response of {A,B} should be greater than the mean response of {A,B,C,D},
        # while the response of {C,D} should be smaller, and vice versa for flipped corners.
        likelihood1 = np.minimum(diff1, -diff2)
        likelihood2 = np.minimum(-diff1, diff2)  # flipped case
        likelihoods.append(likelihood1)
        likelihoods.append(likelihood2)
    corner_likelihood = np.max(likelihoods, axis=0)
    return corner_likelihood
pyramid.py (source code, project: AlphaLogo, author: gigaflw)
def compute_grad(self):
        """
        precompute the gradient magnitude and angle for every pyramid layer,
            where the angle is in [0, 2π)
        """

        for oct_ind, layer_ind, layer in self.enumerate():
            # todo: better kernel can be used?
            grad_x = cv2.filter2D(layer, cv2.CV_64F, np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]))
            grad_y = cv2.filter2D(layer, cv2.CV_64F, np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]))
            grad_mag = np.sqrt(grad_x**2 + grad_y**2)
            grad_ang = np.arctan2(grad_y, grad_x)  # each element in (-π, π]
            grad_ang %= TAU  # (-π, 0) is moved to (π, 2π)

            self._grad_mag[oct_ind][layer_ind] = grad_mag
            self._grad_ang[oct_ind][layer_ind] = grad_ang
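The same Sobel-style gradient computation on a single image, written as a standalone sketch (TAU is simply 2π; the input path is illustrative):

import cv2
import numpy as np

TAU = 2 * np.pi
sobel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float64)
sobel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], np.float64)

gray = cv2.imread("layer.png", cv2.IMREAD_GRAYSCALE)
gx = cv2.filter2D(gray, cv2.CV_64F, sobel_x)
gy = cv2.filter2D(gray, cv2.CV_64F, sobel_y)
mag = np.sqrt(gx ** 2 + gy ** 2)
ang = np.arctan2(gy, gx) % TAU   # wrap angles into [0, 2*pi)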
make_samples.py (source code, project: segmenty, author: paulfitz)
def motion_blur(img):
    size = random.randint(3, 15)
    # generating the kernel
    kernel_motion_blur = np.zeros((size, size))
    x0 = int((size-1)/2)
    y0 = int((size-1)/2)
    dx = 0
    dy = 0
    while dx == 0 and dy == 0:
        dx = random.randint(-1, 1)
        dy = random.randint(-1, 1)
    ct = 0
    for k in range(-size, size):
        x = x0 + k * dx
        y = y0 + k * dy
        if x >= 0 and y >= 0 and x < size and y < size:
            kernel_motion_blur[x, y] = 1
            ct += 1
    kernel_motion_blur = kernel_motion_blur / ct
    # applying the kernel to the input image
    output = cv2.filter2D(img, -1, kernel_motion_blur)
    return output
spfunctions.py (source code, project: spfeas, author: jgrss)
def get_mag_avg(img):

    img = np.sqrt(img)

    kernels = get_kernels()

    mag = np.zeros(img.shape, dtype='float32')

    for kernel_filter in kernels:

        gx = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[1], borderType=cv2.BORDER_REFLECT)
        gy = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[0], borderType=cv2.BORDER_REFLECT)

        mag += cv2.magnitude(gx, gy)

    mag /= len(kernels)

    return mag
ChessBoard.py (source code, project: ChessBot, author: pakhandi)
def sharpen(self, testImg):
        # Create the identity filter scaled by two: a single 2.0 at the kernel centre
        kernel = np.zeros((9, 9), np.float32)
        kernel[4, 4] = 2.0  # Identity, times two!

        # Create a box filter:
        boxFilter = np.ones((9, 9), np.float32) / 81.0

        # Subtract the two:
        kernel = kernel - boxFilter

        custom = cv2.filter2D(testImg, -1, kernel)

        return custom
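The kernel built above is an unsharp mask: twice the identity minus a 9x9 box filter, i.e. roughly original + (original - blurred). A standalone version of the same filter (input path is illustrative):

import cv2
import numpy as np

kernel = np.zeros((9, 9), np.float32)
kernel[4, 4] = 2.0
kernel -= np.ones((9, 9), np.float32) / 81.0   # 2*identity - box = unsharp mask

img = cv2.imread("board.png")
sharpened = cv2.filter2D(img, -1, kernel)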

    # driver function to process a single image
edge_detection.py (source code, project: DAVIS-2016-Chanllege-Solution, author: tangyuhao)
def get_edges(img_path):
    '''
    input: the image path
    output: a numpy ndarray of the edges in this image 
    '''

    img = cv2.imread(img_path)
    RGB_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # kernel = np.ones((5,5),np.float32)/25
    # dst = cv2.filter2D(img,-1,kernel)


    edges = cv2.Canny(gray_image,100,200)
    return edges
motionDetect.py (source code, project: Image-Processing-and-Steganogrphy, author: motkeg)
def FrameSmoth(frame):

    ''' In this stage of the algorithm we implement the 'blurring' process:
        the function calculates the score of each frame of the interval (0.25 s) by applying a Gaussian kernel.
        The goal of this process is to avoid false positives among the frames that we recognized as different. '''

    gaussian =cv2.getGaussianKernel(5,10)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray=cv2.filter2D(gray,-1,gaussian)
    #gray=signal.convolve2d(gray, gaussian,mode='same')
    gray=normalize(gray)
    return gray
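Note that cv2.getGaussianKernel returns a 1-D (ksize x 1) column of coefficients, so the filter2D call above smooths along a single axis. A full 2-D Gaussian can be built from its outer product, as in this sketch with the same parameters (input path is illustrative):

import cv2
import numpy as np

g = cv2.getGaussianKernel(5, 10)       # 5x1 column vector of Gaussian weights
gaussian_2d = g @ g.T                  # 5x5 separable 2-D Gaussian
gray = cv2.imread("frame.png", cv2.IMREAD_GRAYSCALE)
smoothed = cv2.filter2D(gray, -1, gaussian_2d)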
get_motion_salient_boxes.py (source code, project: Deep360Pilot-optical-flow, author: yenchenlin)
def motion_saliency(flow_mag, n):
    prior = flow_mag / np.max(flow_mag)
    filt = np.ones((n, n))/n/n

    likeli = cv2.filter2D(flow_mag.astype(np.float32), -1, filt)
    likeli = (likeli - likeli.min()) / (likeli.max() - likeli.min())

    return likeli * prior
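A hedged example of feeding this helper with a dense optical-flow magnitude (the same function also appears in the next two files of the project; the frame paths and Farneback parameters here are illustrative):

import cv2
import numpy as np

prev = cv2.imread("frame_000.png", cv2.IMREAD_GRAYSCALE)
curr = cv2.imread("frame_001.png", cv2.IMREAD_GRAYSCALE)
flow = cv2.calcOpticalFlowFarneback(prev, curr, None, 0.5, 3, 15, 3, 5, 1.2, 0)
flow_mag = cv2.magnitude(flow[..., 0], flow[..., 1])
saliency_map = motion_saliency(flow_mag, n=9)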
get_motion_features.py (source code, project: Deep360Pilot-optical-flow, author: yenchenlin)
def motion_saliency(flow_mag, n):
    prior = flow_mag / np.max(flow_mag)
    filt = np.ones((n, n))/n/n

    likeli = cv2.filter2D(flow_mag.astype(np.float32), -1, filt)
    likeli = (likeli - likeli.min()) / (likeli.max() - likeli.min())

    return likeli * prior
divide_area_motion_salient_boxes.py (source code, project: Deep360Pilot-optical-flow, author: yenchenlin)
def motion_saliency(flow_mag, n):
    prior = flow_mag / np.max(flow_mag)
    filt = np.ones((n, n))/n/n

    likeli = cv2.filter2D(flow_mag.astype(np.float32), -1, filt)
    likeli = (likeli - likeli.min()) / (likeli.max() - likeli.min())

    return likeli * prior
Sharpen.py (source code, project: DVD2FHD, author: AMakeApp)
def sharpen_filter(image):
    img = cv2.imread(image)

    kernel = np.array([[-1,-1,-1,-1,-1],
                        [-1,2,2,2,-1],
                        [-1,2,8,2,-1],
                        [-1,2,2,2,-1],
                        [-1,-1,-1,-1,-1]]) / 8.0
    output = cv2.filter2D(img, -1, kernel)

    os.remove(image)
    cv2.imwrite(image, output)
skin_detector.py (source code, project: pycolor_detection, author: parth1993)
def grab_cut_mask(img_col, mask, debug=False):
    assert isinstance(img_col, numpy.ndarray), 'image must be a numpy array'
    assert isinstance(mask, numpy.ndarray), 'mask must be a numpy array'
    assert img_col.ndim == 3, 'skin detection can only work on color images'
    assert mask.ndim == 2, 'mask must be 2D'

    kernel = numpy.ones((50, 50), numpy.float32) / (50 * 50)
    dst = cv2.filter2D(mask, -1, kernel)
    dst[dst != 0] = 255
    free = numpy.array(cv2.bitwise_not(dst), dtype=numpy.uint8)

    if debug:
        scripts.display('not skin', free)
        scripts.display('grabcut input', mask)

    grab_mask = numpy.zeros(mask.shape, dtype=numpy.uint8)
    grab_mask[:, :] = 2
    grab_mask[mask == 255] = 1
    grab_mask[free == 255] = 0

    if numpy.unique(grab_mask).tolist() == [0, 1]:
        logger.debug('conducting grabcut')
        bgdModel = numpy.zeros((1, 65), numpy.float64)
        fgdModel = numpy.zeros((1, 65), numpy.float64)

        if img_col.size != 0:
            mask, bgdModel, fgdModel = cv2.grabCut(img_col, grab_mask, None, bgdModel, fgdModel, 5,
                                                   cv2.GC_INIT_WITH_MASK)
            mask = numpy.where((mask == 2) | (mask == 0), 0, 1).astype(numpy.uint8)
        else:
            logger.warning('img_col is empty')

    return mask
AnalizeFrame.py (source code, project: serbian-alpr, author: golubaca)
def smooth(self, image):
        """
        Smooth image using kernel
        :param image:
        :return:
        """
        smoothed = cv2.filter2D(
            image, -1, np.ones((self.kernel, self.kernel), np.float32) / self.kernel**2)
        return smoothed
saliency.py (source code, project: saliency, author: shuuchen)
def makeGaborFilter(dims, lambd, theta, psi, sigma, gamma):
    """
        Creates a Gabor filter (an array) with parameters lambd, theta,
        psi, sigma, and gamma of size dims.  Returns a function which 
        can be passed to `features' as `channel' argument.
        In some versions of OpenCV, sizes greater than (11,11) will lead
        to segfaults (see http://code.opencv.org/issues/2644).
    """
    def xpf(i,j):
        return i*math.cos(theta) + j*math.sin(theta)
    def ypf(i,j):
        return -i*math.sin(theta) + j*math.cos(theta)
    def gabor(i,j):
        xp = xpf(i,j)
        yp = ypf(i,j)
        return math.exp(-(xp**2 + gamma**2*yp**2)/(2*sigma**2)) * math.cos(2*math.pi*xp/lambd + psi)

    halfwidth = dims[0]/2
    halfheight = dims[1]/2

    kernel = numpy.array([[gabor(halfwidth - i, halfheight - j) for j in range(dims[1])] for i in range(dims[0])])

    def theFilter(image):
        return cv2.filter2D(src = image, ddepth = -1, kernel = kernel, )

    return theFilter
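A possible way to use the returned filter on its own, outside the `features` pipeline mentioned in the docstring (all parameter values and the input path are illustrative):

import math
import cv2
import numpy as np

gabor = makeGaborFilter(dims=(9, 9), lambd=8, theta=math.pi / 4, psi=0, sigma=2.5, gamma=0.5)
gray = cv2.imread("scene.png", cv2.IMREAD_GRAYSCALE).astype(np.float32)
response = gabor(gray)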
ft_method.py (source code, project: saliency_method, author: lee88688)
def ft_saliency(img_lab):
    blur_img_lab = cv2.filter2D(img_lab, -1, get_filter_kernel(5, 5))
    blur_lm = blur_img_lab[:, :, 0].mean()
    blur_am = blur_img_lab[:, :, 1].mean()
    blur_bm = blur_img_lab[:, :, 2].mean()
    blur_sm = np.sqrt((blur_img_lab[:, :, 0] - blur_lm) ** 2 + (blur_img_lab[:, :, 1] - blur_am) ** 2 + (
        blur_img_lab[:, :, 2] - blur_bm) ** 2)

    return normalize(blur_sm)
page.py (source code, project: doc2text, author: jlsutherland)
def process_image(orig_im):

    # Load and scale down image.
    scale, im = downscale_image(orig_im)

    # Reduce noise.
    blur = reduce_noise_raw(im.copy())

    # Edged.
    edges = auto_canny(blur.copy())

    # Reduce noise and remove thin borders.
    debordered = reduce_noise_edges(edges.copy())

    # Dilate until there are a few components.
    dilation, rects, num_tries = find_components(debordered, 16)

    # Find the final crop.
    final_rect = find_final_crop(dilation, rects)

    # Crop the image and smooth.
    cropped = crop_image(orig_im, final_rect, scale)
    kernel = np.ones((5, 5), np.float32) / 25
    smooth2d = cv2.filter2D(cropped, -1, kernel=kernel)

    return (smooth2d, num_tries)
gabor.py (source code, project: computer-vision-techniques, author: Shikhargupta)
def process(img, filters):
    accum = np.zeros_like(img)
    for kern in filters:
        fimg = cv2.filter2D(img, cv2.CV_8UC3, kern)
        np.maximum(accum, fimg, accum)
    return accum
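A typical way to build the `filters` bank used above, in the spirit of the classic OpenCV Gabor sample (the kernel size and parameters here are illustrative):

import cv2
import numpy as np

filters = []
ksize = 31
for theta in np.arange(0, np.pi, np.pi / 8):
    kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
    kern /= 1.5 * kern.sum()   # normalize so responses stay in a comparable range
    filters.append(kern)

img = cv2.imread("texture.png")   # hypothetical input
result = process(img, filters)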
HandRecognition.py (source code, project: hand-gesture-recognition-opencv, author: mahaveerverma)
def hand_threshold(frame_in,hand_hist):
    frame_in=cv2.medianBlur(frame_in,3)
    hsv=cv2.cvtColor(frame_in,cv2.COLOR_BGR2HSV)
    hsv[0:int(cap_region_y_end*hsv.shape[0]),0:int(cap_region_x_begin*hsv.shape[1])]=0 # Right half screen only
    hsv[int(cap_region_y_end*hsv.shape[0]):hsv.shape[0],0:hsv.shape[1]]=0
    back_projection = cv2.calcBackProject([hsv], [0,1],hand_hist, [00,180,0,256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_elem_size,morph_elem_size))
    cv2.filter2D(back_projection, -1, disc, back_projection)
    back_projection=cv2.GaussianBlur(back_projection,(gaussian_ksize,gaussian_ksize), gaussian_sigma)
    back_projection=cv2.medianBlur(back_projection,median_ksize)
    ret, thresh = cv2.threshold(back_projection, hsv_thresh_lower, 255, 0)

    return thresh
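For context, the `hand_hist` passed into calcBackProject above is typically a 2-D Hue/Saturation histogram of a skin sample, e.g. built as in this sketch (the ROI path and bin counts are illustrative):

import cv2
import numpy as np

roi = cv2.imread("hand_roi.png")                 # small patch cropped from the user's hand
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
hand_hist = cv2.calcHist([hsv_roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)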

# 3. Find hand contour
preprocess.py (source code, project: Fingerprint-Recognition, author: zhangzimou)
def GaborFilter_(img, blockSize, wl, dire, sigma=20):
    imgout = np.zeros_like(img)
    O = block_view(imgout, (blockSize, blockSize))
    B = block_view(img, (blockSize, blockSize))
    for w, d, o, b in zip(wl, dire, O, B):
        # one Gabor kernel per block (wavelength w, orientation d), applied block by block
        kernels = [cv2.getGaborKernel((blockSize, blockSize), sigma, d_, w_, 1) for w_, d_ in zip(w, d)]
        o[:, :] = np.asarray([cv2.filter2D(x, -1, k) for x, k in zip(b, kernels)])
    return imgout
preprocessing.py (source code, project: Fingerprint-Recognition, author: zhangzimou)
def GaborFilter_(img, blockSize, wl, dire, sigma=20):
    imgout = np.zeros_like(img)
    O = block_view(imgout, (blockSize, blockSize))
    B = block_view(img, (blockSize, blockSize))
    for w, d, o, b in zip(wl, dire, O, B):
        # one Gabor kernel per block (wavelength w, orientation d), applied block by block
        kernels = [cv2.getGaborKernel((blockSize, blockSize), sigma, d_, w_, 1) for w_, d_ in zip(w, d)]
        o[:, :] = np.asarray([cv2.filter2D(x, -1, k) for x, k in zip(b, kernels)])
    return imgout

#def applyKernel(img,kernel,i,j):
preprocess.py (source code, project: Fingerprint-Recognition, author: zhangzimou)
def GaborFilter_(img, blockSize, wl, dire, sigma=20):
    imgout = np.zeros_like(img)
    O = block_view(imgout, (blockSize, blockSize))
    B = block_view(img, (blockSize, blockSize))
    for w, d, o, b in zip(wl, dire, O, B):
        # one Gabor kernel per block (wavelength w, orientation d), applied block by block
        kernels = [cv2.getGaborKernel((blockSize, blockSize), sigma, d_, w_, 1) for w_, d_ in zip(w, d)]
        o[:, :] = np.asarray([cv2.filter2D(x, -1, k) for x, k in zip(b, kernels)])
    return imgout
initial.py (source code, project: Fingerprint-Recognition, author: zhangzimou)
def GaborFilter_(img, blockSize, wl, dire, sigma=20):
    imgout = np.zeros_like(img)
    O = block_view(imgout, (blockSize, blockSize))
    B = block_view(img, (blockSize, blockSize))
    for w, d, o, b in zip(wl, dire, O, B):
        # one Gabor kernel per block (wavelength w, orientation d), applied block by block
        kernels = [cv2.getGaborKernel((blockSize, blockSize), sigma, d_, w_, 1) for w_, d_ in zip(w, d)]
        o[:, :] = np.asarray([cv2.filter2D(x, -1, k) for x, k in zip(b, kernels)])
    return imgout

#def applyKernel(img,kernel,i,j):
image.py (source code, project: digit-ocr, author: Nozdi)
def resize_digits(digits):
    digits = map(itemgetter('image'), sorted(digits, key=itemgetter('x')))
    blur_kernel = np.ones((4, 4), np.float32)/(4*4)
    erode_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    return [
        cv2.resize(
            cv2.bitwise_not(
                cv2.filter2D(
                    cv2.erode(digit, erode_kernel, iterations=1),
                    -1, blur_kernel)
            ),
            (20, 20))
        for digit in digits]
BaseSpatialFilter.py (source code, project: NGImageProcessor, author: artzers)
def Mean(self, img, size):
        # box (mean) filter: normalize the all-ones kernel so values are averaged rather than summed
        kernel = np.ones((size, size), np.float32) / (size * size)
        dImg = cv2.filter2D(img, -1, kernel)
        return dImg
BaseSpatialFilter.py (source code, project: NGImageProcessor, author: artzers)
def Gaussian(self, img, size, sigma):
        kernel = self.GenerateGaussian(size, sigma)
        dImg = cv2.filter2D(img, -1, kernel)
        return dImg
BaseSpatialFilter.py (source code, project: NGImageProcessor, author: artzers)
def Sobel(self, img):
        karr = np.array([-1, -2 , -1, 0, 0, 0, 1, 2, 1])
        kernel1 = karr.reshape(3,3)
        kernel2 = kernel1.transpose()
        img1 = cv2.filter2D(img, -1, kernel1)
        img2 = cv2.filter2D(img, -1, kernel2)
        dImg = img1 + img2
        return dImg
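Because `img1 + img2` can overflow when the input is uint8, a safer variant (a sketch, not the class's own method) filters at 32-bit depth and combines the two responses with cv2.magnitude:

import cv2
import numpy as np

kernel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], np.float32)
kernel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32)

img = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)   # hypothetical input
gx = cv2.filter2D(img, cv2.CV_32F, kernel_x)
gy = cv2.filter2D(img, cv2.CV_32F, kernel_y)
edges = cv2.convertScaleAbs(cv2.magnitude(gx, gy))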

