def Laplacian(self, img):
    # 4-neighbor Laplacian kernel
    karr = np.array([0, 1, 0, 1, -4, 1, 0, 1, 0])
    kernel1 = karr.reshape(3, 3)
    # the kernel is symmetric, so its transpose is identical;
    # summing the two responses simply doubles the Laplacian
    kernel2 = kernel1.transpose()
    img1 = cv2.filter2D(img, -1, kernel1)
    img2 = cv2.filter2D(img, -1, kernel2)
    dImg = img1 + img2
    return dImg
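A minimal usage sketch of the same 4-neighbor Laplacian kernel applied directly with cv2.filter2D; the image path is a placeholder, and cv2.Laplacian is shown only for comparison.

import cv2
import numpy as np

img = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
laplacian_kernel = np.array([[0, 1, 0],
                             [1, -4, 1],
                             [0, 1, 0]], dtype=np.float32)
edges_manual = cv2.filter2D(img, -1, laplacian_kernel)
edges_builtin = cv2.Laplacian(img, cv2.CV_8U)  # OpenCV's built-in 4-neighbor Laplacian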
def LoG(self, img, size, sigma):
    # Laplacian of Gaussian: build the kernel, then convolve
    kernel = self.GenerateLoG(size, sigma)
    dImg = cv2.filter2D(img, -1, kernel)
    return dImg
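GenerateLoG is not shown on this page; a minimal sketch of such a helper, assuming the standard Laplacian-of-Gaussian formula, could look like this.

import numpy as np

def generate_log_kernel(size, sigma):
    # hypothetical stand-in for self.GenerateLoG(size, sigma)
    half = size // 2
    y, x = np.mgrid[-half:half + 1, -half:half + 1].astype(np.float32)
    r2 = x ** 2 + y ** 2
    kernel = (-1.0 / (np.pi * sigma ** 4)) * (1 - r2 / (2 * sigma ** 2)) * np.exp(-r2 / (2 * sigma ** 2))
    return kernel - kernel.mean()  # zero-sum so flat regions respond with zero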
def getFilterImage(img):
    # 5x5 normalized box (averaging) filter
    kernel = np.ones((5, 5), np.float32) / 25
    filtered = cv2.filter2D(img, -1, kernel)
    return filtered
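The 5x5 averaging kernel above is the same normalized box filter that cv2.blur applies, so both calls below should agree (the image path is a placeholder):

import cv2
import numpy as np

img = cv2.imread("input.png")  # placeholder path
kernel = np.ones((5, 5), np.float32) / 25
manual = cv2.filter2D(img, -1, kernel)
builtin = cv2.blur(img, (5, 5))
print(np.abs(manual.astype(int) - builtin.astype(int)).max())  # expect 0, up to rounding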
def process_frame(self):
    super().process_frame()
    if self.cur_frame_number == self.ground_truth_frame_numbers[self.gt_frame_ix]:
        # we have struck upon a frame we can evaluate against ground truth
        gt_file_path = os.path.join(self.ground_truth_folder, self.ground_truth_frame_filenames[self.gt_frame_ix])
        gt_mask = cv2.imread(gt_file_path, cv2.IMREAD_GRAYSCALE)
        self.gt_frame_ix += 1  # advance for next hit
        test_mask = self.mask.copy()
        test_mask[test_mask < MaskLabel.PERSISTENCE_LABEL.value] = 0
        test_mask[test_mask >= MaskLabel.PERSISTENCE_LABEL.value] = 1
        gt_mask[gt_mask == 255] = 1
        test_mask = test_mask.astype(np.int8)  # to allow subtraction
        errors = test_mask - gt_mask
        false_positives = errors.copy()
        false_negatives = errors.copy()
        false_positives[false_positives == -1] = 0
        false_negatives[false_negatives == 1] = 0
        n_fp = false_positives.sum()
        n_fn = -false_negatives.sum()
        penalty_map = cv2.filter2D(gt_mask, cv2.CV_32FC1, self.smoothing_kernel)
        cv2.normalize(penalty_map, penalty_map, 0, 1.0, cv2.NORM_MINMAX)
        weighted_fn = (penalty_map[false_negatives == -1]).sum()
        penalty_map = penalty_map.max() - penalty_map  # invert
        weighted_fp = (penalty_map[false_positives == 1]).sum()
        self.cum_fp += n_fp
        self.cum_fn += n_fn
        self.cum_wfn += weighted_fn
        self.cum_wfp += weighted_fp
        self.tested_frame_coutner += 1
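process_frame weights each miss by how close it falls to the core of the ground-truth mask. A toy sketch of that weighting on synthetic masks, using a Gaussian as a stand-in for self.smoothing_kernel (an assumption):

import cv2
import numpy as np

gt_mask = np.zeros((10, 10), np.uint8)
gt_mask[3:7, 3:7] = 1                       # toy ground-truth blob
test_mask = np.zeros((10, 10), np.int8)
test_mask[4:8, 4:8] = 1                     # toy detection, shifted by one pixel

smoothing_kernel = cv2.getGaussianKernel(7, 2.0)
smoothing_kernel = smoothing_kernel @ smoothing_kernel.T    # 2D Gaussian stand-in

errors = test_mask - gt_mask.astype(np.int8)
penalty_map = cv2.filter2D(gt_mask.astype(np.float32), cv2.CV_32F, smoothing_kernel)
cv2.normalize(penalty_map, penalty_map, 0, 1.0, cv2.NORM_MINMAX)
weighted_fn = penalty_map[errors == -1].sum()               # misses near the blob core cost more
weighted_fp = (penalty_map.max() - penalty_map)[errors == 1].sum()
print(weighted_fn, weighted_fp)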
def gauss(img, size=7):
    # despite the name, this is a normalized box (averaging) kernel, not a Gaussian
    kernel = np.ones((size, size), np.float32) / (size * size)
    return cv2.filter2D(img, -1, kernel)
def enhance(img):
    # gradient-style kernel emphasizing vertical edges
    # (a standard Sobel-x kernel would have [-1, 0, 1] in the last row)
    kernel = np.array([[-1, 0, 1], [-2, 0, 2], [1, 0, 1]])
    return cv2.filter2D(img, -1, kernel)
def convolutional_blur(img):
    # simple 2D convolutional image filter / averaging
    kernel = np.ones((3, 3), np.float32) / 9  # 3x3 box kernel, normalized by its area
    dst = cv2.filter2D(img, -1, kernel)
    return dst
def randomFilter(img, limit=0.5, u=0.5):
    # with probability u, blend a filtered copy of the image back in with random strength alpha
    if random.random() < u:
        height, width, channel = img.shape
        alpha = limit * random.uniform(0, 1)
        # kernel = np.ones((5, 5), np.float32) / 25
        kernel = np.ones((3, 3), np.float32) / 9 * 0.2
        # type = random.randint(0, 1)
        # if type == 0:
        #     kernel = np.ones((3, 3), np.float32) / 9 * 0.2
        # if type == 1:
        #     kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]) * 0.5
        # kernel = alpha * sharp + (1 - alpha) * blur
        # kernel = np.random.randn(5, 5)
        # kernel = kernel / np.sum(kernel * kernel) ** 0.5
        img = alpha * cv2.filter2D(img, -1, kernel) + (1 - alpha) * img
        img = np.clip(img, 0., 1.)
    return img
## https://github.com/pytorch/vision/pull/27/commits/659c854c6971ecc5b94dca3f4459ef2b7e42fb70
## color augmentation

# brightness, contrast, saturation -------------
# from mxnet code, see: https://github.com/dmlc/mxnet/blob/master/python/mxnet/image.py
# def to_grayscale(img):
#     blue = img[:, :, 0]
#     green = img[:, :, 1]
#     red = img[:, :, 2]
#     grey = 0.299 * red + 0.587 * green + 0.114 * blue
#     return grey
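randomFilter assumes a float image scaled to [0, 1] (note the final np.clip); a minimal usage sketch with a placeholder path:

import random
import cv2
import numpy as np

img = cv2.imread("input.png").astype(np.float32) / 255.0   # placeholder path, scaled to [0, 1]
random.seed(0)
augmented = randomFilter(img, limit=0.5, u=1.0)             # u=1.0 forces the blend to be applied
cv2.imwrite("augmented.png", (augmented * 255).astype(np.uint8))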
# edge_detection.py (project: DAVIS-2016-Chanllege-Solution, author: tangyuhao)
def show_edges(img_path):
    '''
    input: the image path
    output: none
    function: show the input image and its Canny edges side by side
    '''
    img = cv2.imread(img_path)
    RGB_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # kernel = np.ones((5, 5), np.float32) / 25
    # dst = cv2.filter2D(img, -1, kernel)
    edges = cv2.Canny(gray_image, 100, 200)
    plt.subplot(121), plt.imshow(RGB_img, cmap='gray', vmin=0, vmax=255)
    plt.title('Original Image'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(edges, cmap='gray')
    plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
    plt.show()
def deconvolveLucy(self, image, continue_processing, signal_status_update):
    # create the kernel
    kernel = self.calculateKernel()
    # flip the kernel for the convolution
    kernel_flipped_vertically = np.flipud(kernel)
    kernel_flipped = np.fliplr(kernel_flipped_vertically)
    # set input image as initial guess
    recent_reconstruction = np.copy(image)
    # iteratively calculate the maximum-likelihood solution
    for i in range(self.iterations):
        if not continue_processing[0]:
            return "aborted"
        percentage_finished = round(100. * float(i) / float(self.iterations))
        status = "deconvolving: " + str(percentage_finished) + "%"
        signal_status_update.emit(status)
        # convolve the recent reconstruction with the kernel
        convolved_recent_reconstruction = cv2.filter2D(recent_reconstruction, -1, kernel_flipped)
        # calculate the correction array
        correction = image / convolved_recent_reconstruction
        # find infinite values (from divisions by zero)
        infinite_values = np.invert(np.isfinite(correction))
        # set infinite values to zero because the corresponding pixels are black
        correction[infinite_values] = 0.
        # convolve the correction
        convolved_correction = cv2.filter2D(correction, -1, kernel)
        recent_reconstruction *= convolved_correction
        # print(recent_reconstruction)
    return recent_reconstruction
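deconvolveLucy is the Richardson-Lucy update loop. A self-contained sketch of the same iteration on a synthetic blurred image, without the kernel helper and Qt signal plumbing (the Gaussian PSF is an assumption):

import cv2
import numpy as np

psf = cv2.getGaussianKernel(9, 2.0)
psf = (psf @ psf.T).astype(np.float32)                  # assumed PSF for the demo
sharp = np.random.rand(64, 64).astype(np.float32)
blurred = cv2.filter2D(sharp, -1, np.fliplr(np.flipud(psf)))

estimate = blurred.copy()
for _ in range(20):
    reblurred = cv2.filter2D(estimate, -1, np.fliplr(np.flipud(psf)))
    with np.errstate(divide="ignore", invalid="ignore"):
        correction = blurred / reblurred
    correction[~np.isfinite(correction)] = 0.0          # divisions by zero come from black pixels
    estimate *= cv2.filter2D(correction, -1, psf)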
## create a kernel image with a psf
# @todo: enable passing of psf
# @return kernel as numpy array
def convolve_gabor(bd, image_min, image_max, scales):
    """
    Convolves an image with a series of Gabor kernels

    Args:
        bd (2d array)
        image_min (int or float)
        image_max (int or float)
        scales (1d array like)
    """
    if bd.dtype != 'uint8':
        bd = np.uint8(rescale_intensity(bd,
                                        in_range=(image_min, image_max),
                                        out_range=(0, 255)))
    # Each set of Gabor kernels has 8 orientations.
    out_block = np.empty((8 * len(scales), bd.shape[0], bd.shape[1]), dtype='float32')
    ki = 0
    for scale in scales:
        # Force an odd kernel size.
        ssub = 1 if scale % 2 == 0 else 0
        gabor_kernels = prep_gabor(kernel_size=(scale - ssub, scale - ssub))
        for kernel in gabor_kernels:
            # TODO: pad array?
            out_block[ki] = cv2.filter2D(bd, cv2.CV_32F, kernel)
            ki += 1
    return out_block
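prep_gabor is imported from elsewhere in that project; assuming it wraps cv2.getGaborKernel, a minimal sketch that yields the 8 orientations per scale expected by convolve_gabor might look like:

import cv2
import numpy as np

def prep_gabor(kernel_size=(15, 15), sigma=4.0, lambd=10.0, gamma=0.5, psi=0.0):
    # hypothetical stand-in: one Gabor kernel per 22.5-degree orientation step
    kernels = []
    for theta in np.arange(0, np.pi, np.pi / 8):
        kernels.append(cv2.getGaborKernel(kernel_size, sigma, theta, lambd, gamma, psi,
                                          ktype=cv2.CV_32F))
    return kernels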
def getFeatureMaps(image, k, mapp):
    kernel = np.array([[-1., 0., 1.]], np.float32)

    height = image.shape[0]
    width = image.shape[1]
    assert image.ndim == 3 and image.shape[2]
    numChannels = 3  # (1 if image.ndim == 2 else image.shape[2])

    sizeX = width // k
    sizeY = height // k
    px = 3 * NUM_SECTOR
    p = px
    stringSize = sizeX * p

    mapp['sizeX'] = sizeX
    mapp['sizeY'] = sizeY
    mapp['numFeatures'] = p
    mapp['map'] = np.zeros((mapp['sizeX'] * mapp['sizeY'] * mapp['numFeatures']), np.float32)

    dx = cv2.filter2D(np.float32(image), -1, kernel)    # np.float32(...) is necessary
    dy = cv2.filter2D(np.float32(image), -1, kernel.T)

    arg_vector = np.arange(NUM_SECTOR + 1).astype(np.float32) * np.pi / NUM_SECTOR
    boundary_x = np.cos(arg_vector)
    boundary_y = np.sin(arg_vector)

    # 200x speedup
    r, alfa = func1(dx, dy, boundary_x, boundary_y, height, width, numChannels)  # with @jit
    # ~0.001s

    nearest = np.ones(k, np.int32)  # np.int is deprecated; use a concrete integer dtype
    nearest[0:k // 2] = -1

    w = np.zeros((k, 2), np.float32)
    a_x = np.concatenate((k / 2 - np.arange(k / 2) - 0.5, np.arange(k / 2, k) - k / 2 + 0.5)).astype(np.float32)
    b_x = np.concatenate((k / 2 + np.arange(k / 2) + 0.5, -np.arange(k / 2, k) + k / 2 - 0.5 + k)).astype(np.float32)
    w[:, 0] = 1.0 / a_x * ((a_x * b_x) / (a_x + b_x))
    w[:, 1] = 1.0 / b_x * ((a_x * b_x) / (a_x + b_x))

    # original (non-jit) implementation:
    # mapp['map'] = func2(dx, dy, boundary_x, boundary_y, r, alfa, nearest, w, k,
    #                     height, width, sizeX, sizeY, p, stringSize)  # func2 without @jit

    # 500x speedup
    mapp['map'] = func2(dx, dy, boundary_x, boundary_y, r, alfa, nearest, w, k,
                        height, width, sizeX, sizeY, p, stringSize)  # with @jit
    # ~0.001s

    return mapp
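func1 and func2 are jit-compiled helpers that are not shown here. Conceptually, func1 keeps, per pixel, the channel with the strongest gradient and assigns it to the closest sector boundary; a vectorized NumPy sketch of that idea (not a drop-in replacement, and the exact output layout is an assumption):

import numpy as np

NUM_SECTOR = 9  # assumed value, the usual FHOG setting

def dominant_gradient(dx, dy, boundary_x, boundary_y):
    # dx, dy: (H, W, C) per-channel gradients; keep the strongest channel per pixel
    magnitude = np.sqrt(dx * dx + dy * dy)
    best = magnitude.argmax(axis=2)
    rows, cols = np.indices(best.shape)
    r = magnitude[rows, cols, best]
    gx = dx[rows, cols, best]
    gy = dy[rows, cols, best]
    # project the gradient onto each sector boundary; largest absolute projection wins
    dots = gx[..., None] * boundary_x[:NUM_SECTOR] + gy[..., None] * boundary_y[:NUM_SECTOR]
    sector = np.abs(dots).argmax(axis=2)                           # undirected bin in [0, NUM_SECTOR)
    chosen = np.take_along_axis(dots, sector[..., None], axis=2)[..., 0]
    directed = np.where(chosen >= 0, sector, sector + NUM_SECTOR)  # directed bin in [0, 2*NUM_SECTOR)
    return r, sector, directed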