def canny(im, blur=3):
im_blur = cv2.blur(im, (blur,blur))
    return cv2.Canny(im_blur, 50, 150, apertureSize=blur)
Example source code for Python's cv2.blur()
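The snippets below are collected from different projects and omit their module-level imports. Most of them assume at least the following baseline (an assumption, not copied from any single source file); individual snippets additionally rely on their own helpers and imports such as matplotlib.pyplot, PIL, random, or keras.backend.

import cv2
import numpy as np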
def canny(img, lowThreshold):
"""
Performs canny edge detection on the provided grayscale image.
:param img: a grayscale image
:param lowThreshold: threshold for the canny operation
:return: binary image containing the edges found by canny
"""
dst = np.zeros(img.shape, dtype=img.dtype)
cv2.blur(img, (3, 3), dst)
    # Canny recommends that the high threshold be 3 times the low threshold;
    # the aperture (Sobel kernel) size is 3, matching the blur kernel above.
    return cv2.Canny(dst, lowThreshold, lowThreshold * 3, apertureSize=3)
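A minimal usage sketch for the helper above (the file name is a placeholder):

gray = cv2.imread("sample.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input image
edges = canny(gray, lowThreshold=50)
cv2.imwrite("sample_edges.png", edges)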
def LinearMotionBlur3C(img):
"""Performs motion blur on an image with 3 channels. Used to simulate
blurring caused due to motion of camera.
Args:
img(NumPy Array): Input image with 3 channels
Returns:
Image: Blurred image by applying a motion blur with random parameters
"""
lineLengths = [3,5,7,9]
lineTypes = ["right", "left", "full"]
lineLengthIdx = np.random.randint(0, len(lineLengths))
lineTypeIdx = np.random.randint(0, len(lineTypes))
lineLength = lineLengths[lineLengthIdx]
lineType = lineTypes[lineTypeIdx]
lineAngle = randomAngle(lineLength)
    blurred_img = img.copy()
    for i in range(3):
        blurred_img[:, :, i] = PIL2array1C(LinearMotionBlur(img[:, :, i], lineLength, lineAngle, lineType))
blurred_img = Image.fromarray(blurred_img, 'RGB')
return blurred_img
def computeWeightsLocallyNormalized(I, centered_gradient=True, norm_radius=45):
h,w = I.shape[:2]
if centered_gradient:
gy,gx = np.gradient(I)[:2]
gysq = (gy**2).mean(axis=2) if gy.ndim > 2 else gy**2
gxsq = (gx**2).mean(axis=2) if gx.ndim > 2 else gx**2
gxsq_local_mean = cv2.blur(gxsq, ksize=(norm_radius, norm_radius))
gysq_local_mean = cv2.blur(gysq, ksize=(norm_radius, norm_radius))
w_horizontal = np.exp( - gxsq * 1.0/(2*np.maximum(1e-6, gxsq_local_mean)))
w_vertical = np.exp( - gysq * 1.0/(2*np.maximum(1e-6, gysq_local_mean)))
else:
        raise NotImplementedError("only centered_gradient=True is implemented")
return w_horizontal, w_vertical
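A hedged usage sketch for the function above (the file name and normalization are illustrative assumptions):

I = cv2.imread("frame.png").astype(np.float32) / 255.0  # hypothetical input image
w_horizontal, w_vertical = computeWeightsLocallyNormalized(I, norm_radius=45)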
def generate_defect_img(img,min_num,max_num,label_img):
# label_img = np.zeros_like(img)
# if random.random > 0.9:
# generate_crack(img,label_img,1,(0.01,0.05),6,(0.1,0.8))
#method_list = [blur,scratch,spot]
method_list = [blur,scratch,spot]
num = random.randint(min_num,max_num)
    print(num)
for i in range(num):
fun_index = random.randint(0,len(method_list)-1)
method_list[fun_index](img,label_img)
# generate_blur(img,1,(0.05,0.3),(0.05,0.3))
# generate_scratch(img,1,(0.001,0.05),20,(0.01,0.4))
# generate_spot(img,1,(0.001,0.008),1.5)
#return label_img
Source: arch_light_track.py, from project Vision_Processing-2016 (author: Sabercat-Robotics-4146-FRC)
def get_bounding_rect( cap, win_cap, win, upper, lower):
    hsv = cv2.cvtColor(cap, cv2.COLOR_BGR2HSV)
    msk = cv2.inRange(cv2.blur(hsv, (5, 5)), np.array(lower), np.array(upper))
    msk = cv2.dilate(cv2.erode(msk, None, iterations=3), None, iterations=3)
    im2, contours, hierarchy = cv2.findContours(msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        areas = [cv2.contourArea(c) for c in contours]  # get the area of each contour
        max_index = np.argmax(areas)  # get the index of the largest contour by area
        cnts = contours[max_index]  # get the largest contour by area
        cv2.drawContours(msk, [cnts], 0, (0, 255, 0), 3)  # draw the contour onto the mask image
        x, y, w, h = cv2.boundingRect(cnts)  # get the bounding-box information for the contour
        cv2.rectangle(win_cap, (x, y), (x + w, y + h), (255, 255, 255), 2)  # draw the bounding box on the image
        cv2.imshow("debug.", win_cap)
        try:
            self.smt_dash.putNumber('vis_x', x)
            self.smt_dash.putNumber('vis_y', y)
            self.smt_dash.putNumber('vis_w', w)
            self.smt_dash.putNumber('vis_h', h)
        except Exception:
            pass
def weightedLoss(y_true, y_pred):
# compute weights
# a = cv2.blur(y_true, (11,11))
# ind = (a > 0.01) * (a < 0.99)
# ind = ind.astype(np.float32)
# weights = np.ones(a.shape)
a = K.pool2d(y_true, (11,11), strides=(1, 1), padding='same', data_format=None, pool_mode='avg')
ind = K.cast(K.greater(a, 0.01), dtype='float32') * K.cast(K.less(a, 0.99), dtype='float32')
weights = K.cast(K.greater_equal(a, 0), dtype='float32')
w0 = K.sum(weights)
# w0 = weights.sum()
weights = weights + ind * 2
w1 = K.sum(weights)
# w1 = weights.sum()
weights = weights / w1 * w0
return weightedBCELoss2d(y_true, y_pred, weights) + weightedSoftDiceLoss(y_true, y_pred, weights)
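weightedBCELoss2d and weightedSoftDiceLoss are project helpers not shown here; a plausible sketch of the weighted BCE term, assuming the Keras backend (not the project's original implementation):

def weightedBCELoss2d(y_true, y_pred, weights):
    # per-pixel binary cross-entropy, weighted and normalized by the total weight
    loss = K.binary_crossentropy(y_true, y_pred)
    return K.sum(loss * weights) / K.sum(weights)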
Source: processing.py, from project Ultras-Sound-Nerve-Segmentation---Kaggle (author: Simoncarbo)
def postprocess_masks(masks,new_size = None):
if new_size is not None:
masks_p = np.ndarray((masks.shape[0], masks.shape[1]) + new_size, dtype=np.float32)
for i in range(masks.shape[0]):
masks_p[i, 0] = cv2.resize(masks[i, 0], (new_size[1],new_size[0]), interpolation=cv2.INTER_LINEAR)
else:
masks_p = masks.copy()
masks_p[np.where(np.sum(np.sum(masks_p,axis = -1),axis = -1)[:,0]<4000)] = 0
for i in range(masks.shape[0]):
masks_p[i,0] = cv2.blur(masks_p[i,0],(30,30))
masks_p = np.round(masks_p)
for i in range(masks.shape[0]):
blurred = cv2.blur(masks_p[i,0],(100,100))
masks_p[(i,0)+np.where(blurred<0.1)] =0
masks_p[np.where(np.sum(np.sum(masks_p,axis = -1),axis = -1)[:,0]<1500)] = 0
return masks_p.astype(np.uint8)
def __blur(src, type, radius):
"""Softens an image using one of several filters.
Args:
src: The source mat (numpy.ndarray).
type: The blurType to perform represented as an int.
radius: The radius for the blur as a float.
Returns:
A numpy.ndarray that has been blurred.
"""
if(type is BlurType.Box_Blur):
ksize = int(2 * round(radius) + 1)
return cv2.blur(src, (ksize, ksize))
elif(type is BlurType.Gaussian_Blur):
ksize = int(6 * round(radius) + 1)
return cv2.GaussianBlur(src, (ksize, ksize), round(radius))
elif(type is BlurType.Median_Filter):
ksize = int(2 * round(radius) + 1)
return cv2.medianBlur(src, ksize)
else:
return cv2.bilateralFilter(src, -1, round(radius), round(radius))
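BlurType is defined elsewhere in the generated pipeline; a minimal stand-in consistent with the branches above (an assumption, not the original definition):

from enum import Enum

class BlurType(Enum):
    Box_Blur = 0
    Gaussian_Blur = 1
    Median_Filter = 2
    Bilateral_Filter = 3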
def movement(mat_1,mat_2):
mat_1_gray = cv2.cvtColor(mat_1.copy(),cv2.COLOR_BGR2GRAY)
mat_1_gray = cv2.blur(mat_1_gray,(blur1,blur1))
_,mat_1_gray = cv2.threshold(mat_1_gray,100,255,0)
mat_2_gray = cv2.cvtColor(mat_2.copy(),cv2.COLOR_BGR2GRAY)
mat_2_gray = cv2.blur(mat_2_gray,(blur1,blur1))
_,mat_2_gray = cv2.threshold(mat_2_gray,100,255,0)
mat_2_gray = cv2.bitwise_xor(mat_1_gray,mat_2_gray)
mat_2_gray = cv2.blur(mat_2_gray,(blur2,blur2))
_,mat_2_gray = cv2.threshold(mat_2_gray,70,255,0)
mat_2_gray = cv2.erode(mat_2_gray,np.ones((erodeval,erodeval)))
mat_2_gray = cv2.dilate(mat_2_gray,np.ones((4,4)))
    _, contours, __ = cv2.findContours(mat_2_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        return True  # movement detected
    return False  # no movement
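movement() relies on module-level tuning parameters (blur1, blur2, erodeval) defined elsewhere in the script. A purely illustrative example with assumed values:

blur1, blur2, erodeval = 5, 15, 7  # assumed kernel / structuring-element sizes

cap = cv2.VideoCapture(0)
_, frame_a = cap.read()
_, frame_b = cap.read()
if movement(frame_a, frame_b):
    print("motion detected between the two frames")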
#Pedestrian Recognition Thread
def partial_blur(img, points, kernel_size=9, type=1):
    """
    Partial blur within the convex hull of points.
    Args:
        type = 0 for Gaussian blur
        type = 1 for average (box) blur
    """
    points = cv2.convexHull(points)
    copy_img = img.copy()
    black = (0, 0, 0)
    if type:
        blur_img = cv2.blur(img, (kernel_size, kernel_size))
    else:
        blur_img = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    cv2.fillConvexPoly(copy_img, points, color=black)
    for row in range(img.shape[:2][0]):
        for col in range(img.shape[:2][1]):
            if numpy.array_equal(copy_img[row][col], black):
                copy_img[row][col] = blur_img[row][col]
    return copy_img
def correct_colours(im1, im2, landmarks1):
"""
Attempt to change the colouring of im2 to match that of im1.
It does this by dividing im2 by a gaussian blur of im2, and then multiplying
by a gaussian blur of im1.
"""
blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(
numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
blur_amount = int(blur_amount)
if blur_amount % 2 == 0:
blur_amount += 1
im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)
# Avoid divide-by-zero errors.
im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)
return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
im2_blur.astype(numpy.float64))
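COLOUR_CORRECT_BLUR_FRAC, LEFT_EYE_POINTS, and RIGHT_EYE_POINTS are module-level constants in the face-swap script this comes from; typical values (an assumption, using dlib's 68-point landmark indices) look like:

COLOUR_CORRECT_BLUR_FRAC = 0.6
LEFT_EYE_POINTS = list(range(42, 48))   # dlib 68-point landmark indices
RIGHT_EYE_POINTS = list(range(36, 42))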
def get_frame(self):
ret,frame = self.cap.read(self.camera_id)
self.frame = cv2.resize(frame,None,fx=self.img_zoomx, fy=self.img_zoomy, \
interpolation = cv2.INTER_AREA)
self.frame = cv2.blur(self.frame, (3,3))
self.hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
self.colors = []
if self.escaneando:
self.draw_osd(self.frame)
return self.frame
def test_box_filter_reflect_101(self):
I = np.array(range(1, 50)).reshape(7, 7).astype(np.float32)
r = 2
ret1 = cv.smooth.box_filter(I, r, normalize=True)
ret2 = cv2.blur(I, (5,5), borderType=cv2.BORDER_DEFAULT)
self.assertTrue(np.array_equal(ret1, ret2))
def test_box_filter_reflect(self):
I = np.array(range(1, 50)).reshape(7, 7).astype(np.float32)
r = 2
ret1 = cv.smooth.box_filter(I, r, normalize=True, border_type='reflect')
ret2 = cv2.blur(I, (5,5), borderType=cv2.BORDER_REFLECT)
self.assertTrue(np.array_equal(ret1, ret2))
def test_box_filter_edge(self):
I = np.array(range(1, 50)).reshape(7, 7).astype(np.float32)
r = 2
ret1 = cv.smooth.box_filter(I, r, normalize=True, border_type='edge')
ret2 = cv2.blur(I, (5,5), borderType=cv2.BORDER_REPLICATE)
self.assertTrue(np.array_equal(ret1, ret2))
def test_box_filter_zero(self):
I = np.array(range(1, 50)).reshape(7, 7).astype(np.float32)
r = 2
ret1 = cv.smooth.box_filter(I, r, normalize=True, border_type='zero')
ret2 = cv2.blur(I, (5,5), borderType=cv2.BORDER_CONSTANT)
self.assertTrue(np.array_equal(ret1, ret2))
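The tests above exercise a cv.smooth.box_filter wrapper from the project under test; a minimal sketch consistent with what the assertions expect (an assumption, not the project's actual implementation):

def box_filter(I, r, normalize=True, border_type='reflect_101'):
    # normalized box filter with a (2r+1, 2r+1) window, mapping the project's
    # border_type names onto OpenCV border flags
    borders = {
        'reflect_101': cv2.BORDER_DEFAULT,
        'reflect': cv2.BORDER_REFLECT,
        'edge': cv2.BORDER_REPLICATE,
        'zero': cv2.BORDER_CONSTANT,
    }
    ksize = (2 * r + 1, 2 * r + 1)
    if normalize:
        return cv2.blur(I, ksize, borderType=borders[border_type])
    return cv2.boxFilter(I, -1, ksize, normalize=False, borderType=borders[border_type])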
def sobel(im, dx=1, dy=1, blur=3):
if blur is None or blur == 0:
blur_im = im
else:
blur_im = cv2.GaussianBlur(im, (blur,blur), 0)
return cv2.Sobel(blur_im, cv2.CV_8U, dx, dy)
def sobel_threshold(im, dx=1, dy=1, blur=3, threshold=10):
return (sobel(im, dx=dx, dy=dy, blur=blur) > threshold).astype(np.uint8) * 255
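Usage sketch for the two helpers above (the file name is a placeholder):

gray = cv2.imread("plate.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input image
vertical_edges = sobel_threshold(gray, dx=1, dy=0, blur=3, threshold=10)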
def alpha_image(img, points, blur=0, dilate=0):
mask = mask_from_points(img.shape[:2], points)
if dilate > 0:
        kernel = np.ones((dilate, dilate), np.uint8)
mask = cv2.dilate(mask, kernel)
if blur > 0:
mask = cv2.blur(mask, (blur, blur))
return np.dstack((img, mask))
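alpha_image() depends on a mask_from_points() helper from the same project; a plausible minimal version, inferred from how the mask is used (an assumption, not the original code):

def mask_from_points(size, points):
    # white convex-hull region on a black single-channel mask
    mask = np.zeros(size, dtype=np.uint8)
    cv2.fillConvexPoly(mask, cv2.convexHull(points), 255)
    return mask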
def averageBlur(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)  # read the image as grayscale
    blur = cv2.blur(img, (3, 5))  # box (average) blur with a 3x5 kernel
# cv2.imwrite(dstpath, blur)
plt.subplot(1,2,1),plt.imshow(img,'gray')
plt.subplot(1,2,2),plt.imshow(blur,'gray')
plt.show()
# Gaussian blur
def gaussianBlur(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)  # read the image as grayscale
blur = cv2.GaussianBlur(img,(5,5),0)
# cv2.imwrite(dstpath, blur)
plt.subplot(1,2,1),plt.imshow(img,'gray')
plt.subplot(1,2,2),plt.imshow(blur,'gray')
plt.show()
# Median blur
def medianBlur(srcpath, dstpath):
img = cv2.imread(srcpath, 0)
blur = cv2.medianBlur(img, 3)
# cv2.imshow(dstpath, img)
# cv2.imwrite(dstpath, blur)
plt.subplot(1,2,1),plt.imshow(img,'gray')
plt.subplot(1,2,2),plt.imshow(blur,'gray')
plt.show()
# Bilateral filter
def bilateralFilter(srcpath, dstpath):
img = cv2.imread(srcpath, 0)
    # 9 is the diameter of the pixel neighbourhood used during filtering;
    # the two 75s are sigmaColor and sigmaSpace (larger values smooth more while preserving edges)
blur = cv2.bilateralFilter(img,9,75,75)
# cv2.imwrite(dstpath, blur)
plt.subplot(1,2,1),plt.imshow(img,'gray')
plt.subplot(1,2,2),plt.imshow(blur,'gray')
plt.show()
def blur_image(image):
if random.randint(0, 10) == 0:
        intensity = random.randint(1, 5)
        image = cv2.blur(image, (intensity, intensity))
return image
def pretty_blur_map(blur_map, sigma=5):
abs_image = numpy.log(numpy.abs(blur_map).astype(numpy.float32))
    abs_image = cv2.blur(abs_image, (sigma, sigma))
return cv2.medianBlur(abs_image, sigma)
def oneFileComparison(filename):
gaussianFilterVals = list(range(1,30,2))
gaussianFilterVals.insert(0,0)
img = _openImage(filename)
fn = lambda x, img=img: img if x==0 else cv2.blur(img, (x, x) )
_procedure(fn, gaussianFilterVals, gaussianFilterVals, 'artificial blur', filename)
Source: GuidedFilter.py, from project Color-Attenuation-Prior-Dehazing (author: jevonswang)
def _computeCoefficients(self, p):
r = self._radius
I = self._I
Ir, Ig, Ib = I[:, :, 0], I[:, :, 1], I[:, :, 2]
p_mean = cv2.blur(p, (r, r))
Ipr_mean = cv2.blur(Ir * p, (r, r))
Ipg_mean = cv2.blur(Ig * p, (r, r))
Ipb_mean = cv2.blur(Ib * p, (r, r))
Ipr_cov = Ipr_mean - self._Ir_mean * p_mean
Ipg_cov = Ipg_mean - self._Ig_mean * p_mean
Ipb_cov = Ipb_mean - self._Ib_mean * p_mean
ar = self._Irr_inv * Ipr_cov + self._Irg_inv * Ipg_cov + self._Irb_inv * Ipb_cov
ag = self._Irg_inv * Ipr_cov + self._Igg_inv * Ipg_cov + self._Igb_inv * Ipb_cov
ab = self._Irb_inv * Ipr_cov + self._Igb_inv * Ipg_cov + self._Ibb_inv * Ipb_cov
b = p_mean - ar * self._Ir_mean - ag * self._Ig_mean - ab * self._Ib_mean
ar_mean = cv2.blur(ar, (r, r))
ag_mean = cv2.blur(ag, (r, r))
ab_mean = cv2.blur(ab, (r, r))
b_mean = cv2.blur(b, (r, r))
return ar_mean, ag_mean, ab_mean, b_mean
def make_mask(path):
original_name = path.split('/')[-1]
img, points = load_image_points(path)
if img is None:
return None
if not os.path.exists('eyes'):
os.makedirs('eyes')
if not os.path.exists('masks'):
os.makedirs('masks')
masked = alpha_image(img, points, 1)
masked = fill(masked, points[LEFT_EYE_POINTS])
masked = fill(masked, points[RIGHT_EYE_POINTS])
mask_path = 'masks/{}.mask.png'.format(original_name)
cv2.imwrite(mask_path, masked)
args = ['convert', mask_path, '-trim', '+repage', '-resize', '830x830', '-gravity', 'center', '-background', 'transparent', '-extent', '850x1100', mask_path + '.tmp.png']
subprocess.call(args)
args = ['convert', mask_path+'.tmp.png', '-bordercolor', 'none', '-border', '2', '-background', 'black', '-alpha', 'background', '-channel', 'A', '-blur', '3x3', '-level', '0,01%', mask_path+'.tmp2.png']
subprocess.call(args)
args = ['convert', 'bg.png', mask_path+'.tmp2.png', '-gravity', 'center', '-composite', '-matte', mask_path]
subprocess.call(args)
os.remove(mask_path+'.tmp.png')
os.remove(mask_path+'.tmp2.png')
left_eye_path = 'eyes/{}.left.png'.format(original_name)
left_eye = alpha_image(img, points[LEFT_EYE_POINTS], dilate=5, blur=1)
cv2.imwrite(left_eye_path, left_eye)
subprocess.call(['mogrify', '-trim', '+repage', left_eye_path])
right_eye_path = 'eyes/{}.right.png'.format(original_name)
right_eye = alpha_image(img, points[RIGHT_EYE_POINTS], dilate=5, blur=1)
cv2.imwrite(right_eye_path, right_eye)
subprocess.call(['mogrify', '-trim', '+repage', right_eye_path])