Python cv2.Sobel() example source code
Source file: BoundaryExtraction.py
Project: SummerProject_MacularDegenerationDetection
Author: WDongYuan
def MyDenoiseSobely(path):
    img_gray = ToGrayImage(path)
    img_mydenoise = MyDenoise(img_gray, 5)
    img_denoise = cv2.fastNlMeansDenoising(img_mydenoise, None, 3, 7, 21)
    _, img_thre = cv2.threshold(img_denoise, 100, 255, cv2.THRESH_TOZERO)
    sobely = cv2.Sobel(img_thre, cv2.CV_64F, 0, 1, ksize=3)
    return sobely
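ToGrayImage and MyDenoise are project helpers that are not shown on this page; a minimal sketch of plausible stand-ins, assuming ToGrayImage loads a file as a single-channel image and MyDenoise is a simple median-style filter with the given kernel size:

import cv2

def ToGrayImage(path):
    # Hypothetical stand-in: read the file from disk as a single-channel image.
    return cv2.imread(path, cv2.IMREAD_GRAYSCALE)

def MyDenoise(img, ksize):
    # Hypothetical stand-in: a plain median blur with the given (odd) kernel size.
    return cv2.medianBlur(img, ksize)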
def pipeline(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    img = np.copy(img)
    # Convert to HLS color space and separate the L and S channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    # Sobel x
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)  # take the derivative in x
    abs_sobelx = np.absolute(sobelx)  # absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Stack each channel
    # Note: color_binary[:, :, 0] is all 0s, effectively an all-black image. It might
    # be beneficial to replace this channel with something else.
    color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, s_binary))
    return color_binary
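A hedged usage sketch (the file name and the display step are not part of the original project): pipeline expects an RGB image and returns a stacked float binary image that can be scaled for display.

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Hypothetical input file; cv2.imread returns BGR, so convert to RGB first.
bgr = cv2.imread("test_image.jpg")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

result = pipeline(rgb)                       # float stack of 0/1 values
plt.imshow((result * 255).astype(np.uint8))  # scale to 0-255 for display
plt.show()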
# Define a function that thresholds the S-channel of HLS
# Use exclusive lower bound (>) and inclusive upper (<=)
def hog(img, bin_n=8, cell_size=4):
    img = cv2.resize(img, (128, 128))
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
    mag, ang = cv2.cartToPolar(gx, gy)
    bins = np.int32(bin_n * ang / (2 * np.pi))  # quantize orientations into bin_n bins
    bin_cells = []
    mag_cells = []
    cellx = celly = cell_size
    for i in range(0, img.shape[0] // celly):
        for j in range(0, img.shape[1] // cellx):
            bin_cells.append(bins[i*celly : i*celly+celly, j*cellx : j*cellx+cellx])
            mag_cells.append(mag[i*celly : i*celly+celly, j*cellx : j*cellx+cellx])
    hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists)
    # transform to Hellinger kernel
    eps = 1e-7
    hist /= hist.sum() + eps
    hist = np.sqrt(hist)
    hist /= norm(hist) + eps  # norm is expected to be numpy.linalg.norm, imported elsewhere
    hist_out = np.reshape(hist, (32, 32, 8))
    return hist_out
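A usage sketch, assuming the module imports norm from numpy.linalg and the input is a grayscale image (the file name is a placeholder); with the defaults (128x128 resize, 4-pixel cells, 8 bins) the descriptor comes back as a 32x32x8 array.

import cv2
from numpy.linalg import norm  # assumed import used inside hog()

gray = cv2.imread("sample.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input file
descriptor = hog(gray)               # shape (32, 32, 8) with the default parameters
feature_vector = descriptor.ravel()  # 8192 values, ready for a classifier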
def _create_derivative(cls, img):
    edges = cv2.Canny(img, 175, 320, apertureSize=3)
    # Create gradient map using Sobel
    sobelx64f = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=-1)
    sobely64f = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=-1)
    theta = np.arctan2(sobely64f, sobelx64f)
    if diagnostics:  # `diagnostics` is a debug flag defined elsewhere in the module
        cv2.imwrite('edges.jpg', edges)
        cv2.imwrite('sobelx64f.jpg', np.absolute(sobelx64f))
        cv2.imwrite('sobely64f.jpg', np.absolute(sobely64f))
        # amplify theta for visual inspection
        theta_visible = (theta + np.pi) * 255 / (2 * np.pi)
        cv2.imwrite('theta.jpg', theta_visible)
    return (edges, sobelx64f, sobely64f, theta)
def edgedetect(channel):
    sobelx = cv2.Sobel(channel, cv2.CV_16S, 1, 0, ksize=3)
    sobely = cv2.Sobel(channel, cv2.CV_16S, 0, 1, ksize=3)
    sobel = np.hypot(sobelx, sobely)
    sobel[sobel > 255] = 255
    return sobel
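edgedetect operates on a single channel; a common pattern (a sketch, not necessarily how this project combines channels) is to run it on each BGR channel of a blurred image and keep the per-pixel maximum.

import cv2
import numpy as np

img = cv2.imread("photo.jpg")  # hypothetical BGR input
blurred = cv2.GaussianBlur(img, (5, 5), 0)
edges = np.max(np.array([edgedetect(blurred[:, :, c]) for c in range(3)]), axis=0)
cv2.imwrite("edges.png", edges.astype(np.uint8))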
def __filter_candidate(greyscale_image, coord, neighborhood_size):
    window = greyscale_image[coord[0] - neighborhood_size:coord[0] + neighborhood_size + 1,
                             coord[1] - neighborhood_size:coord[1] + neighborhood_size + 1]
    grad_x = cv2.Sobel(window, cv2.CV_32FC1, dx=1, dy=0, ksize=3)
    grad_y = cv2.Sobel(window, cv2.CV_32FC1, dx=0, dy=1, ksize=3)
    grad_mag = np.abs(grad_x) + np.abs(grad_y)
    grad_mag_flat = grad_mag.flatten()
    orientations_flat = (cv2.phase(grad_x, grad_y) % pi).flatten()  # phase accuracy: about 0.3 degrees
    hist = (np.histogram(orientations_flat, bins=64, range=(0, pi), weights=grad_mag_flat)[0] /
            (neighborhood_size * neighborhood_size))
    return hist, grad_mag
def tenengrad(img, ksize=3):
    ''' 'TENG' algorithm (Krotkov86) '''
    Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
    Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
    FM = Gx * Gx + Gy * Gy
    mn = cv2.mean(FM)[0]
    if np.isnan(mn):
        return np.nanmean(FM)
    return mn
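A usage sketch with placeholder file names: the Tenengrad score grows with sharpness, so it can pick the best-focused image from a set.

import cv2

paths = ["frame_0.png", "frame_1.png", "frame_2.png"]  # hypothetical frames
scores = {p: tenengrad(cv2.imread(p, cv2.IMREAD_GRAYSCALE)) for p in paths}
best = max(scores, key=scores.get)
print("sharpest frame:", best, scores[best])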
Source file: give_seg_image.py
Project: Brain_Tumor_Segmentation
Author: KarthikRevanuru
def seg(path):
    p = np.array([get_3d_data('../../../Cut_Brats_Training_Data/Test/' + "cut" + path + "_flair.nii.gz")])
    shap = p[0].shape
    print(shap)
    leng = shap[0] * shap[1] * shap[2]
    # pix = get_pixels(path)
    pc = concat(p)
    print(p[0].shape)
    px = cv2.Sobel(p[0], cv2.CV_64F, 1, 0, ksize=5)
    py = cv2.Sobel(p[0], cv2.CV_64F, 0, 1, ksize=5)
    print(time.strftime('%a %H:%M:%S'))
    pcx = concat1(px)
    pcy = concat1(py)
    print(time.strftime('%a %H:%M:%S'))
    pa = ndimage.filters.convolve(p[0], np.full((5, 5, 5), 1.0 / 125), mode='constant')
    print(time.strftime('%a %H:%M:%S'))
    pg = concat1(pa)
    print(time.strftime('%a %H:%M:%S'))
    X = reshape_feat(pc, pg, pcx, pcy, leng)
    print(time.strftime('%a %H:%M:%S'))
    return X
def seg(path):
    p = np.array([get_3d_data('../../../Cut_Brats_Training_Data/Train/' + "cut" + path + "_flair.nii.gz")])
    y = np.array([get_3d_data('../../../Cut_Brats_Training_Data/Train/' + "cut" + path[4:] + "_seg.nii.gz")])
    shap = p[0].shape
    print(shap)
    leng = shap[0] * shap[1] * shap[2]
    # pix = get_pixels(path)
    pc = concat(p)
    yc = concat(y)
    print(p[0].shape)
    px = cv2.Sobel(p[0], cv2.CV_64F, 1, 0, ksize=5)
    py = cv2.Sobel(p[0], cv2.CV_64F, 0, 1, ksize=5)
    print(time.strftime('%a %H:%M:%S'))
    pcx = concat1(px)
    pcy = concat1(py)
    print(time.strftime('%a %H:%M:%S'))
    pa = ndimage.filters.convolve(p[0], np.full((5, 5, 5), 1.0 / 125), mode='constant')
    print(time.strftime('%a %H:%M:%S'))
    pg = concat1(pa)
    print(time.strftime('%a %H:%M:%S'))
    X = reshape_feat(pc, pg, pcx, pcy, leng)
    Y = reshape_seg(yc, leng)
    print(time.strftime('%a %H:%M:%S'))
    return X, Y
def getRGBS(img, PLOT=False):
    image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # grab the image channels, initialize the tuple of colors,
    # the figure and the flattened feature vector
    features = []
    featuresSobel = []
    Grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    histG = cv2.calcHist([Grayscale], [0], None, [16], [0, 256])
    histG = histG / histG.sum()
    features.extend(histG[:, 0].tolist())
    grad_x = np.abs(cv2.Sobel(Grayscale, cv2.CV_16S, 1, 0, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT))
    grad_y = np.abs(cv2.Sobel(Grayscale, cv2.CV_16S, 0, 1, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT))
    abs_grad_x = cv2.convertScaleAbs(grad_x)
    abs_grad_y = cv2.convertScaleAbs(grad_y)
    dst = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
    histSobel = cv2.calcHist([dst], [0], None, [16], [0, 256])
    histSobel = histSobel / histSobel.sum()
    features.extend(histSobel[:, 0].tolist())
    Fnames = []
    Fnames.extend(["Color-Gray" + str(i) for i in range(16)])       # one name per grayscale-histogram bin
    Fnames.extend(["Color-GraySobel" + str(i) for i in range(16)])  # one name per Sobel-histogram bin
    return features, Fnames
def HLS_sobel(img, s_thresh=(120, 255), sx_thresh=(20, 255), l_thresh=(40, 255)):
    img = np.copy(img)
    # Convert to HLS color space and separate the L and S channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    # h_channel = hls[:,:,0]
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    # Sobel x
    # sobelx = abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255))
    # l_channel_col = np.dstack((l_channel, l_channel, l_channel))
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)  # take the derivative in x
    abs_sobelx = np.absolute(sobelx)  # absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold saturation channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Threshold lightness
    l_binary = np.zeros_like(l_channel)
    l_binary[(l_channel >= l_thresh[0]) & (l_channel <= l_thresh[1])] = 1
    channels = 255 * np.dstack((l_binary, sxbinary, s_binary)).astype('uint8')
    binary = np.zeros_like(sxbinary)
    binary[((l_binary == 1) & (s_binary == 1) | (sxbinary == 1))] = 1
    binary = 255 * np.dstack((binary, binary, binary)).astype('uint8')
    return binary, channels
def get_gradient(im):
    # Calculate the x and y gradients using Sobel operator
    grad_x = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=3)
    # Combine the two gradients
    grad = cv2.addWeighted(np.absolute(grad_x), 0.5, np.absolute(grad_y), 0.5, 0)
    # print grad.dtype
    # print grad.shape
    return grad
# Based on: http://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/
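The tutorial linked above uses gradients like these to drive ECC image alignment; a minimal sketch under that assumption (the input file names are placeholders, and depending on the OpenCV build findTransformECC may also want inputMask and gaussFiltSize arguments):

import cv2
import numpy as np

# Hypothetical inputs: two misaligned single-channel images of the same scene.
ref = cv2.imread("band_reference.png", cv2.IMREAD_GRAYSCALE).astype(np.float32)
mov = cv2.imread("band_moving.png", cv2.IMREAD_GRAYSCALE).astype(np.float32)

warp = np.eye(2, 3, dtype=np.float32)  # start from the identity affine warp
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 500, 1e-6)

# Aligning gradient images is often more robust than aligning raw intensities.
_, warp = cv2.findTransformECC(get_gradient(ref), get_gradient(mov), warp,
                               cv2.MOTION_AFFINE, criteria)
aligned = cv2.warpAffine(mov, warp, (ref.shape[1], ref.shape[0]),
                         flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)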
def global_gradient(self):
    gradient_values_x = cv2.Sobel(self.img, cv2.CV_64F, 1, 0, ksize=5)
    gradient_values_y = cv2.Sobel(self.img, cv2.CV_64F, 0, 1, ksize=5)
    gradient_magnitude = cv2.addWeighted(gradient_values_x, 0.5, gradient_values_y, 0.5, 0)
    gradient_angle = cv2.phase(gradient_values_x, gradient_values_y, angleInDegrees=True)
    return gradient_magnitude, gradient_angle
def _get_gradient_magnitude(im):
    "Get magnitude of gradient for given image"
    ddepth = cv2.CV_32F
    dx = cv2.Sobel(im, ddepth, 1, 0)
    dy = cv2.Sobel(im, ddepth, 0, 1)
    dxabs = cv2.convertScaleAbs(dx)
    dyabs = cv2.convertScaleAbs(dy)
    mag = cv2.addWeighted(dxabs, 0.5, dyabs, 0.5, 0)
    return np.average(mag)
def process(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gau = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, thre = cv2.threshold(gau, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    med = cv2.medianBlur(thre, 5)
    canny = cv2.Canny(thre, 100, 200)
    # sobel = cv2.Sobel(thre, cv2.CV_8U, 1, 0, ksize=3)
    dilation = cv2.dilate(canny, element2, iterations=1)
    dst = cv2.erode(dilation, element1, iterations=1)
    return dst
def get_gradient(self, im):
    # Calculate the x and y gradients using Sobel operator
    grad_x = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=3)
    # Combine the two gradients
    grad = cv2.addWeighted(np.absolute(grad_x), 0.5, np.absolute(grad_y), 0.5, 0)
    return grad
Source file: EdgeDetection.py
Project: SummerProject_MacularDegenerationDetection
Author: WDongYuan
def EdgeDetection(img):
    # img = cv2.medianBlur(img,5)
    img = cv2.fastNlMeansDenoising(img, None, 3, 7, 21)
    _, img = cv2.threshold(img, 30, 255, cv2.THRESH_TOZERO)
    denoise_img = img
    # print(img)
    # cv2.imwrite("Denoise.jpg",img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # convolve with proper kernels
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)  # x
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)  # y
    # sobel2y = cv2.Sobel(sobely,cv2.CV_64F,0,1,ksize=3)
    # sobelxy = cv2.Sobel(img,cv2.CV_64F,1,1,ksize=5) # y
    canny = cv2.Canny(img, 100, 200)
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # print(canny)
    # cv2.imwrite('laplacian.jpg',laplacian)
    # cv2.imwrite('sobelx.jpg',sobelx)
    # cv2.imwrite('sobely.jpg',sobely)
    # cv2.imwrite('sobelxy.jpg',sobelxy)
    # cv2.imwrite('canny.jpg',canny)
    # plt.subplot(3,2,1),plt.imshow(img,cmap = 'gray')
    # plt.title('Original'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,2),plt.imshow(laplacian,cmap = 'gray')
    # plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,3),plt.imshow(sobelx,cmap = 'gray')
    # plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,4),plt.imshow(sobely,cmap = 'gray')
    # plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,4),plt.imshow(sobelxy,cmap = 'gray')
    # plt.title('Sobel XY'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,5),plt.imshow(canny,cmap = 'gray')
    # plt.title('Canny'), plt.xticks([]), plt.yticks([])
    # plt.show()
    # return {"denoise":img}
    return {"denoise": denoise_img, "laplacian": laplacian, "canny": canny, "sobely": sobely, "sobelx": sobelx, "contour": contour_image}
def find_contours(img):
    '''
    :param img: (numpy array)
    :return: all possible rectangles (contours)
    '''
    img_blurred = cv2.GaussianBlur(img, (5, 5), 1)  # remove noise
    img_gray = cv2.cvtColor(img_blurred, cv2.COLOR_BGR2GRAY)  # grayscale image
    # cv2.imshow('', img_gray)
    # cv2.waitKey(0)
    # Apply Sobel filter to find the vertical edges
    # Find vertical lines. Car plates have a high density of vertical lines
    img_sobel_x = cv2.Sobel(img_gray, cv2.CV_8UC1, dx=1, dy=0, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)
    # cv2.imshow('img_sobel', img_sobel_x)
    # Apply optimal threshold by using Otsu's algorithm
    retval, img_threshold = cv2.threshold(img_sobel_x, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    # cv2.imshow('s', img_threshold)
    # cv2.waitKey(0)
    # TODO: Try to apply AdaptiveThresh
    # Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
    # gaus_threshold = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 115, 1)
    # cv2.imshow('or', img)
    # cv2.imshow('gaus', gaus_threshold)
    # cv2.waitKey(0)
    # Define a structural element as a rectangle of size 17x3 (we'll use it during the morphological cleaning)
    element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(17, 3))
    # And use this structural element in a close morphological operation
    morph_img_threshold = deepcopy(img_threshold)
    cv2.morphologyEx(src=img_threshold, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_img_threshold)
    # cv2.dilate(img_threshold, kernel=np.ones((1,1), np.uint8), dst=img_threshold, iterations=1)
    # cv2.imshow('Normal Threshold', img_threshold)
    # cv2.imshow('Morphological Threshold based on rect. mask', morph_img_threshold)
    # cv2.waitKey(0)
    # Find contours that contain possible plates (in hierarchical relationship)
    contours, hierarchy = cv2.findContours(morph_img_threshold,
                                           mode=cv2.RETR_EXTERNAL,        # retrieve the external contours
                                           method=cv2.CHAIN_APPROX_NONE)  # all pixels of each contour
    plot_intermediate_steps = False
    if plot_intermediate_steps:
        plot(plt, 321, img, "Original image")
        plot(plt, 322, img_blurred, "Blurred image")
        plot(plt, 323, img_gray, "Grayscale image", cmap='gray')
        plot(plt, 324, img_sobel_x, "Sobel")
        plot(plt, 325, img_threshold, "Threshold image")
        # plot(plt, 326, morph_img_threshold, "After Morphological filter")
        plt.tight_layout()
        plt.show()
    return contours
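One way to use the returned contours (a sketch; the 2:1 to 5:1 aspect-ratio window is an illustrative assumption, not taken from this project) is to keep only bounding boxes shaped roughly like a licence plate.

import cv2

img = cv2.imread("car.jpg")  # hypothetical input photo
candidates = []
for contour in find_contours(img):
    x, y, w, h = cv2.boundingRect(contour)
    aspect = w / float(h)
    if 2.0 <= aspect <= 5.0:  # plates are much wider than they are tall
        candidates.append((x, y, w, h))
print(len(candidates), "plate-like regions")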
Source file: SLIC_new_cityscapes_training_server_1.py
Project: SLIC_cityscapes
Author: wpqmanu
def gradient_img(colorsrc):
    '''
    http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html
    '''
    SCALE = 1
    DELTA = 0
    DDEPTH = cv2.CV_16S  ## to avoid overflow
    # grayscale image
    if len(colorsrc.shape) == 2:
        graysrc = cv2.GaussianBlur(colorsrc, (3, 3), 0)
        ## gradient X ##
        gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
        gradx = cv2.convertScaleAbs(gradx)
        ## gradient Y ##
        grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
        grady = cv2.convertScaleAbs(grady)
        grad = cv2.addWeighted(gradx, 0.5, grady, 0.5, 0)
        return grad
    # multi-channel image
    else:
        gradx_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        grady_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        for index in range(colorsrc.shape[2]):
            graysrc = colorsrc[:, :, index]
            graysrc = cv2.GaussianBlur(graysrc, (3, 3), 0)
            ## gradient X ##
            gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
            gradx = cv2.convertScaleAbs(gradx)
            gradx_total = gradx_total + gradx
            ## gradient Y ##
            grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
            grady = cv2.convertScaleAbs(grady)
            grady_total = grady_total + grady
        grad = cv2.addWeighted(gradx_total, 0.5, grady_total, 0.5, 0)
        return grad
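A usage sketch (the file name is a placeholder): the function accepts either a single-channel or a multi-channel image and returns one combined gradient-magnitude map.

import cv2

bgr = cv2.imread("city_frame.png")  # hypothetical BGR input
grad_color = gradient_img(bgr)                                   # per-channel gradients, accumulated and combined
grad_gray = gradient_img(cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY))  # single-channel path
cv2.imwrite("gradient.png", grad_gray)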