def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copy image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print("loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    # edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    # color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    # edges = color
    #
    kernel = np.ones((obj.xsize, obj.ysize), np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=obj.iterations)
    if True:
        print("zeige")    # German: "show"
        cv2.imshow(obj.Label, opening)
        print("gezeigt")  # German: "shown"
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Source Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(opening, cmap='gray')
        plt.title('Opened Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("fertig")       # German: "done"
    self.img = opening
Python Canny() example source code
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copy image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print("loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    edges = color
    if True:
        print("zeige")
        cv2.imshow(obj.Label, edges)
        print("gezeigt")
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Source Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(edges, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("fertig")
    self.img = edges
def createCV_canny():
    print("create CV canny ...")
    obj = FreeCAD.ActiveDocument.addObject('App::DocumentObjectGroupPython', 'Canny')
    obj.addProperty('App::PropertyFile', 'imageFile', "base").imageFile = '/home/thomas/Bilder/bn_900.png'
    obj.addProperty('App::PropertyLink', 'imageNode', "base")
    obj.addProperty('App::PropertyBool', 'imageFromNode', "base").imageFromNode = False
    obj.addProperty('App::PropertyInteger', 'minVal', "canny").minVal = 100
    obj.addProperty('App::PropertyInteger', 'maxVal', "canny").maxVal = 200
    _CV_canny(obj, '/icons/bounder.png')
    _ViewProviderCV_canny(obj.ViewObject, __dir__ + '/icons/icon1.svg')
    app = MyApp()
    miki2 = miki.Miki()
    miki2.app = app
    app.root = miki2
    app.obj = obj
    obj.ViewObject.Proxy.cmenu.append(["Dialog", lambda: miki2.run(MyApp.s6)])
    obj.ViewObject.Proxy.edit = lambda: miki2.run(MyApp.s6)
    return obj
#
# derived classes
#
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copy image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print("loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    # edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    # color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    # edges = color
    #
    kernel = np.ones((obj.xsize, obj.ysize), np.uint8)
    closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=obj.iterations)
    if True:
        print("zeige")
        cv2.imshow(obj.Label, closing)
        print("gezeigt")
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Source Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(closing, cmap='gray')
        plt.title('Closed Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("fertig")
    self.img = closing
def execute_CannyEdge(proxy, obj):
    '''create a Canny edge image from the two threshold parameters'''
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except Exception:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    obj.Proxy.img = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    say(["Canny Edge image updated", obj.minVal, obj.maxVal])
def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
    return cv2.Canny(img, low_threshold, high_threshold)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.
    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                            minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((*img.shape, 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img
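# draw_lines() is called above but not shown in this listing. A minimal
# sketch, assuming the standard cv2.HoughLinesP output layout (an N x 1 x 4
# array of segment endpoints); the color and thickness defaults are assumptions:
import cv2
def draw_lines(img, lines, color=(255, 0, 0), thickness=2):
    # Draw every detected segment onto img in place.
    if lines is None:
        return
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)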
# Python 3 has support for cool math symbols.
def process_image(image):
    # printing out some stats and plotting
    print('This image is:', type(image), 'with dimensions:', image.shape)
    gray = grayscale(image)
    # Define a kernel size and apply Gaussian smoothing
    kernel_size = 5
    blur_gray = gaussian_blur(gray, kernel_size)
    # plt.imshow(blur_gray, cmap='gray')
    # Define our parameters for Canny and apply
    low_threshold = 45    # 50
    high_threshold = 150  # 150
    edges = canny(blur_gray, low_threshold, high_threshold)
    # This time we are defining a four sided polygon to mask
    imshape = image.shape
    # vertices = np.array([[(0, imshape[0]), (475, 310), (475, 310), (imshape[1], imshape[0])]], dtype=np.int32)
    vertices = np.array([[(0, imshape[0]), (450, 330), (490, 310), (imshape[1], imshape[0])]], dtype=np.int32)
    masked_edges = region_of_interest(edges, vertices)
    # Define the Hough transform parameters
    # Make a blank the same size as our image to draw on
    rho = 1               # distance resolution in pixels of the Hough grid
    theta = np.pi / 180   # angular resolution in radians of the Hough grid
    threshold = 15        # minimum number of votes (intersections in a Hough grid cell)
    min_line_length = 40  # minimum number of pixels making up a line (150 - 40)
    max_line_gap = 130    # maximum gap in pixels between connectable line segments (58 - 95)
    line_image = np.copy(image) * 0  # creating a blank to draw lines on
    lines = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)
    # Draw the lines on the edge image
    lines_edges = weighted_img(lines, image)
    return lines_edges
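# A short usage sketch for process_image(), assuming the helpers it calls
# (grayscale, gaussian_blur, region_of_interest, weighted_img) are defined in
# the same module; the input file name is hypothetical:
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
image = mpimg.imread('test.jpg')  # hypothetical input image
plt.imshow(process_image(image))
plt.show()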
def setCallBack(self, update_func):
    self._update_func = update_func
# Adjusting Canny edge thresholds with an OpenCV trackbar
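# A minimal sketch of the idea behind the comment above: drive the cv2.Canny
# thresholds from OpenCV trackbars via a callback. The window/trackbar names
# and the input path are assumptions:
import cv2
def interactive_canny(path='input.png'):  # hypothetical input file
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    win = 'canny-tuner'
    cv2.namedWindow(win)
    def update(_=0):
        # Re-run Canny with the current trackbar positions.
        lo = cv2.getTrackbarPos('minVal', win)
        hi = cv2.getTrackbarPos('maxVal', win)
        cv2.imshow(win, cv2.Canny(img, lo, hi))
    cv2.createTrackbar('minVal', win, 100, 255, update)
    cv2.createTrackbar('maxVal', win, 200, 255, update)
    update()
    cv2.waitKey(0)
    cv2.destroyAllWindows()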
def detect_edges(images):
    def blur(image):
        return cv2.GaussianBlur(image, (5, 5), 0)

    def canny_otsu(image):
        scale_factor = 255
        scaled_image = np.uint8(image * scale_factor)
        otsu_threshold = cv2.threshold(
            cv2.cvtColor(scaled_image, cv2.COLOR_RGB2GRAY), 0, 255,
            cv2.THRESH_BINARY + cv2.THRESH_OTSU)[0]
        lower_threshold = max(0, int(otsu_threshold * 0.5))
        upper_threshold = min(255, int(otsu_threshold))
        edges = cv2.Canny(scaled_image, lower_threshold, upper_threshold)
        edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
        return np.float32(edges) * (1 / scale_factor)

    blurred = [blur(image) for image in images]
    canny_applied = [canny_otsu(image) for image in blurred]
    return canny_applied
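# Usage sketch for detect_edges(), which expects float RGB images scaled to
# [0, 1] (it multiplies by 255 before Otsu thresholding); the file name is
# an assumption:
import cv2
import numpy as np
sample = cv2.cvtColor(cv2.imread('sample.png'), cv2.COLOR_BGR2RGB)
edge_maps = detect_edges([np.float32(sample) / 255.0])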
def process_img(img):
    original_image = img
    processed_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (3, 3), 0)
    copy = processed_img
    vertices = np.array([[30, 240], [30, 100], [195, 100], [195, 240]])
    processed_img = roi(processed_img, np.int32([vertices]))
    verticesP = np.array([[30, 270], [30, 230], [197, 230], [197, 270]])
    platform = roi(copy, np.int32([verticesP]))
    # edges
    # lines = cv2.HoughLinesP(platform, 1, np.pi/180, 180, np.array([]), 3, 2)
    # draw_lines(processed_img, lines)
    # draw_lines(original_image, lines)
    # Platform lines
    # imgray = cv2.cvtColor(platform, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(platform, 127, 255, 0)
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(original_image, contours, -1, (0, 255, 0), 3)
    try:
        platformpos = contours[0][0][0]
    except IndexError:
        platformpos = [[0]]
    circles = cv2.HoughCircles(processed_img, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=90, param2=5, minRadius=1, maxRadius=3)
    ballpos = draw_circles(original_image, circles=circles)
    return processed_img, original_image, platform, platformpos, ballpos
def cannyThresholding(self, contour_retrieval_mode=cv2.RETR_LIST):
    '''
    contour_retrieval_mode is passed through as the second argument to cv2.findContours
    '''
    # Attempt to match edges found in the blue, green or red channels: collect all
    channel = 0
    for gray in cv2.split(self.img):
        channel += 1
        print('channel %d ' % channel)
        title = self.tgen.next('channel-%d' % channel)
        if self.show: ImageViewer(gray).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
        found = {}
        for thrs in range(0, 255, 26):
            print('Using threshold %d' % thrs)
            if thrs == 0:
                print('First step')
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                title = self.tgen.next('canny-%d' % channel)
                if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
                bin = cv2.dilate(bin, None)
                title = self.tgen.next('canny-dilate-%d' % channel)
                if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
                title = self.tgen.next('channel-%d-threshold-%d' % (channel, thrs))
                if self.show: ImageViewer(bin).show(window='Next threshold (n to continue)', destroy=self.destroy, info=self.info, thumbnailfn=title)
            bin, contours, hierarchy = cv2.findContours(bin, contour_retrieval_mode, cv2.CHAIN_APPROX_SIMPLE)
            title = self.tgen.next('channel-%d-threshold-%d-contours' % (channel, thrs))
            if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
            if contour_retrieval_mode == cv2.RETR_LIST or contour_retrieval_mode == cv2.RETR_EXTERNAL:
                filteredContours = contours
            else:
                filteredContours = []
                h = hierarchy[0]
                for component in zip(contours, h):
                    currentContour = component[0]
                    currentHierarchy = component[1]
                    if currentHierarchy[3] < 0:
                        # Found an outermost (parentless) component
                        filteredContours.append(currentContour)
                print('Contours filtered. Input %d Output %d' % (len(contours), len(filteredContours)))
                time.sleep(5)
            for cnt in filteredContours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                cnt_len = len(cnt)
                cnt_area = cv2.contourArea(cnt)
                cnt_isConvex = cv2.isContourConvex(cnt)
                if cnt_len == 4 and (cnt_area > self.area_min and cnt_area < self.area_max) and cnt_isConvex:
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    if max_cos < self.cos_limit:
                        sq = Square(cnt, cnt_area, cnt_isConvex, max_cos)
                        self.squares.append(sq)
                    else:
                        # print('dropped a square with max_cos %f' % max_cos)
                        pass
            found[thrs] = len(self.squares)
            print('Found %d quadrilaterals with threshold %d' % (len(self.squares), thrs))
def detect(img, seed):
    seed = (seed[1], seed[0])
    shape = get_size(img)
    [Ms, Ml, Mshadow] = [np.zeros(shape, dtype=np.uint8) for i in range(3)]
    tmp = np.array(img)
    for i in range(DOWNSAMPLE):
        img_ds = downsample(tmp)
        tmp = img_ds
    seed_ds = (int(seed[0] * RATE**DOWNSAMPLE), int(seed[1] * RATE**DOWNSAMPLE))
    seed_pixel = img_ds[seed_ds]
    shape_ds = (int(shape[0] * RATE**DOWNSAMPLE), int(shape[1] * RATE**DOWNSAMPLE))
    # The edge map is indexed with downsampled coordinates in search(), so it
    # must be computed on the downsampled image (as grow_seed() below does).
    edges = cv2.Canny(cv2.GaussianBlur(img_ds, (3, 3), 0), CANNY[0], CANNY[1])
    seed_mask_ds = np.zeros(get_size(img_ds))
    seed_mask_ds[seed_ds] = 1
    vis = np.zeros(get_size(img_ds))
    directions = ((1, 0), (0, 1), (-1, 0), (0, -1))

    def search(point, seed_pixel):
        if point[0] < 0 or point[1] < 0 or point[0] >= shape_ds[0] or point[1] >= shape_ds[1]:
            return
        if vis[point]:
            return
        elif edges[point]:
            return
        elif dist(img_ds[point], seed_pixel) < COL_SEED:
            vis[point] = 1
            seed_mask_ds[point] = 1
            for i in range(4):
                search((point[0] + directions[i][0], point[1] + directions[i][1]), seed_pixel)

    for i in range(SEED_ITER):
        search(seed_ds, seed_pixel)
        seed_pixel = np.mean(np.mean(img_ds[np.where(seed_mask_ds == 1)], axis=0), axis=0)
    return seed_ds
def grow_seed(img, seed_loc):
    tmp = np.array(img)
    for i in range(DOWNSAMPLE):  # Downsample the original image
        img_ds = downsample(tmp)
        tmp = img_ds
    seed_loc_ds = (int(seed_loc[0] * RATE**DOWNSAMPLE), int(seed_loc[1] * RATE**DOWNSAMPLE))
    seed_pixel_ds = img_ds[seed_loc_ds]
    edges_ds = cv2.Canny(cv2.GaussianBlur(img_ds, (3, 3), 0), CANNY[0], CANNY[1])
    seed_mask_ds = np.zeros(get_size(img_ds))
    seed_mask_ds[seed_loc_ds] = 1
    visited = np.zeros(get_size(img_ds))

    def search(point, seed_pixel):
        if point[0] < 0 or point[1] < 0 or point[0] >= img_ds.shape[0] or point[1] >= img_ds.shape[1]:
            return
        if visited[point]:
            return
        elif edges_ds[point]:
            return
        elif dist(img_ds[point], seed_pixel) < SEED_TOL:
            visited[point] = 1
            seed_mask_ds[point] = 1
            for i in range(4):
                search((point[0] + search_directions[i][0], point[1] + search_directions[i][1]), seed_pixel)

    for i in range(SEED_ITER):
        search(seed_loc_ds, seed_pixel_ds)
        seed_pixel_ds = np.mean(img_ds[np.where(seed_mask_ds == 1)], axis=0)
        print(seed_pixel_ds)
        visited[:, :] = 0
    for i in range(DOWNSAMPLE):
        seed_mask_ds = upsample(seed_mask_ds)
    seed_mask_ds[np.where(seed_mask_ds > 0.5)] = 1
    seed_mask_ds[np.where(seed_mask_ds <= 0.5)] = 0
    return seed_mask_ds
def find_bib(image):
    width, height, depth = image.shape
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # gray = cv2.equalizeHist(gray)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    debug_output("find_bib_blurred", blurred)
    # binary = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blockSize=25, C=0)
    ret, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # ret, binary = cv2.threshold(blurred, 170, 255, cv2.THRESH_BINARY)
    debug_output("find_bib_binary", binary)
    threshold_contours, hierarchy = find_contours(binary)
    debug_output("find_bib_threshold", binary)
    edges = cv2.Canny(gray, 175, 200, 3)
    edge_contours, hierarchy = find_contours(edges)
    debug_output("find_bib_edges", edges)
    contours = threshold_contours + edge_contours
    debug_output_contours("find_bib_threshold_contours", image, contours)
    rectangles = get_rectangles(contours)
    debug_output_contours("find_bib_rectangles", image, rectangles)
    potential_bibs = [rect for rect in rectangles if is_potential_bib(rect, width * height)]
    debug_output_contours("find_bib_potential_bibs", image, potential_bibs)
    ideal_aspect_ratio = 1.0
    potential_bibs = sorted(potential_bibs, key=lambda bib: abs(aspect_ratio(bib) - ideal_aspect_ratio))
    return potential_bibs[0] if len(potential_bibs) > 0 else np.array([[(0, 0)], [(0, 0)], [(0, 0)], [(0, 0)]])
#
# Checks that the size and aspect ratio of the contour are appropriate for a bib.
#
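# is_potential_bib() itself is not shown in this snippet. A plausible sketch
# matching the description above; the area and aspect-ratio thresholds are
# purely illustrative assumptions:
import cv2
def is_potential_bib(rect, image_area,
                     min_area_ratio=0.002, max_area_ratio=0.3,
                     min_aspect=0.5, max_aspect=2.0):
    # Accept contours whose area and width/height ratio are plausible for a bib.
    x, y, w, h = cv2.boundingRect(rect)
    if h == 0 or image_area == 0:
        return False
    area_ok = min_area_ratio < (w * h) / float(image_area) < max_area_ratio
    aspect_ok = min_aspect < w / float(h) < max_aspect
    return area_ok and aspect_ok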
def diff_rect(img1, img2, pos=None):
    """find contours that include pos in the differences between img1 & img2 (cv2 images)"""
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (3, 3), 0)
    edges = cv2.Canny(diff, 100, 200)
    _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    contours.sort(key=lambda c: len(c))
    # no pos provided, just return the largest difference-area rect
    if pos is None:
        cnt = contours[-1]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0 + w, y0 + h
        return (x0, y0, x1, y1)
    # else the rect should contain the pos
    x, y = pos
    for i in range(len(contours)):
        cnt = contours[-1 - i]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0 + w, y0 + h
        if x0 <= x <= x1 and y0 <= y <= y1:
            return (x0, y0, x1, y1)
def predict():
    response = requests.get(slide_captcha_url)
    base64_image = response.json()['data']['dataUrl']
    base64_image_without_head = base64_image.replace('data:image/png;base64,', '')
    bytes_io = BytesIO(base64.b64decode(base64_image_without_head))
    img = np.array(Image.open(bytes_io).convert('RGB'))
    img_blur = cv2.GaussianBlur(img, (3, 3), 0)
    img_gray = cv2.cvtColor(img_blur, cv2.COLOR_BGR2GRAY)
    img_canny = cv2.Canny(img_gray, 100, 200)
    operator = get_operator('shape.png')
    (x, y), _ = best_match(img_canny, operator)
    x = x + bias
    print('the position of x is', x)
    buffer = mark(img, x, y)
    return {'value': x, 'image': base64.b64encode(buffer.getbuffer()).decode()}
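# best_match() and get_operator() are not shown in this snippet. A plausible
# sketch of best_match via template matching on the Canny edge maps, assuming
# the operator is an edge-map template of the puzzle piece; the return layout
# mirrors the call site above:
import cv2
def best_match(edge_img, template):
    # The location with the highest normalized cross-correlation score is
    # taken as the slider target position.
    result = cv2.matchTemplate(edge_img, template, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(result)
    return max_loc, max_val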
# car_recognizer.py, from the project Vision-based-parking-lot-availability-OpenCV by Saar1312
def getEdges(gray, detector, min_thr=None, max_thr=None):
    """
    Where detector in {1, 2, 3, 4, 5}
    1: Laplacian
    2: Sobelx
    3: Sobely
    4: Canny
    5: Sobelx with positive and negative slope (in 2, negative slopes are lost)
    """
    if min_thr is None:
        min_thr = 100
        max_thr = 200
    if detector == 1:
        return cv2.Laplacian(gray, cv2.CV_64F)
    elif detector == 2:
        return cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=-1)
    elif detector == 3:
        return cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=-1)
    elif detector == 4:
        # The Canny thresholds apply not to raw intensity but to the intensity
        # gradient (a measure of how different a pixel is from its neighbors).
        return cv2.Canny(gray, min_thr, max_thr)
    elif detector == 5:
        sobelx64f = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
        abs_sobel64f = np.absolute(sobelx64f)
        return np.uint8(abs_sobel64f)
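# A short usage sketch for getEdges(); the input file name is hypothetical:
import cv2
gray = cv2.imread('parking_lot.png', cv2.IMREAD_GRAYSCALE)
edges = getEdges(gray, detector=4, min_thr=50, max_thr=150)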
def main():
    # parse command line options
    if len(sys.argv) != 2:
        print('Usage: python %s input_file' % sys.argv[0])
        exit(1)
    filePath = sys.argv[1]
    print("<----- processing %s ----->" % filePath)
    # read the image as grayscale and resize it to a fixed working size
    img = cv2.imread(filePath, 0)
    img = cv2.resize(img, (1200, 900))
    # contrast enhancement (disabled)
    # imgArr = np.array(img)
    # imgMean = np.mean(img)
    # imgcopy = imgArr - imgMean
    # imgcopy = imgcopy * 2 + imgMean * 3
    # imgcopy = imgcopy / 255
    canny = cv2.Canny(img, 60, 300)
    inverted = cv2.bitwise_not(canny)
    cv2.imshow('Canny', inverted)
    test1 = Image.fromarray(canny)
    test2 = Image.fromarray(inverted)
    result = pytesseract.image_to_string(test1, lang="eng", config="-c tessedit_char_whitelist=0123456789X")
    print(result)
    print("-------")
    result = pytesseract.image_to_string(test2, lang="eng")
    print(result)
    k = cv2.waitKey(0)
def process(img):
    img = cv2.medianBlur(img, 5)
    kernel = np.ones((3, 3), np.uint8)
    # img = cv2.erode(img, kernel, iterations=1)
    sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=3)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    dilation = cv2.dilate(sobel, element2, iterations=1)
    erosion = cv2.erode(dilation, element1, iterations=1)
    dilation2 = cv2.dilate(erosion, element2, iterations=3)
    # img = cv2.dilate(img, kernel, iterations=1)
    # img = cv2.Canny(img, 100, 200)
    return dilation2