# Imports used across the snippets in this collection
import copy
import sys

import cv2
import numpy
import numpy as np


def get_largest(im, n):
    # Find contours of the shape; OpenCV 3 returns (image, contours, hierarchy),
    # while OpenCV 2 and 4 return (contours, hierarchy)
    major = cv2.__version__.split('.')[0]
    if major == '3':
        _, contours, _ = cv2.findContours(im.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, _ = cv2.findContours(im.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Compute the area of every contour
    areas = []
    for c in contours:
        areas.append(cv2.contourArea(c))
    # Sort (area, contour) pairs from largest to smallest area
    sorted_areas = sorted(zip(areas, contours), key=lambda x: x[0], reverse=True)
    if sorted_areas and len(sorted_areas) >= n:
        # Return the nth-largest contour
        return sorted_areas[n - 1][1]
    else:
        return None
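get_largest branches on the OpenCV major version because cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x but (contours, hierarchy) in 2.4 and 4.x; the same pattern recurs throughout the snippets below. A small version-agnostic wrapper covers both cases (a sketch; the name find_contours_compat is not from any of these projects):

def find_contours_compat(binary, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE):
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x,
    # but (contours, hierarchy) in OpenCV 2.4 and 4.x
    results = cv2.findContours(binary.copy(), mode, method)
    return results[0] if len(results) == 2 else results[1]

With such a helper, get_largest could call find_contours_compat(im) instead of branching on the version string.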
Artificial-potential-without-controller.py (project: Artificial-Potential-Field, author: vampcoder)
def classify(img):
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)
    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)
    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #cv2.imshow('image', image)
    #k = cv2.waitKey(0)
    for i in range(len(contours)):
        cnt = contours[i]
        # Keep only contours whose area falls between 15000 and 35000 pixels
        if 15000 < cv2.contourArea(cnt) < 35000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    cv2.imshow('arr', arr)
    k = cv2.waitKey(0)
    return arr
HandRecognition.py (project: hand-gesture-recognition-opencv, author: mahaveerverma)
def hand_contour_find(contours):
    max_area = 0
    largest_contour = -1
    for i in range(len(contours)):
        cont = contours[i]
        area = cv2.contourArea(cont)
        if area > max_area:
            max_area = area
            largest_contour = i
    if largest_contour == -1:
        return False, 0
    else:
        h_contour = contours[largest_contour]
        return True, h_contour

# 4. Detect & mark fingers
def remove_blobs(image, min_area=0, max_area=sys.maxsize, threshold=128,
                 method='8-connected', return_mask=False):
    """Binarize the image using the given threshold and remove (turn black)
    blobs of connected white pixels whose area is greater than or equal to
    min_area and less than or equal to max_area, returning the result."""
    method = method.lower()
    if method == '4-connected':
        method = cv2.LINE_4
    elif method in ('16-connected', 'antialiased'):
        method = cv2.LINE_AA
    else:  # 8-connected
        method = cv2.LINE_8
    mono_image = binarize_image(image, method='boolean', threshold=threshold)
    _, all_contours, _ = cv2.findContours(mono_image, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_SIMPLE)
    contours = np.array([contour for contour in all_contours
                         if min_area <= cv2.contourArea(contour) <= max_area])
    mask = np.ones(mono_image.shape, np.uint8)
    cv2.drawContours(mask, contours, -1, 0, -1, lineType=method)
    return image, 255 * mask
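A hedged usage sketch; binarize_image is a project helper that is not shown here, and the file name and area bounds are illustrative:

# Illustrative call on a grayscale scan
img = cv2.imread('page.png', cv2.IMREAD_GRAYSCALE)
original, blob_mask = remove_blobs(img, min_area=50, max_area=5000, threshold=128)
cv2.imwrite('page_blob_mask.png', blob_mask)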
def contourImg(image):
    # Find contours; the first and last return values are ignored with _ placeholders
    _, contours, _ = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # The contouring operation modifies the input image, so blank the whole thing afterwards
    image.fill(0)
    boundingRect = []
    # Loop over all contours larger than minArea pixels; that number is tweakable and was determined by testing
    for j in [i for i in contours if cv2.contourArea(i) > minArea]:
        # br is a tuple (x, y, width, height) where (x, y) is the top-left corner
        br = cv2.boundingRect(j)
        if abs(br[2] / br[3] - INDASPECT) < indAspectTol and cv2.contourArea(j) / (br[2] * br[3]) > covTol:
            boundingRect.append(br)
    for x in range(0, len(boundingRect)):
        for y in range(x + 1, len(boundingRect)):
            i = boundingRect[x]
            j = boundingRect[y]
            if abs(i[1] - j[1]) < i[3] / 2 and abs(abs(i[0] - j[0]) / i[1] - GRPASPECT) < grpAspectTol:
                return [createRectCnt(i), createRectCnt(j)]
    return None
def diagContour(image):
    # Find contours; the first and last return values are ignored with _ placeholders
    _, contours, _ = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # The contouring operation modifies the input image, so blank the whole thing afterwards
    image.fill(0)
    boundingRect = []
    firstFail = []
    # Loop over all contours larger than minArea pixels; that number is tweakable and was determined by testing
    for j in [i for i in contours if cv2.contourArea(i) > minArea]:
        # br is a tuple (x, y, width, height) where (x, y) is the top-left corner
        br = cv2.boundingRect(j)
        if abs(br[2] / br[3] - INDASPECT) < indAspectTol and cv2.contourArea(j) / (br[2] * br[3]) > covTol:
            boundingRect.append(br)
        else:
            firstFail.append([br, br[2] / br[3], cv2.contourArea(j) / (br[2] * br[3])])
    secondRound = []
    for x in range(0, len(boundingRect)):
        for y in range(x + 1, len(boundingRect)):
            i = boundingRect[x]
            j = boundingRect[y]
            secondRound.append([(x, y, i, j), (abs(i[1] - j[1]), i[3] / 2), abs(i[0] - j[0]) / i[1]])
    for x in secondRound:
        if x[1][0] < x[1][1] and x[2] - GRPASPECT < grpAspectTol:
            return firstFail, secondRound, [createRectCnt(x[0][2]), createRectCnt(x[0][3])]
    return firstFail, secondRound, None
def findContours(arg_img, arg_canvas, arg_MinMaxArea=False, arg_debug=False):
    image = arg_img.copy()
    # print(image)
    canvas = arg_canvas.copy()
    # findContours needs a single-channel image; convert if a colour image was passed in
    if len(image.shape) == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # OpenCV 3 returns (image, contours, hierarchy); older builds return (contours, hierarchy)
    if sys.version_info.major == 2:
        ctrs, hier = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, ctrs, hier = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if arg_MinMaxArea is not False:
        ctrs = [x for x in ctrs if arg_MinMaxArea[1] > cv2.contourArea(x) > arg_MinMaxArea[0]]
    print('>>> ', len(ctrs))
    for ctr in ctrs:
        print('Area: ', cv2.contourArea(ctr))
        cv2.drawContours(canvas, [ctr], 0, (0, 128, 255), 3)
    if arg_debug:
        cv2.imwrite('Debug/debug_findContours.jpg', canvas)
    return canvas
def get_contour(self, arg_frame, arg_export_index, arg_export_path, arg_export_filename, arg_binaryMethod):
    # Otsu's thresholding after Gaussian filtering
    tmp = cv2.cvtColor(arg_frame, cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(tmp, (5, 5), 0)
    if arg_binaryMethod == 0:
        ret, thresholdedImg = cv2.threshold(blur.copy(), self.threshold_graylevel, 255, 0)
    elif arg_binaryMethod == 1:
        ret, thresholdedImg = cv2.threshold(blur.copy(), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    elif arg_binaryMethod == 2:
        thresholdedImg = cv2.adaptiveThreshold(blur.copy(), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 0)
    result = cv2.cvtColor(thresholdedImg, cv2.COLOR_GRAY2RGB)
    ctrs, hier = cv2.findContours(thresholdedImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Keep only contours larger than the configured size threshold
    ctrs = [x for x in ctrs if cv2.contourArea(x) > self.threshold_size]
    rects = [[cv2.boundingRect(ctr), ctr] for ctr in ctrs]
    for rect, cntr in rects:
        cv2.drawContours(result, [cntr], 0, (0, 128, 255), 3)
    if arg_export_index:
        cv2.imwrite(arg_export_path + arg_export_filename + '.jpg', result)
    print("Get Contour success")
    return result
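get_contour is a method and relies on two instance attributes that are not shown in this collection. A minimal sketch of the surrounding object, assuming only those attributes (the class name and default values are made up):

class ContourExtractor:
    """Illustrative holder for the attributes get_contour expects."""

    def __init__(self, threshold_graylevel=100, threshold_size=500):
        self.threshold_graylevel = threshold_graylevel  # gray level used when arg_binaryMethod == 0
        self.threshold_size = threshold_size            # minimum contour area to keep

# Illustrative call; frame would be an RGB image
# extractor = ContourExtractor()
# result = get_contour(extractor, frame, False, '', '', 1)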
arch_light_track.py (project: Vision_Processing-2016, author: Sabercat-Robotics-4146-FRC)
def get_bounding_rect(self, cap, win_cap, win, upper, lower):
    # Blur the frame, convert to HSV, threshold to the given range, then clean up with erode/dilate
    hsv = cv2.blur(cv2.cvtColor(cap, cv2.COLOR_BGR2HSV), (5, 5))
    msk = cv2.inRange(hsv, np.array(lower), np.array(upper))
    msk = cv2.erode(msk, None, iterations=3)
    msk = cv2.dilate(msk, None, iterations=3)
    im2, contours, hierarchy = cv2.findContours(msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        areas = [cv2.contourArea(c) for c in contours]  # get the area of each contour
        max_index = np.argmax(areas)  # get the index of the largest contour by area
        cnts = contours[max_index]  # get the largest contour by area
        cv2.drawContours(msk, [cnts], 0, (0, 255, 0), 3)  # draw the contour onto the mask image
        x, y, w, h = cv2.boundingRect(cnts)  # get the bounding box of the contour
        cv2.rectangle(win_cap, (x, y), (x + w, y + h), (255, 255, 255), 2)  # draw the bounding box on the image
        cv2.imshow("debug.", win_cap)
        try:
            self.smt_dash.putNumber('vis_x', x)
            self.smt_dash.putNumber('vis_y', y)
            self.smt_dash.putNumber('vis_w', w)
            self.smt_dash.putNumber('vis_h', h)
        except Exception:
            pass
image_transformation.py (project: Sign-Language-Recognition, author: Anmol-Singh-Jaggi)
def find_largest_contour_index(contours):
    """
    Finds and returns the index of the largest contour in a list of contours.
    Raises an exception if the contour list is empty.
    """
    if len(contours) <= 0:
        log_message = "The length of the contour list is non-positive!"
        raise Exception(log_message)
    largest_contour_index = 0
    contour_iterator = 1
    while contour_iterator < len(contours):
        if cv2.contourArea(contours[contour_iterator]) > cv2.contourArea(contours[largest_contour_index]):
            largest_contour_index = contour_iterator
        contour_iterator += 1
    return largest_contour_index
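An illustrative call, assuming a binary mask from an earlier thresholding step and the OpenCV 4 style two-value unpacking:

contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
    largest = contours[find_largest_contour_index(contours)]
    print("largest area:", cv2.contourArea(largest))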
def get_contours(image, polydb=0.03, contour_range=5, show=False):
    # Find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    # if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # Loop over the contours
    screenCnt = None
    for c in cnts:
        # Approximate the contour
        peri = cv2.arcLength(c, True)  # contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # If the approximated contour has four points, assume it is the screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # Sometimes the algorithm finds a strange non-convex shape. The shape conforms to the card but it is not
    # complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = np.array([[[x, y]], [[x + w, y]], [[x + w, y + h]], [[x, y + h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
def get_contours(image, polydb=0.03, contour_range=5, show=False):
    # Find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    if cv2version == 3:
        im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    else:
        # OpenCV 2 and 4 return only (contours, hierarchy)
        contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # Loop over the contours
    screenCnt = None
    for c in cnts:
        # Approximate the contour
        peri = cv2.arcLength(c, True)  # contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # If the approximated contour has four points, assume it is the screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # Sometimes the algorithm finds a strange non-convex shape. The shape conforms to the card but it is not
    # complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = np.array([[[x, y]], [[x + w, y]], [[x + w, y + h]], [[x, y + h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
def get_contours(image, polydb=0.1, contour_range=7, show=False):
    # Find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # Loop over the contours
    screenCnt = None
    for c in cnts:
        # Approximate the contour
        peri = cv2.arcLength(c, True)  # contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # If the approximated contour has four points, assume it is the screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # Sometimes the algorithm finds a strange non-convex shape. The shape conforms to the card but it is not
    # complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x + w, y]], [[x + w, y + h]], [[x, y + h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
def get_contours(image, polydb=0.03, contour_range=7, show=False):
    # Find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    # if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # Loop over the contours
    screenCnt = None
    for c in cnts:
        # Approximate the contour
        peri = cv2.arcLength(c, True)  # contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # If the approximated contour has four points, assume it is the screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # Sometimes the algorithm finds a strange non-convex shape. The shape conforms to the card but it is not
    # complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x + w, y]], [[x + w, y + h]], [[x, y + h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
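The four-point screenCnt returned by these get_contours variants is typically fed into a perspective warp to flatten the detected card or screen. A minimal sketch of that step, assuming a BGR source image; the order_points and warp_to_rectangle helpers below are not from any of the projects above:

def order_points(pts):
    # Order the four corners as top-left, top-right, bottom-right, bottom-left
    pts = pts.reshape(4, 2).astype('float32')
    s = pts.sum(axis=1)          # smallest sum = top-left, largest sum = bottom-right
    d = np.diff(pts, axis=1).ravel()  # smallest (y - x) = top-right, largest = bottom-left
    return np.array([pts[s.argmin()], pts[d.argmin()], pts[s.argmax()], pts[d.argmax()]], dtype='float32')

def warp_to_rectangle(image, screenCnt, width=480, height=640):
    src = order_points(screenCnt)
    dst = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]], dtype='float32')
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(image, M, (width, height))

A typical call would be warp_to_rectangle(original, get_contours(edged)), where edged is the edge image that get_contours expects.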
def find_self():
    low_white = np.array([0, 0, 255])
    upper_white = np.array([1, 255, 255])
    mask, mmx, mmy = get_mini_map_mask(low_white, upper_white)
    # 1 and 2 are cv2.RETR_LIST and cv2.CHAIN_APPROX_SIMPLE
    _, contours, _ = cv2.findContours(mask.copy(), 1, 2)
    for cnt in contours:
        M = cv2.moments(cnt)
        print(cv2.contourArea(cnt))
        if cv2.contourArea(cnt) == 4:
            # Centroid from image moments
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            print(cx, cy)
    cv2.imshow('img', mask)
    cv2.waitKey(0)
def find_yellow_birds():
    ps, psx, psy = RS.getPlayingScreen()
    lower_pink = np.array([28, 197, 168])
    upper_pink = np.array([29, 234, 239])
    mask = cv2.inRange(ps, lower_pink, upper_pink)
    #cv2.imshow('img', mask)
    #cv2.waitKey(0)
    _, contours, _ = cv2.findContours(mask, 1, 2)
    # Returns true if birds are found
    for cnt in contours:
        if cv2.contourArea(cnt) > 0:
            return 1
def cropCircle(img):
    '''
    Many of the images were thresholded when taken, so the content appears
    as a circle surrounded by black. This function finds the largest
    rectangle inscribed in the thresholded region and then crops the image
    to that rectangle.
    input: img - image array as loaded by cv2
    return: img_crop, rectangle, tile_size
    '''
    # Resize so that the longer side is 256 pixels, preserving aspect ratio
    if img.shape[0] > img.shape[1]:
        tile_size = (int(img.shape[1] * 256 / img.shape[0]), 256)
    else:
        tile_size = (256, int(img.shape[0] * 256 / img.shape[1]))
    img = cv2.resize(img, dsize=tile_size)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    _, thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
    _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    main_contour = sorted(contours, key=cv2.contourArea, reverse=True)[0]
    # Draw the main contour, then flood-fill its interior to get a filled mask
    ff = np.zeros((gray.shape[0], gray.shape[1]), 'uint8')
    cv2.drawContours(ff, main_contour, -1, 1, 15)
    ff_mask = np.zeros((gray.shape[0] + 2, gray.shape[1] + 2), 'uint8')
    cv2.floodFill(ff, ff_mask, (int(gray.shape[1] / 2), int(gray.shape[0] / 2)), 1)
    # maxRect returns the largest rectangle of ones in the filled mask
    rect = maxRect(ff)
    rectangle = [min(rect[0], rect[2]), max(rect[0], rect[2]), min(rect[1], rect[3]), max(rect[1], rect[3])]
    img_crop = img[rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
    cv2.rectangle(ff, (min(rect[1], rect[3]), min(rect[0], rect[2])), (max(rect[1], rect[3]), max(rect[0], rect[2])), 3, 2)
    return [img_crop, rectangle, tile_size]
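cropCircle calls a maxRect helper that is not included in this collection. As a rough sketch of what such a helper can look like, here is the standard largest-rectangle-of-ones dynamic programming routine, under the assumption that maxRect takes the filled 0/1 mask and returns corners as (row1, col1, row2, col2), which is how the code above indexes its result:

def maxRect(mask):
    # Largest axis-aligned rectangle containing only ones, via the
    # "largest rectangle in a histogram" technique applied row by row.
    rows, cols = mask.shape
    heights = [0] * cols
    best = (0, (0, 0, 0, 0))  # (area, (row1, col1, row2, col2))
    for r in range(rows):
        for c in range(cols):
            heights[c] = heights[c] + 1 if mask[r, c] else 0
        stack = []  # column indices with increasing heights
        for c in range(cols + 1):
            h = heights[c] if c < cols else 0  # sentinel bar of height 0 at the end
            while stack and heights[stack[-1]] >= h:
                top = stack.pop()
                height = heights[top]
                left = stack[-1] + 1 if stack else 0
                area = height * (c - left)
                if area > best[0]:
                    best = (area, (r - height + 1, left, r, c - 1))
            stack.append(c)
    return best[1]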
digital_display_ocr.py (project: digital-display-character-rec, author: upupnaway)
def find_display_contour(edge_img_arr):
    display_contour = None
    edge_copy = edge_img_arr.copy()
    contours, hierarchy = cv2.findContours(edge_copy, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    top_cntrs = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    for cntr in top_cntrs:
        peri = cv2.arcLength(cntr, True)
        approx = cv2.approxPolyDP(cntr, 0.02 * peri, True)
        if len(approx) == 4:
            display_contour = approx
            break
    return display_contour
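An illustrative pipeline around this function; the file name and Canny thresholds are placeholders:

# Edge-detect a photo of a digital display, then look for its quadrilateral outline
image = cv2.imread('display.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 50, 200)
quad = find_display_contour(edges)
if quad is not None:
    cv2.drawContours(image, [quad], -1, (0, 255, 0), 2)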