import cv2
import numpy as np

def logoDetect(img, imgo):
    """Locate the vehicle logo in the (upscaled) band above the license plate
    and crop it from the original image. Relies on a module-level
    `plate` = [x, y, w, h] describing the detected plate."""
    imglogo = imgo.copy()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, (2 * img.shape[1], 2 * img.shape[0]), interpolation=cv2.INTER_CUBIC)
    #img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, -3)
    ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #img = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=9)
    img = cv2.Canny(img, 100, 200)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    img = cv2.dilate(img, element2, iterations=1)
    img = cv2.erode(img, element1, iterations=3)
    img = cv2.dilate(img, element2, iterations=3)
    # find contours (OpenCV 3.x returns three values here)
    im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    tema = 0
    result = []
    for con in contours:
        x, y, w, h = cv2.boundingRect(con)
        area = w * h
        ratio = max(w / h, h / w)
        if area > 300 and area < 20000 and ratio < 2:
            if area > tema:
                tema = area
                result = [x, y, w, h]
                ratio2 = ratio
    if not result:
        return img  # no plausible logo candidate was found
    # Map the winning box back to original-image coordinates (the input was a
    # 2x-upscaled crop of the band above the plate) and add a small margin.
    logo2_X = [int(result[0] / 2 + plate[0] - 3), int(result[0] / 2 + plate[0] + result[2] / 2 + 3)]
    logo2_Y = [int(result[1] / 2 + max(0, plate[1] - plate[3] * 3.0) - 3), int(result[1] / 2 + max(0, plate[1] - plate[3] * 3.0) + result[3] / 2) + 3]
    cv2.rectangle(img, (result[0], result[1]), (result[0] + result[2], result[1] + result[3]), (255, 0, 0), 2)
    cv2.rectangle(imgo, (logo2_X[0], logo2_Y[0]), (logo2_X[1], logo2_Y[1]), (0, 0, 255), 2)
    print(tema, ratio2, result)
    logo2 = imglogo[logo2_Y[0]:logo2_Y[1], logo2_X[0]:logo2_X[1]]
    cv2.imwrite('./logo2.jpg', logo2)
    return img
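A hypothetical call sequence, inferred from the coordinate math above; the input image and the plate box are assumptions:

imgo = cv2.imread('car.jpg')   # assumed input image
plate = [160, 280, 120, 40]    # assumed plate box [x, y, w, h]
band = imgo[int(max(0, plate[1] - 3.0 * plate[3])):plate[1],
            plate[0]:plate[0] + plate[2]]
edges = logoDetect(band, imgo)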
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)  # OpenCV 2.x API; use cv2.boxPoints(r) on 3.x+
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        # boundingRect returns (x, y, width, height), not two corner points
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 255, -1)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 0, 4)
    return np.minimum(c_im, ary)
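`angle_from_right` is called above but not defined on this page; a minimal sketch consistent with the 10-degree test (an assumption, not the original helper):

def angle_from_right(deg):
    # degrees between `deg` and the nearest multiple of 90
    return min(deg % 90, 90 - (deg % 90))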
def get_contour(self, arg_frame, arg_export_index, arg_export_path, arg_export_filename, arg_binaryMethod):
    # Otsu's thresholding after Gaussian filtering
    tmp = cv2.cvtColor(arg_frame, cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(tmp, (5, 5), 0)
    if arg_binaryMethod == 0:
        ret, thresholdedImg = cv2.threshold(blur.copy(), self.threshold_graylevel, 255, 0)
    elif arg_binaryMethod == 1:
        ret, thresholdedImg = cv2.threshold(blur.copy(), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    elif arg_binaryMethod == 2:
        thresholdedImg = cv2.adaptiveThreshold(blur.copy(), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 0)
    result = cv2.cvtColor(thresholdedImg, cv2.COLOR_GRAY2RGB)
    # OpenCV 2.x/4.x return signature (two values)
    ctrs, hier = cv2.findContours(thresholdedImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # keep only contours above the size threshold (list() so it also works on Python 3)
    ctrs = list(filter(lambda x: cv2.contourArea(x) > self.threshold_size, ctrs))
    rects = [[cv2.boundingRect(ctr), ctr] for ctr in ctrs]
    for rect, cntr in rects:
        cv2.drawContours(result, [cntr], 0, (0, 128, 255), 3)
    if arg_export_index:
        cv2.imwrite(arg_export_path + arg_export_filename + '.jpg', result)
    print("Get Contour success")
    return result
def do(self, bin_img):
    tmp_bin_img = np.copy(bin_img)
    if cv2.__version__[0] == "2":
        contours, hierarchy = cv2.findContours(
            tmp_bin_img,
            cv2.RETR_TREE,
            cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, hierarchy = cv2.findContours(
            tmp_bin_img,
            cv2.RETR_CCOMP,
            cv2.CHAIN_APPROX_SIMPLE)
    filtered_contours = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if w * h > self.max_area or w * h < self.min_area:
            # blank out components whose bounding box is outside the allowed range
            bin_img[y:y+h, x:x+w] = 0
        else:
            filtered_contours.append(cnt)
    contours = filtered_contours
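Hypothetical usage; the enclosing class is not shown on this page, so the names below are guesses:

# f = AreaFilter()                    # assumed host class
# f.min_area, f.max_area = 50, 5000   # keep components in this range
# f.do(binary_img)                    # binary_img is modified in place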
def get_text_with_location(boxed_image, contours, img):
    image_text_dict = {}
    for contour in contours:
        # get the rectangle bounding the contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # skip boxes too small to contain readable text
        if w < 20 or h < 20:
            continue
        # draw a rectangle around the contour on the original image
        cv2.rectangle(boxed_image, (x, y), (x + w + 10, y + h + 10), thickness=2, color=0)
        box_read = extract_image_from_location(img, x, y, w, h)
        box_read = box_read.strip()
        image_text_dict[(x, y)] = box_read
    return image_text_dict
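`extract_image_from_location` is not defined on this page; a sketch assuming pytesseract for the OCR step (the original project may use a different engine):

import pytesseract

def extract_image_from_location(img, x, y, w, h):
    # crop the detected box and OCR it; pytesseract accepts numpy images
    crop = img[y:y + h, x:x + w]
    return pytesseract.image_to_string(crop)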
arch_light_track.py (project: Vision_Processing-2016, author: Sabercat-Robotics-4146-FRC)
def get_bounding_rect(self, cap, win_cap, win, upper, lower):
    # Blur the HSV-converted frame, threshold to the color range, then
    # erode/dilate to clean up the mask. (The body publishes via
    # self.smt_dash, so this is a method of a vision class.)
    hsv = cv2.cvtColor(cap, cv2.COLOR_BGR2HSV)
    blurred = cv2.blur(hsv, (5, 5))
    msk = cv2.inRange(blurred, np.array(lower), np.array(upper))
    msk = cv2.erode(msk, None, iterations=3)
    msk = cv2.dilate(msk, None, iterations=3)
    im2, contours, hierarchy = cv2.findContours(msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x
    if len(contours) > 0:
        areas = [cv2.contourArea(c) for c in contours]  # get the area of each contour
        max_index = np.argmax(areas)  # get the index of the largest contour by area
        cnts = contours[max_index]  # get the largest contour by area
        cv2.drawContours(msk, [cnts], 0, (0, 255, 0), 3)  # draw the contour onto the mask image
        x, y, w, h = cv2.boundingRect(cnts)  # get the bounding box of the contour
        cv2.rectangle(win_cap, (x, y), (x + w, y + h), (255, 255, 255), 2)  # draw the bounding box
        cv2.imshow("debug.", win_cap)
        try:
            self.smt_dash.putNumber('vis_x', x)
            self.smt_dash.putNumber('vis_y', y)
            self.smt_dash.putNumber('vis_w', w)
            self.smt_dash.putNumber('vis_h', h)
        except Exception:
            pass
def rotate_image(img_src, angle, scale, crop=True):
    img_src, size_dest = pad_image(img_src, scale)
    size = tuple(np.array([img_src.shape[1], img_src.shape[0]]))
    org_h = size[1]
    org_w = size[0]
    src_r = np.sqrt((size[0] / 2.0) ** 2 + (size[1] / 2.0) ** 2)
    org_angle = np.arctan(float(org_h) / org_w)
    dest_h = size_dest[0]
    dest_w = size_dest[1]
    center = tuple(np.array([img_src.shape[1] * 0.5, img_src.shape[0] * 0.5]))
    dsize = (dest_w, dest_h)
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    img_rot = cv2.warpAffine(img_src, rotation_matrix, size, flags=cv2.INTER_CUBIC)
    if crop:
        # crop to the non-transparent region using the alpha channel
        x, y, w, h = cv2.boundingRect(img_rot[:, :, 3])
        return img_rot[y:y+h, x:x+w, :]
    else:
        return img_rot
def rotate_image(img_src, angle, scale):
    img_src, size_dest = pad_image(img_src, scale)
    size = tuple(np.array([img_src.shape[1], img_src.shape[0]]))
    org_h = size[1]
    org_w = size[0]
    src_r = np.sqrt((size[0] / 2.0) ** 2 + (size[1] / 2.0) ** 2)
    org_angle = np.arctan(float(org_h) / org_w)
    dest_h = size_dest[0]
    dest_w = size_dest[1]
    center = tuple(np.array([img_src.shape[1] * 0.5, img_src.shape[0] * 0.5]))
    dsize = (dest_w, dest_h)
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    img_rot = cv2.warpAffine(img_src, rotation_matrix, size, flags=cv2.INTER_CUBIC)
    # crop to the non-transparent region using the alpha channel
    x, y, w, h = cv2.boundingRect(img_rot[:, :, 3])
    return img_rot[y:y+h, x:x+w, :]
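Both rotate_image variants call `pad_image`, which is not shown on this page. A minimal sketch, assuming it centers the (BGRA) image on a square canvas large enough to hold any rotation at the given scale:

import numpy as np

def pad_image(img_src, scale):
    # sketch only: returns (padded_image, (dest_h, dest_w))
    h, w = img_src.shape[:2]
    side = int(np.ceil(np.hypot(h, w) * scale))
    canvas = np.zeros((side, side, img_src.shape[2]), dtype=img_src.dtype)
    y0 = (side - h) // 2
    x0 = (side - w) // 2
    canvas[y0:y0 + h, x0:x0 + w] = img_src
    return canvas, (side, side)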
def cluster_bounding_boxes(self, contours):
    bounding_boxes = []
    for i in range(len(contours)):
        x1, y1, w1, h1 = cv2.boundingRect(contours[i])
        parent_bounding_box = self.get_parent_bounding_box(bounding_boxes, i)
        if parent_bounding_box is None:
            parent_bounding_box = self.BoundingBox(Rect(x1, y1, w1, h1))
            parent_bounding_box.members.append(i)
            bounding_boxes.append(parent_bounding_box)
        for j in range(i + 1, len(contours)):
            if self.get_parent_bounding_box(bounding_boxes, j) is None:
                x2, y2, w2, h2 = cv2.boundingRect(contours[j])
                rect = Rect(x2, y2, w2, h2)
                distance = parent_bounding_box.rect.distance_to_rect(rect)
                if distance < 100:
                    parent_bounding_box.update_rect(self.extend_rectangle(parent_bounding_box.rect, rect))
                    parent_bounding_box.members.append(j)
    return bounding_boxes
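The `Rect` helper and its `distance_to_rect` are not defined on this page; a minimal sketch covering only what the snippet touches, with center-to-center distance as an assumed metric:

import math

class Rect(object):
    def __init__(self, x, y, w, h):
        self.x, self.y, self.w, self.h = x, y, w, h

    def distance_to_rect(self, other):
        # assumption: Euclidean distance between rectangle centers
        return math.hypot((self.x + self.w / 2.0) - (other.x + other.w / 2.0),
                          (self.y + self.h / 2.0) - (other.y + other.h / 2.0))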
def extra_processing(pipeline):
    """
    Performs extra processing on the pipeline's outputs and publishes data to NetworkTables.
    :param pipeline: the pipeline that just processed an image
    :return: None
    """
    center_x_positions = []
    center_y_positions = []
    widths = []
    heights = []
    # Find the bounding boxes of the contours to get x, y, width, and height
    for contour in pipeline.filter_contours_output:
        x, y, w, h = cv2.boundingRect(contour)
        center_x_positions.append(x + w / 2)  # x and y are the top-left corner of the bounding box
        center_y_positions.append(y + h / 2)
        widths.append(w)
        heights.append(h)
    # Publish to the '/vision' network table
    table = NetworkTable.getTable("/vision")
    table.putValue("centerX", NumberArray.from_list(center_x_positions))
    table.putValue("centerY", NumberArray.from_list(center_y_positions))
    table.putValue("width", NumberArray.from_list(widths))
    table.putValue("height", NumberArray.from_list(heights))
def extra_processing(pipeline):
    """
    Performs extra processing on the pipeline's outputs and publishes data to NetworkTables.
    :param pipeline: the pipeline that just processed an image
    :return: None
    """
    center_x_positions = []
    center_y_positions = []
    widths = []
    heights = []
    # Find the bounding boxes of the contours to get x, y, width, and height
    for contour in pipeline.filter_contours_output:
        x, y, w, h = cv2.boundingRect(contour)
        center_x_positions.append(x + w / 2)  # x and y are the top-left corner of the bounding box
        center_y_positions.append(y + h / 2)
        widths.append(w)
        heights.append(h)
    print(center_x_positions)
    # Publish to the '/vision' network table
    table = NetworkTable.getTable("/vision")
    table.putValue("centerX", NumberArray.from_list(center_x_positions))
    table.putValue("centerY", NumberArray.from_list(center_y_positions))
    table.putValue("width", NumberArray.from_list(widths))
    table.putValue("height", NumberArray.from_list(heights))
image_transformation.py (project: Sign-Language-Recognition, author: Anmol-Singh-Jaggi)
def draw_contours(frame):
    """
    Draws a contour around white color.
    """
    print("Drawing contour around white color...")
    # 'contours' is a list of contours found (OpenCV 2.x/4.x return signature).
    contours, _ = cv2.findContours(
        frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Finding the contour with the greatest area.
    largest_contour_index = find_largest_contour_index(contours)
    # Draw the largest contour in the image.
    cv2.drawContours(frame, contours,
                     largest_contour_index, (255, 255, 255), thickness=-1)
    # Get the bounding rectangle around the contour perimeter.
    contour_dimensions = cv2.boundingRect(contours[largest_contour_index])
    # cv2.rectangle(sign_image, (x, y), (x + w, y + h), (255, 255, 255), 0, 8)
    print("Done!")
    return (frame, contour_dimensions)
def get_contours(image, polydb=0.03, contour_range=5, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    # if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # loop over the contours
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)  # contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # if the approximated contour has four points, assume we have found the screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; it conforms to
    # the card but is incomplete, so complete it into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = np.array([[[x, y]], [[x + w, y]], [[x + w, y + h]], [[x, y + h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
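`_findContours` and `EdgeNotFound` are used by these `get_contours` variants but never defined on this page; a minimal sketch (an assumption, not the original code) that absorbs the OpenCV 2/3/4 return-signature differences:

import cv2

class EdgeNotFound(Exception):
    """Raised when no four-point contour is found."""

def _findContours(image, mode, method):
    # OpenCV 3.x returns (image, contours, hierarchy); 2.x and 4.x return
    # (contours, hierarchy). Return the contour list in either case.
    result = cv2.findContours(image.copy(), mode, method)
    return result[0] if len(result) == 2 else result[1]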
def get_contours(image, polydb=0.03, contour_range=5, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    if cv2version == 3:  # OpenCV 3.x returns three values; 2.x/4.x return two
        im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # loop over the contours
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)  # contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # if the approximated contour has four points, assume we have found the screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; it conforms to
    # the card but is incomplete, so complete it into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = np.array([[[x, y]], [[x + w, y]], [[x + w, y + h]], [[x, y + h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
def get_contours(image, polydb=0.1, contour_range=7, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # loop over the contours
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)  # contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # if the approximated contour has four points, assume we have found the screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; it conforms to
    # the card but is incomplete, so complete it into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x + w, y]], [[x + w, y + h]], [[x, y + h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
def get_contours(image, polydb=0.03, contour_range=7, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    # if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # loop over the contours
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)  # contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # if the approximated contour has four points, assume we have found the screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; it conforms to
    # the card but is incomplete, so complete it into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x + w, y]], [[x + w, y + h]], [[x, y + h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
def findFishingIcon():
    # fish icon color range (HSV)
    low = np.array([93, 119, 84])
    high = np.array([121, 255, 255])
    mask, mm_x, mm_y = get_mini_map_mask(low, high)
    _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x
    for c in contours:
        (x, y, w, h) = cv2.boundingRect(c)
        # offset from minimap coordinates to screen coordinates
        x += mm_x
        y += mm_y
        x2 = x + w
        y2 = y + h
        Mouse.randMove(x, y, x2, y2, 1)
        run = 0
        RandTime.randTime(1, 0, 0, 1, 9, 9)
        return 0
    return 1
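`get_mini_map_mask` is not shown on this page; a sketch under the assumption that it grabs the on-screen minimap and thresholds it in HSV (the bounding box below is a hypothetical placeholder):

import cv2
import numpy as np
from PIL import ImageGrab

MM_BOX = (570, 9, 714, 153)  # hypothetical minimap screen coordinates

def get_mini_map_mask(low, high):
    frame = np.array(ImageGrab.grab(bbox=MM_BOX))
    hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
    mask = cv2.inRange(hsv, low, high)
    return mask, MM_BOX[0], MM_BOX[1]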
def findFishingIcon(self):
    # fish icon color range (HSV)
    low = np.array([93, 119, 84])
    high = np.array([121, 255, 255])
    mask, mm_x, mm_y = self.mini_map_mask(low, high)
    _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x
    for c in contours:
        (x, y, w, h) = cv2.boundingRect(c)
        # offset from minimap coordinates to screen coordinates
        x += mm_x
        y += mm_y
        x2 = x + w
        y2 = y + h
        Mouse.randMove(x, y, x2, y2, 1)
        run = 0
        time.sleep(1)
        return 0
    return 1
def process_letter(thresh, output):
    # assign the kernel size
    kernel = np.ones((2, 1), np.uint8)  # vertical
    # use a closing morph operation, then erode to narrow the image
    temp_img = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=3)
    # temp_img = cv2.erode(thresh, kernel, iterations=2)
    letter_img = cv2.erode(temp_img, kernel, iterations=1)
    # find contours (OpenCV 2.x/4.x return signature)
    (contours, _) = cv2.findContours(letter_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # loop over all the contour areas
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(output, (x - 1, y - 5), (x + w, y + h), (0, 255, 0), 1)
    return output
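A hypothetical driver for process_letter; the input file name and the inverse-Otsu preprocessing are assumptions:

img = cv2.imread('page.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
boxed = process_letter(thresh, img.copy())
cv2.imwrite('letters.png', boxed)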
# processing letter-by-letter boxing