import cv2
import numpy as np

def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in range(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            # OpenCV 3.x three-value return; 2.x/4.x return (contours, hierarchy)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
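Every find_squares variant in this collection relies on an angle_cos helper that none of the snippets define. A minimal version, matching the helper shipped with OpenCV's samples/python/squares.py, is:

def angle_cos(p0, p1, p2):
    # cosine of the angle at vertex p1, formed by the segments to p0 and p2
    d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')
    return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))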
def _extract_spots(self) -> None:
    # Dilate and erode to 'clean' the spot (note that this distorts the digit itself,
    # so we only do it when extracting spots)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    img = cv2.dilate(self._img, kernel, iterations=1)
    img = cv2.erode(img, kernel, iterations=2)
    img = cv2.dilate(img, kernel, iterations=1)
    # Perform a simple blob detect
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = 20  # The dot in 20pt font has an area of about 30
    params.filterByCircularity = True
    params.minCircularity = 0.7
    params.filterByConvexity = True
    params.minConvexity = 0.8
    params.filterByInertia = True
    params.minInertiaRatio = 0.4
    detector = cv2.SimpleBlobDetector_create(params)
    self.spot_keypoints = detector.detect(img)
    # Log intermediate image
    img_with_keypoints = cv2.drawKeypoints(img, self.spot_keypoints, outImage=np.array([]),
                                           color=(0, 0, 255),
                                           flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    self.intermediate_images.append(NamedImage(img_with_keypoints, 'Spot Detection Image'))
def apply_filters(self, image, denoise=False):
    """ This method applies the required filters to the
    extracted regions of interest. Every square in the
    sudoku grid is considered a region of interest,
    since it can potentially contain a value. """
    # Convert to grayscale
    source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Denoise the grayscale image if requested in the params
    if denoise:
        denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
        source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
        # source_blur = denoised_gray
    else:
        source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
    # 0 -> cv2.ADAPTIVE_THRESH_MEAN_C, 1 -> cv2.THRESH_BINARY_INV
    source_thresh = cv2.adaptiveThreshold(source_blur, 255, 0, 1, 5, 2)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
    source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
    if ENABLE_PREVIEW_ALL:
        image_preview(source_dilated)
    return source_dilated
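apply_filters references BLUR_KERNEL_SIZE, ENABLE_PREVIEW_ALL and image_preview, none of which are shown. A plausible minimal version (the values and the helper body are assumptions):

BLUR_KERNEL_SIZE = (3, 3)   # assumed Gaussian kernel for the denoised branch
ENABLE_PREVIEW_ALL = False  # set True to preview every processed cell

def image_preview(image):
    # assumed debug helper: show the image until a key is pressed
    cv2.imshow('preview', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()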
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in range(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            # find_contours is presumably a local wrapper around cv2.findContours
            # that normalizes the return value across OpenCV versions
            contours, _hierarchy = find_contours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                area = cv2.contourArea(cnt)
                if len(cnt) == 4 and 20 < area < 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    if max_cos < 0.1:
                        # keep only near-square quadrilaterals (aspect ratio within ~7%)
                        if 1 - (float(w) / float(h)) <= 0.07 and 1 - (float(h) / float(w)) <= 0.07:
                            squares.append(cnt)
    return squares
def find_squares(img, cos_limit=0.1):
    print('searching for squares with cos_limit %f' % cos_limit)
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in range(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            # OpenCV 3.x three-value return
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    if max_cos < cos_limit:
                        squares.append(cnt)
                    else:
                        #print('dropped a square with max_cos %f' % max_cos)
                        pass
    return squares
###
### Version V2. Collect meta-data along the way, with commentary added.
###
def add_blobs(crop_frame):
    frame = cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of green color in HSV
    lower_green = np.array([70, 50, 50])
    upper_green = np.array([85, 255, 255])
    # Threshold the HSV image to keep only green pixels
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs on the inverted mask (the blob detector looks for dark blobs by default)
    reversemask = 255 - mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print("found blobs")
        if len(keypoints) > 4:
            # keep only the three smallest blobs
            keypoints.sort(key=(lambda s: s.size))
            keypoints = keypoints[0:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS makes the circle size match the blob size
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print("no blobs")
        im_with_keypoints = crop_frame
    return im_with_keypoints  # , max_blob_dist, blob_center, keypoint_in_orders
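add_blobs reads a module-level `params` object that the snippet never defines. A plausible minimal setup (all threshold values here are assumptions):

params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = 20          # assumed minimum blob area in pixels
params.filterByCircularity = True
params.minCircularity = 0.5  # assumed; the green markers are roughly circular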
def read_images(filenames, domain=None, image_size=64):
    images = []
    for fn in filenames:
        image = cv2.imread(fn)
        if image is None:
            continue
        if domain == 'A':
            kernel = np.ones((3, 3), np.uint8)
            image = image[:, :256, :]  # left half of the paired image
            # dilate on the inverted image to thicken dark strokes
            image = 255. - image
            image = cv2.dilate(image, kernel, iterations=1)
            image = 255. - image
        elif domain == 'B':
            image = image[:, 256:, :]  # right half of the paired image
        image = cv2.resize(image, (image_size, image_size))
        image = image.astype(np.float32) / 255.
        image = image.transpose(2, 0, 1)  # HWC -> CHW
        images.append(image)
    images = np.stack(images)
    return images
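A hypothetical call, assuming side-by-side paired images (A|B halves, 256px each) on disk at an assumed path:

import glob

train_A = read_images(sorted(glob.glob('data/train/*.jpg')), domain='A', image_size=64)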
def dif_gaus(image, lower, upper):
    # NB: cv2.GaussianBlur requires positive odd kernel sizes, so
    # lower-1 and upper-1 are expected to be odd here
    lower, upper = int(lower - 1), int(upper - 1)
    lower = cv2.GaussianBlur(image, ksize=(lower, lower), sigmaX=0)
    upper = cv2.GaussianBlur(image, ksize=(upper, upper), sigmaX=0)
    # upper += 50
    # lower += 50
    dif = lower - upper
    # dif *= .1
    # dif = cv2.medianBlur(dif,3)
    # dif = 255-dif
    dif = cv2.inRange(dif, np.asarray(200), np.asarray(256))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    dif = cv2.dilate(dif, kernel, iterations=2)
    dif = cv2.erode(dif, kernel, iterations=1)
    # dif = cv2.max(image,dif)
    # dif = cv2.dilate(dif, kernel, iterations=1)
    return dif
def erase_specular(image, lower_threshold=0.0, upper_threshold=150.0):
    """erase_specular: removes specular reflections
       within given threshold using a binary mask (hi_mask)
    """
    # NB: upper_threshold is unused as written; the upper bound is hard-coded to 256
    thresh = cv2.inRange(image,
                         np.asarray(float(lower_threshold)),
                         np.asarray(256.0))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    hi_mask = cv2.dilate(thresh, kernel, iterations=2)
    specular = cv2.inpaint(image, hi_mask, 2, flags=cv2.INPAINT_TELEA)
    # return cv2.max(hi_mask,image)
    return specular
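A hypothetical call on a grayscale eye frame; note that the default lower_threshold of 0.0 would mask the entire image, so a high value is passed explicitly (filename assumed):

gray = cv2.imread('eye_frame.png', cv2.IMREAD_GRAYSCALE)
cleaned = erase_specular(gray, lower_threshold=180.0)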
def skin_detect(self, raw_yrb, img_src):
    # use median blurring to remove signal noise in the YCrCb domain
    raw_yrb = cv2.medianBlur(raw_yrb, 5)
    mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
    # morphological transform to remove unwanted parts
    kernel = np.ones((5, 5), np.uint8)
    #mask_skin = cv2.morphologyEx(mask_skin, cv2.MORPH_OPEN, kernel)
    mask_skin = cv2.dilate(mask_skin, kernel, iterations=2)
    res_skin = cv2.bitwise_and(img_src, img_src, mask=mask_skin)
    #res_skin_dn = cv2.fastNlMeansDenoisingColored(res_skin, None, 10, 10, 7, 21)
    return res_skin
# Do background subtraction with some filtering
def animpingpong(self):
    print(self)
    print(self.Object)
    print(self.Object.Name)
    obj = self.Object
    img = cv2.imread(obj.imageFile)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = np.float32(gray)
    dst = cv2.cornerHarris(gray, 3, 3, 0.00001)
    dst = cv2.dilate(dst, None)
    # mark strong corners in red on the source image
    img[dst > 0.01 * dst.max()] = [0, 0, 255]

    from matplotlib import pyplot as plt
    plt.subplot(121), plt.imshow(img, cmap='gray')
    plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(dst, cmap='gray')
    plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
    plt.show()
def cannyThresholding(self, contour_retrieval_mode=cv2.RETR_LIST):
    '''
    contour_retrieval_mode is passed through as the second argument to cv2.findContours
    '''
    # Attempt to match edges found in the blue, green or red channels: collect all
    channel = 0
    for gray in cv2.split(self.img):
        channel += 1
        print('channel %d' % channel)
        title = self.tgen.next('channel-%d' % channel)
        if self.show: ImageViewer(gray).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
        found = {}
        for thrs in range(0, 255, 26):
            print('Using threshold %d' % thrs)
            if thrs == 0:
                print('First step')
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                title = self.tgen.next('canny-%d' % channel)
                if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
                bin = cv2.dilate(bin, None)
                title = self.tgen.next('canny-dilate-%d' % channel)
                if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
                title = self.tgen.next('channel-%d-threshold-%d' % (channel, thrs))
                if self.show: ImageViewer(bin).show(window='Next threshold (n to continue)', destroy=self.destroy, info=self.info, thumbnailfn=title)
            # OpenCV 3.x three-value return
            bin, contours, hierarchy = cv2.findContours(bin, contour_retrieval_mode, cv2.CHAIN_APPROX_SIMPLE)
            title = self.tgen.next('channel-%d-threshold-%d-contours' % (channel, thrs))
            if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
            if contour_retrieval_mode == cv2.RETR_LIST or contour_retrieval_mode == cv2.RETR_EXTERNAL:
                filteredContours = contours
            else:
                filteredContours = []
                h = hierarchy[0]
                for component in zip(contours, h):
                    currentContour = component[0]
                    currentHierarchy = component[1]
                    if currentHierarchy[3] < 0:
                        # Found an outermost (parentless) component
                        filteredContours.append(currentContour)
                print('Contours filtered. Input %d Output %d' % (len(contours), len(filteredContours)))
                time.sleep(5)
            for cnt in filteredContours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                cnt_len = len(cnt)
                cnt_area = cv2.contourArea(cnt)
                cnt_isConvex = cv2.isContourConvex(cnt)
                if cnt_len == 4 and (self.area_min < cnt_area < self.area_max) and cnt_isConvex:
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    if max_cos < self.cos_limit:
                        sq = Square(cnt, cnt_area, cnt_isConvex, max_cos)
                        self.squares.append(sq)
                    else:
                        #print('dropped a square with max_cos %f' % max_cos)
                        pass
            found[thrs] = len(self.squares)
            print('Found %d quadrilaterals with threshold %d' % (len(self.squares), thrs))
def find_chars(img):
    # img is a PIL image; convert("L") yields a grayscale array
    gray = np.array(img.convert("L"))
    ret, mask = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
    image_final = cv2.bitwise_and(gray, gray, mask=mask)
    ret, new_img = cv2.threshold(image_final, 180, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(new_img, kernel, iterations=1)
    # Image.fromarray(dilated).save('out.png')  # for debugging
    # OpenCV 3.x three-value return
    _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    coords = []
    for contour in contours:
        # get the rectangle bounding the contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # ignore large boxes (probably not chars)
        if w > 70 and h > 70:
            continue
        coords.append((x, y, w, h))
    return coords
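find_chars expects a PIL image, since it calls img.convert("L"); a hypothetical call (filename assumed):

from PIL import Image

char_boxes = find_chars(Image.open('screenshot.png'))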
# find list of eye coordinates in image
def find_components(im, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    dilation = dilate(im, kernel, 6)
    count = 21
    n = 0
    sigma = 0.000
    # NB: only sigma changes between iterations; the dilation itself is computed once,
    # and `possible` is unbound if the loop body never runs
    while count > max_components:
        n += 1
        sigma += 0.005
        result = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # handle both the 3-value (OpenCV 3.x) and 2-value (2.x/4.x) return forms
        if len(result) == 3:
            _, contours, hierarchy = result
        elif len(result) == 2:
            contours, hierarchy = result
        possible = find_likely_rectangles(contours, sigma)
        count = len(possible)
    return (dilation, possible, n)
def find_components(edges, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    # Perform increasingly aggressive dilation until there are just a few
    # connected components.
    count = 21
    dilation = 5
    n = 1
    while count > max_components:
        n += 1
        dilated_image = dilate(edges, N=3, iterations=n)
        # OpenCV 3.x three-value return
        _, contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        count = len(contours)
    # print(dilation)
    # Image.fromarray(edges).show()
    # Image.fromarray(255 * dilated_image).show()
    return contours
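The find_components variants call a dilate helper rather than cv2.dilate directly; the first passes an explicit kernel while this one passes a kernel size N, so the original projects' helpers likely differed. A minimal sketch for the N-based form, assuming a plain square kernel:

def dilate(ary, N=3, iterations=1):
    # dilate with an N x N all-ones kernel, `iterations` times
    kernel = np.ones((N, N), np.uint8)
    return cv2.dilate(ary, kernel, iterations=iterations)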
def remove_ridges(image, width=6, threshold=160, dilation=1,
                  return_mask=False):
    """Detect ridges of `width` pixels using the largest eigenvalue of the
    Hessian matrix, then create a binarized mask with `threshold` and remove
    it from `image` (set to black). Default values are optimized for text
    detection and removal.
    A dilation radius in pixels can be passed in to thicken the mask prior
    to it being applied."""
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # The value of sigma is calculated according to Steger's work:
    # An Unbiased Detector of Curvilinear Structures,
    # IEEE Transactions on Pattern Analysis and Machine Intelligence,
    # Vol. 20, No. 2, Feb 1998
    # http://ieeexplore.ieee.org/document/659930/
    sigma = (width / 2) / np.sqrt(3)
    # `feature` is skimage.feature; convert() and binarize_image() are project helpers
    hxx, hxy, hyy = feature.hessian_matrix(gray_image, sigma=sigma, order='xy')
    large_eigenvalues, _ = feature.hessian_matrix_eigvals(hxx, hxy, hyy)
    mask = convert(large_eigenvalues)
    mask = binarize_image(mask, method='boolean', threshold=threshold)
    if dilation:
        dilation = (2 * dilation) + 1
        dilation_kernel = np.ones((dilation, dilation), np.uint8)
        mask = cv2.dilate(mask, dilation_kernel)
    # NB: return_mask is unused as written
    return image, 255 - mask
def get_contour_portion(images, segb):
    ns = images.shape[0]
    nt = images.shape[1]
    portion = np.zeros((ns, nt))
    for s in range(ns):
        for t in range(nt):
            img = images[s, t, 0]
            seg = segb[nt * s + t, 0]
            if np.sum(seg) < 10:
                portion[s, t] = 0.0
                continue
            # ring of pixels just outside the segmentation
            mask = cv2.dilate(seg, np.ones((7, 7))) - seg > 0
            z = img[mask]
            x, y = np.where(mask)
            lvinside = np.mean(img[seg > 0])
            lvoutside = np.percentile(z, 20)
            ccut = lvinside * 0.3 + lvoutside * 0.7
            cnt_sh = get_contour_shape(x, y, z)
            if cnt_sh is None:
                portion[s, t] = 0.0
            else:
                res = get_eff_portion(cnt_sh, ccut)
                portion[s, t] = res
    return portion
def _do_filter(self, frame):
    ''' Process a single frame. '''
    # blur to reduce noise
    frame = cv2.GaussianBlur(frame, (5, 5), 0, borderType=cv2.BORDER_CONSTANT)
    # threshold to find contiguous regions of "bright" pixels,
    # ignoring all "dark" (< 1/8 above min) pixels
    max_val = numpy.max(frame)
    min_val = numpy.min(frame)
    # if the frame is completely dark, then just return it
    if max_val == min_val:
        return frame
    threshold = min_val + (max_val - min_val) / 8
    _, frame = cv2.threshold(frame, threshold, 255, cv2.THRESH_BINARY)
    # filter out single pixels and other noise
    frame = cv2.erode(frame, self._element_shrink)
    # restore and join nearby regions (in case one fish has a skinny middle...)
    frame = cv2.dilate(frame, self._element_grow)
    return frame
def __init__(self):
    super(TargetFilterBGSub, self).__init__()
    # background subtractor
    #self._bgs = cv2.BackgroundSubtractorMOG()
    #self._bgs = cv2.BackgroundSubtractorMOG2() # not great defaults, and need bShadowDetection to be False
    #self._bgs = cv2.BackgroundSubtractorMOG(history=10, nmixtures=3, backgroundRatio=0.2, noiseSigma=20)
    # varThreshold: higher values detect fewer/smaller changed regions
    self._bgs = cv2.createBackgroundSubtractorMOG2(history=0, varThreshold=8, detectShadows=False)
    # ??? history is ignored? Only if learning_rate is > 0, or...? Unclear.

    # Learning rate for background subtractor.
    # 0 = never adapts after initial background creation.
    # A bit above 0 looks good.
    # Lower values are better for detecting slower movement, though it
    # takes a bit of time to learn the background initially.
    self._learning_rate = 0.001

    # elements to reuse in erode/dilate
    # CROSS eliminates more horizontal/vertical lines and leaves more
    # blobs with extent in both axes [than RECT].
    self._element_shrink = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    self._element_grow = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
def process(img):
    img = cv2.medianBlur(img, 5)
    kernel = np.ones((3, 3), np.uint8)
    #img = cv2.erode(img, kernel, iterations=1)
    sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=3)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    dilation = cv2.dilate(sobel, element2, iterations=1)
    erosion = cv2.erode(dilation, element1, iterations=1)
    dilation2 = cv2.dilate(erosion, element2, iterations=3)
    #img = cv2.dilate(img, kernel, iterations=1)
    #img = cv2.Canny(img, 100, 200)
    return dilation2
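A hypothetical call, assuming a grayscale image of a car's front (filename assumed); the Sobel-plus-morphology pipeline leaves bright bands over high-contrast text such as the plate:

img = cv2.imread('car_front.jpg', cv2.IMREAD_GRAYSCALE)
text_mask = process(img)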
def logoDetect(img, imgo):
    '''Detect the vehicle logo region above the license plate.'''
    imglogo = imgo.copy()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, (2 * img.shape[1], 2 * img.shape[0]), interpolation=cv2.INTER_CUBIC)
    #img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, -3)
    ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #img = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=9)
    img = cv2.Canny(img, 100, 200)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    img = cv2.dilate(img, element2, iterations=1)
    img = cv2.erode(img, element1, iterations=3)
    img = cv2.dilate(img, element2, iterations=3)
    # find contours and keep the largest plausible logo box
    im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    tema = 0
    result = []
    for con in contours:
        x, y, w, h = cv2.boundingRect(con)
        area = w * h
        ratio = max(w / h, h / w)
        if area > 300 and area < 20000 and ratio < 2:
            if area > tema:
                tema = area
                result = [x, y, w, h]
                ratio2 = ratio
    # map the logo box back onto the original image, relative to the plate position
    # (`plate` is a global (x, y, w, h) box from an earlier plate-detection step)
    logo2_X = [int(result[0] / 2 + plate[0] - 3), int(result[0] / 2 + plate[0] + result[2] / 2 + 3)]
    logo2_Y = [int(result[1] / 2 + max(0, plate[1] - plate[3] * 3.0) - 3), int(result[1] / 2 + max(0, plate[1] - plate[3] * 3.0) + result[3] / 2) + 3]
    cv2.rectangle(img, (result[0], result[1]), (result[0] + result[2], result[1] + result[3]), (255, 0, 0), 2)
    cv2.rectangle(imgo, (logo2_X[0], logo2_Y[0]), (logo2_X[1], logo2_Y[1]), (0, 0, 255), 2)
    print(tema, ratio2, result)
    logo2 = imglogo[logo2_Y[0]:logo2_Y[1], logo2_X[0]:logo2_X[1]]
    cv2.imwrite('./logo2.jpg', logo2)
    return img
def hsvModer(self, index, hsv_valueT, hsv_value_B):
    img_BGR = self.img[index]
    img_RGB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2RGB)
    img_HSV = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2HSV)
    lower_red = np.array(hsv_value_B)
    upper_red = np.array(hsv_valueT)
    mask = cv2.inRange(img_HSV, lower_red, upper_red)
    res = cv2.bitwise_and(img_RGB, img_RGB, mask=mask)
    if self.erosion:
        kernel = np.ones((5, 5), np.uint8)
        res = cv2.erode(res, kernel, iterations=1)
    if self.dilate:
        kernel = np.ones((9, 9), np.uint8)
        res = cv2.dilate(res, kernel, iterations=1)
    return res
def find_components(edges, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    # Perform increasingly aggressive dilation until there are just a few
    # connected components.
    count = 21
    dilation = 5
    n = 1
    while count > max_components:
        n += 1
        dilated_image = dilate(edges, N=3, iterations=n)
        # OpenCV 2.x/4.x two-value return (cf. the three-value variant above)
        contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        count = len(contours)
    # print(dilation)
    # Image.fromarray(edges).show()
    # Image.fromarray(255 * dilated_image).show()
    return contours
# arch_light_track.py — project: Vision_Processing-2016, author: Sabercat-Robotics-4146-FRC
def get_bounding_rect(cap, win_cap, win, upper, lower):
    # convert to HSV, blur, threshold to the given colour range,
    # then erode/dilate to clean up the mask
    hsv = cv2.blur(cv2.cvtColor(cap, cv2.COLOR_BGR2HSV), (5, 5))
    msk = cv2.inRange(hsv, np.array(lower), np.array(upper))
    msk = cv2.erode(msk, None, iterations=3)
    msk = cv2.dilate(msk, None, iterations=3)
    im2, contours, hierarchy = cv2.findContours(msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        areas = [cv2.contourArea(c) for c in contours]  # get the area of each contour
        max_index = np.argmax(areas)  # get the index of the largest contour by area
        cnts = contours[max_index]  # get the largest contour by area
        cv2.drawContours(msk, [cnts], 0, (0, 255, 0), 3)  # draw the contour onto the mask image
        x, y, w, h = cv2.boundingRect(cnts)  # get the bounding box of the contour
        cv2.rectangle(win_cap, (x, y), (x + w, y + h), (255, 255, 255), 2)  # draw the bounding box
        cv2.imshow("debug.", win_cap)
        try:
            # NB: `self` here suggests this function was originally a method
            self.smt_dash.putNumber('vis_x', x)
            self.smt_dash.putNumber('vis_y', y)
            self.smt_dash.putNumber('vis_w', w)
            self.smt_dash.putNumber('vis_h', h)
        except Exception:
            pass
def img_pre_treatment(file_path):
    im = cv2.imread(file_path)
    resize_pic = cv2.resize(im, (640, 480), interpolation=cv2.INTER_CUBIC)
    resize_pic = cv2.GaussianBlur(resize_pic, (5, 5), 0)
    cv2.imwrite('static/InterceptedIMG/resize.jpg', resize_pic)
    kernel = np.ones((3, 3), np.uint8)
    resize_pic = cv2.erode(resize_pic, kernel, iterations=3)
    resize_pic = cv2.dilate(resize_pic, kernel, iterations=3)
    cv2.imshow('image', resize_pic)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:  # Esc closes the preview
        cv2.destroyAllWindows()
    gray = cv2.cvtColor(resize_pic, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 90, 255, cv2.THRESH_BINARY)
    cv2.imshow('image', binary)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    return resize_pic, binary
def contrast_image(image, thresh1=180, thresh2=200, show=False):
    image = imutils.resize(image, height=scale_factor)
    # convert it to grayscale, and blur it slightly
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.GaussianBlur(gray, (5, 5), 0)
    # threshold the image, then perform a series of erosions and dilations
    # to remove any small regions of noise
    # (NB: thresh2 is the maxval here, so foreground pixels are set to 200, not 255)
    thresh = cv2.threshold(gray2, thresh1, thresh2, cv2.THRESH_BINARY)[1]
    eroded = cv2.erode(thresh, None, iterations=2)
    dilated = cv2.dilate(eroded, None, iterations=2)
    if show is True:  # this is for debugging purposes
        cv2.imshow("Contrast", dilated)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return dilated
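contrast_image depends on imutils and a module-level scale_factor, neither of which the snippet shows; plausible definitions (the value is an assumption):

import imutils

scale_factor = 500  # assumed target height in pixels for imutils.resize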
def process_word(thresh, output):
    # assign two rectangular kernels: one vertical, the other horizontal
    kernel = np.ones((2, 1), np.uint8)
    kernel2 = np.ones((1, 4), np.uint8)
    # use a closing morph operation, with fewer iterations than for letters,
    # then dilate horizontally to merge characters into words
    temp_img = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
    #temp_img = cv2.erode(thresh, kernel, iterations=2)
    word_img = cv2.dilate(temp_img, kernel2, iterations=1)
    (contours, _) = cv2.findContours(word_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(output, (x - 1, y - 5), (x + w, y + h), (0, 255, 0), 1)
    return output
#processing line by line boxing
def process_line(thresh, output):
    # assign two rectangular kernels: one horizontal, the other slightly taller
    kernel = np.ones((1, 5), np.uint8)
    kernel2 = np.ones((2, 4), np.uint8)
    # use a closing morph operation, with fewer iterations than for letters,
    # then dilate horizontally to merge words into lines
    temp_img = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel2, iterations=2)
    #temp_img = cv2.erode(thresh, kernel, iterations=2)
    line_img = cv2.dilate(temp_img, kernel, iterations=5)
    (contours, _) = cv2.findContours(line_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(output, (x - 1, y - 5), (x + w, y + h), (0, 255, 0), 1)
    return output
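A hypothetical driver for these boxing helpers, assuming a scanned page image (filename assumed):

img = cv2.imread('page.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
boxed = process_line(thresh, img.copy())
cv2.imwrite('lines_boxed.png', boxed)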
#processing par by par boxing