def _extract_spots(self) -> None:
# Dilate and erode to 'clean' the spots (note this distorts the number itself, so we only use it for spot extraction)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
img = cv2.dilate(self._img, kernel, iterations=1)
img = cv2.erode(img, kernel, iterations=2)
img = cv2.dilate(img, kernel, iterations=1)
# Perform a simple blob detect
params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = 20 # The dot in 20pt font has area of about 30
params.filterByCircularity = True
params.minCircularity = 0.7
params.filterByConvexity = True
params.minConvexity = 0.8
params.filterByInertia = True
params.minInertiaRatio = 0.4
detector = cv2.SimpleBlobDetector_create(params)
self.spot_keypoints = detector.detect(img)
# Log intermediate image
img_with_keypoints = cv2.drawKeypoints(img, self.spot_keypoints, outImage=np.array([]), color=(0, 0, 255),
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
self.intermediate_images.append(NamedImage(img_with_keypoints, 'Spot Detection Image'))
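The dilate/erode/erode/dilate sequence above is a morphological close followed by an open; a minimal equivalent sketch using cv2.morphologyEx (the input path is hypothetical):

import cv2

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
img = cv2.imread('dice.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
# dilate then erode is a close; erode then dilate is an open
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)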
def apply_filters(self, image, denoise=False):
""" This method is used to apply required filters to the
to extracted regions of interest. Every square in a
sudoku square is considered to be a region of interest,
since it can potentially contain a value. """
# Convert to grayscale
source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Denoise the grayscale image if requested in the params
if denoise:
denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
# source_blur = denoised_gray
else:
source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
source_thresh = cv2.adaptiveThreshold(source_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 5, 2)
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
if ENABLE_PREVIEW_ALL:
image_preview(source_dilated)
return source_dilated
def calculate_entropy(image):
    entropy = image.copy()
    # process the image in 10x10 blocks
    for i in range(0, entropy.shape[0], 10):
        for j in range(0, entropy.shape[1], 10):
            sub_image = entropy[i:i+10, j:j+10]
            histogram = cv2.calcHist([sub_image], [0], None, [256], [0, 256])
            # unnormalized sum of h*log(h) over the nonzero histogram bins
            block_sum = 0
            for k in range(256):
                if histogram[k] != 0:
                    block_sum = block_sum + (histogram[k] * math.log(histogram[k]))
            entropy[i:i+10, j:j+10] = block_sum
    # Otsu-threshold the entropy map, then erode away small specks
    ret2, th2 = cv2.threshold(entropy, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    newfin = cv2.erode(th2, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    return newfin
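Note that the block statistic above is an unnormalized sum of h*log(h) over raw histogram counts, not the Shannon entropy -sum(p*log(p)); a vectorized sketch of the normalized version, offered as an assumed alternative:

import numpy as np

def block_entropy(patch):
    # Shannon entropy of a grayscale patch, in bits
    hist = np.bincount(patch.ravel(), minlength=256).astype(np.float64)
    p = hist / hist.sum()
    p = p[p > 0]
    return -np.sum(p * np.log2(p))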
def add_blobs(crop_frame):
frame=cv2.GaussianBlur(crop_frame, (3, 3), 0)
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of green color in HSV
lower_green = np.array([70,50,50])
upper_green = np.array([85,255,255])
# Threshold the HSV image to get only green colors
mask = cv2.inRange(hsv, lower_green, upper_green)
mask = cv2.erode(mask, None, iterations=1)
mask = cv2.dilate(mask, None, iterations=1)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame,frame, mask= mask)
detector = cv2.SimpleBlobDetector_create(params) # params: a SimpleBlobDetector_Params assumed configured at module level
# Detect blobs.
reversemask=255-mask
keypoints = detector.detect(reversemask)
if keypoints:
print "found blobs"
if len(keypoints) > 4:
keypoints.sort(key=(lambda s: s.size))
keypoints=keypoints[0:3]
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
else:
print "no blobs"
im_with_keypoints=crop_frame
return im_with_keypoints #, max_blob_dist, blob_center, keypoint_in_orders
def dif_gaus(image, lower, upper):
# GaussianBlur needs an odd ksize, so this only yields valid sizes when callers pass even values
lower, upper = int(lower-1), int(upper-1)
lower = cv2.GaussianBlur(image,ksize=(lower,lower),sigmaX=0)
upper = cv2.GaussianBlur(image,ksize=(upper,upper),sigmaX=0)
# upper +=50
# lower +=50
dif = lower-upper # uint8 subtraction wraps modulo 256
# dif *= .1
# dif = cv2.medianBlur(dif,3)
# dif = 255-dif
dif = cv2.inRange(dif, np.asarray(200),np.asarray(256))
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
dif = cv2.dilate(dif, kernel, iterations=2)
dif = cv2.erode(dif, kernel, iterations=1)
# dif = cv2.max(image,dif)
# dif = cv2.dilate(dif, kernel, iterations=1)
return dif
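Since cv2.GaussianBlur only accepts odd kernel sizes, dif_gaus expects even lower/upper arguments; a usage sketch under that assumption (the input path is hypothetical):

import cv2

gray = cv2.imread('eye.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
mask = dif_gaus(gray, 6, 16)  # blurs with ksize 5 and 15, then thresholds the wrapped difference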
def random_z_rotation(rgb, depth, pose, camera):
rotation = random.uniform(-180, 180)
rotation_matrix = Transform()
rotation_matrix.set_rotation(0, 0, math.radians(rotation))
pixel = center_pixel(pose, camera)
new_rgb = rotate_image(rgb, rotation, pixel[0])
new_depth = rotate_image(depth, rotation, pixel[0])
# thresholding below 50 removes some interpolation noise, which covers small holes
mask = (new_depth >= 50).astype(np.uint8)[:, :, np.newaxis]
rgb_mask = np.all(new_rgb != 0, axis=2).astype(np.uint8)
kernel = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]], np.uint8)
# erode the remaining interpolation noise, which would otherwise hurt future blending
eroded_mask = cv2.erode(mask, kernel, iterations=2)
eroded_rgb_mask = cv2.erode(rgb_mask, kernel, iterations=2)
new_depth = new_depth * eroded_mask
new_rgb = new_rgb * eroded_rgb_mask[:, :, np.newaxis]
new_pose = combine_view_transform(pose, rotation_matrix)
return new_rgb, new_depth, new_pose
def enhance(img,blockSize=8,boxSize=4):
"""image enhancement
return: enhanced image
"""
# img=cv2.equalizeHist(np.uint8(img))
img,imgfore=segmentation(img)
# img=blockproc(np.uint8(img),cv2.equalizeHist,(16,16))
img=img.copy(order='C').astype(np.float64)
theta=_pre.calcDirectionBox(img,blockSize,boxSize)
wl=calcWlBox(img,blockSize,boxSize)
sigma=5
# apply the Gabor filter bank five times
for _ in range(5):
    img=_pre.GaborFilterBox(img,blockSize,boxSize,wl,np.pi/2-theta,sigma)
img=np.asarray(img)
imgfore=cv2.erode(imgfore,np.ones((8,8)),iterations=4)
img[np.where(imgfore==0)]=255
img=basic.truncate(img,method='default')
return img,imgfore
def _do_filter(self, frame):
''' Process a single frame. '''
# blur to reduce noise
frame = cv2.GaussianBlur(frame, (5, 5), 0, borderType=cv2.BORDER_CONSTANT)
# threshold to find contiguous regions of "bright" pixels
# ignore all "dark" (<1/8 max) pixels
frame_max = numpy.max(frame)
frame_min = numpy.min(frame)
# if the frame is completely dark, then just return it
if frame_max == frame_min:
    return frame
threshold = frame_min + (frame_max - frame_min) / 8
_, frame = cv2.threshold(frame, threshold, 255, cv2.THRESH_BINARY)
# filter out single pixels and other noise
frame = cv2.erode(frame, self._element_shrink)
# restore and join nearby regions (in case one fish has a skinny middle...)
frame = cv2.dilate(frame, self._element_grow)
return frame
def __init__(self):
super(TargetFilterBGSub, self).__init__()
# background subtractor
#self._bgs = cv2.BackgroundSubtractorMOG()
#self._bgs = cv2.BackgroundSubtractorMOG2() # not great defaults, and need bShadowDetection to be False
#self._bgs = cv2.BackgroundSubtractorMOG(history=10, nmixtures=3, backgroundRatio=0.2, noiseSigma=20)
# varThreshold: higher values detect fewer/smaller changed regions
self._bgs = cv2.createBackgroundSubtractorMOG2(history=0, varThreshold=8, detectShadows=False)
# ??? history is ignored? Only if learning_rate is > 0, or...? Unclear.
# Learning rate for background subtractor.
# 0 = never adapts after initial background creation.
# A bit above 0 looks good.
# Lower values are better for detecting slower movement, though it
# takes a bit of time to learn the background initially.
self._learning_rate = 0.001
# elements to reuse in erode/dilate
# CROSS eliminates more horizontal/vertical lines and leaves more
# blobs with extent in both axes [than RECT].
self._element_shrink = cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5))
self._element_grow = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
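A minimal usage sketch for the subtractor configured above, feeding frames through apply() with the stored learning rate (the video path is hypothetical):

import cv2

f = TargetFilterBGSub()
cap = cv2.VideoCapture('tank.mp4')  # hypothetical source
while True:
    ok, frame = cap.read()
    if not ok:
        break
    # apply() returns the foreground mask; learningRate controls adaptation
    mask = f._bgs.apply(frame, learningRate=f._learning_rate)
    mask = cv2.erode(mask, f._element_shrink)
    mask = cv2.dilate(mask, f._element_grow)
cap.release()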
def checkAvailability(sift, tkp, tdes, matchimg):
"""
:param sift:
:param tkp:
:param tdes:sift feature object, template keypoints, and template descriptor
:param matchimg:
:return:
"""
qimg = cv2.imread(matchimg)
qimggray = cv2.cvtColor(qimg,cv2.COLOR_BGR2GRAY)
# kernel = np.ones((5,5), np.uint8)
# qimggray = cv2.erode(qimggray, kernel, iterations=1)
# ret,threshimg = cv2.threshold(qimggray,100,255,cv2.THRESH_BINARY)
qkp,qdes = sift.detectAndCompute(qimggray, None)
# plt.imshow(threshimg, 'gray'), plt.show()
FLANN_INDEX_KDTREE=1 # KD-tree is FLANN index 1 (0 is linear search)
index_params=dict(algorithm=FLANN_INDEX_KDTREE,trees=5)
# FLANN_INDEX_LSH = 6
# index_params = dict(algorithm=FLANN_INDEX_LSH,
# table_number=12, # 12
# key_size=20, # 20
# multi_probe_level=2) # 2
search_params = dict(checks = 50)
flann=cv2.FlannBasedMatcher(index_params,search_params)
matches=flann.knnMatch(tdes,qdes,k=2)
goodMatch=[]
for m_n in matches:
if len(m_n) != 2:
continue
m, n = m_n
if(m.distance<0.75*n.distance):
goodMatch.append(m)
MIN_MATCH_COUNT = 30
if (len(goodMatch) >= MIN_MATCH_COUNT):
tp = []
qp = []
for m in goodMatch:
tp.append(tkp[m.queryIdx].pt)
qp.append(qkp[m.trainIdx].pt)
tp, qp = np.float32((tp, qp))
H, status = cv2.findHomography(tp, qp, cv2.RANSAC, 3.0)
h = timg.shape[0] # timg: the template image, assumed to be in scope
w = timg.shape[1]
trainBorder = np.float32([[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]])
queryBorder = cv2.perspectiveTransform(trainBorder, H)
cv2.polylines(qimg, [np.int32(queryBorder)], True, (0, 255, 0), 5)
cv2.imshow('result', qimg)
plt.imshow(qimg, 'gray'), plt.show()
return True
else:
print "Not Enough match found- %d/%d" % (len(goodMatch), MIN_MATCH_COUNT)
return False
# cv2.imshow('result', qimg)
# if cv2.waitKey(10) == ord('q'):
# cv2.destroyAllWindows()
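A hedged usage sketch for checkAvailability: build the SIFT detector and template features it expects, and define the module-level timg the homography step reads (cv2.SIFT_create requires OpenCV >= 4.4; older builds use cv2.xfeatures2d.SIFT_create; file names are hypothetical):

import cv2

sift = cv2.SIFT_create()  # or cv2.xfeatures2d.SIFT_create() on older builds
timg = cv2.imread('template.png', cv2.IMREAD_GRAYSCALE)  # read by checkAvailability as a global
tkp, tdes = sift.detectAndCompute(timg, None)
found = checkAvailability(sift, tkp, tdes, 'scene.jpg')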
def process(img):
img=cv2.medianBlur(img,5)
kernel=np.ones((3,3),np.uint8)
#img=cv2.erode(img,kernel,iterations = 1)
sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 3)
element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
dilation = cv2.dilate(sobel, element2, iterations = 1)
erosion = cv2.erode(dilation, element1, iterations = 1)
dilation2 = cv2.dilate(erosion, element2,iterations = 3)
#img=cv2.dilate(img,kernel,iterations = 1)
#img=cv2.Canny(img,100,200)
return dilation2
def logoDetect(img,imgo):
'''Locate the logo region above the detected license plate and crop it.'''
imglogo=imgo.copy()
img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img=cv2.resize(img,(2*img.shape[1],2*img.shape[0]),interpolation=cv2.INTER_CUBIC)
#img=cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,-3)
ret,img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#img=cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 9)
img=cv2.Canny(img,100,200)
element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
img = cv2.dilate(img, element2,iterations = 1)
img = cv2.erode(img, element1, iterations = 3)
img = cv2.dilate(img, element2,iterations = 3)
# find contours
im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
tema=0
result=[]
for con in contours:
x,y,w,h=cv2.boundingRect(con)
area=w*h
ratio=max(w/h,h/w)
if area>300 and area<20000 and ratio<2:
if area>tema:
tema=area
result=[x,y,w,h]
ratio2=ratio
# map the logo box back to original-image coordinates relative to the plate position ('plate' is assumed to be in scope)
logo2_X=[int(result[0]/2+plate[0]-3),int(result[0]/2+plate[0]+result[2]/2+3)]
logo2_Y=[int(result[1]/2+max(0,plate[1]-plate[3]*3.0)-3),int(result[1]/2+max(0,plate[1]-plate[3]*3.0)+result[3]/2)+3]
cv2.rectangle(img,(result[0],result[1]),(result[0]+result[2],result[1]+result[3]),(255,0,0),2)
cv2.rectangle(imgo,(logo2_X[0],logo2_Y[0]),(logo2_X[1],logo2_Y[1]),(0,0,255),2)
print(tema, ratio2, result)
logo2=imglogo[logo2_Y[0]:logo2_Y[1],logo2_X[0]:logo2_X[1]]
cv2.imwrite('./logo2.jpg',logo2)
return img
def hsvModer(self, index, hsv_valueT, hsv_value_B):
img_BGR = self.img[index]
img_RGB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2RGB)
img_HSV = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2HSV)
lower_red = np.array(hsv_value_B)
upper_red = np.array(hsv_valueT)
mask = cv2.inRange(img_HSV, lower_red, upper_red)
res = cv2.bitwise_and(img_RGB, img_RGB, mask=mask)
if self.erosion:
kernel = np.ones((5, 5), np.uint8)
res = cv2.erode(res, kernel, iterations=1)
if self.dilate:
kernel = np.ones((9, 9), np.uint8)
res = cv2.dilate(res, kernel, iterations=1)
return res
def find_lines(img, acc_threshold=0.25, should_erode=True):
if len(img.shape) == 3 and img.shape[2] == 3: # if it's color
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img = cv2.GaussianBlur(img, (11, 11), 0)
img = cv2.adaptiveThreshold(
img,
255,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY,
5,
2)
img = cv2.bitwise_not(img)
# thresh = 127
# edges = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
# edges = cv2.Canny(blur, 500, 500, apertureSize=3)
if should_erode:
element = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
img = cv2.erode(img, element)
theta = np.pi/2000
angle_threshold = 2
horizontal = cv2.HoughLines(
img,
1,
theta,
int(acc_threshold * img.shape[1]),
min_theta=np.radians(90 - angle_threshold),
max_theta=np.radians(90 + angle_threshold))
vertical = cv2.HoughLines(
img,
1,
theta,
int(acc_threshold * img.shape[0]),
min_theta=np.radians(-angle_threshold),
max_theta=np.radians(angle_threshold),
)
horizontal = list(horizontal) if horizontal is not None else []
vertical = list(vertical) if vertical is not None else []
horizontal = [line[0] for line in horizontal]
vertical = [line[0] for line in vertical]
horizontal = np.asarray(horizontal)
vertical = np.asarray(vertical)
return horizontal, vertical
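find_lines returns (rho, theta) pairs from cv2.HoughLines; a small sketch converting one pair into segment endpoints for drawing (the 2000 px half-length is an arbitrary choice):

import numpy as np
import cv2

def draw_polar_line(img, rho, theta, color=(0, 0, 255)):
    # convert the (rho, theta) normal form into two distant points on the line
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    pt1 = (int(x0 - 2000 * b), int(y0 + 2000 * a))
    pt2 = (int(x0 + 2000 * b), int(y0 - 2000 * a))
    cv2.line(img, pt1, pt2, color, 1)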
def postprocess_colormap(cls, postprocess=True):
"""Create a colormap out of the classes and postprocess the face."""
batch = vs.apply_colormap(cls, vmin=0, vmax=21, cmap=CMAP)
cmap = vs.apply_colormap(np.array(range(22), dtype='uint8'),
vmin=0, vmax=21, cmap=CMAP)
COLSET = cmap[18:22]
FCOL = cmap[11]
if postprocess:
kernel = np.ones((2, 2), dtype=np.uint8)
for im in batch:
for col in COLSET:
# Extract the map of the matching color.
colmap = np.all(im == col, axis=2).astype(np.uint8)
# Erode.
while np.sum(colmap) > 10:
colmap = cv2.erode(colmap, kernel)
# Prepare the original map for remapping.
im[np.all(im == col, axis=2)] = FCOL
# Backproject.
im[colmap == 1] = col
return batch[:, :, :, :3]
def img_pre_treatment(file_path):
im = cv2.imread(file_path)
resize_pic=cv2.resize(im,(640,480),interpolation=cv2.INTER_CUBIC)
resize_pic = cv2.GaussianBlur(resize_pic,(5,5),0)
cv2.imwrite('static/InterceptedIMG/resize.jpg',resize_pic)
kernel = np.ones((3,3),np.uint8)
resize_pic = cv2.erode(resize_pic,kernel,iterations = 3)
resize_pic = cv2.dilate(resize_pic,kernel,iterations = 3)
cv2.imshow('image',resize_pic)
k = cv2.waitKey(0) & 0xFF
if k == 27:
cv2.destroyAllWindows()
gray = cv2.cvtColor(resize_pic,cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray,90,255,cv2.THRESH_BINARY)
cv2.imshow('image',binary)
k = cv2.waitKey(0) & 0xFF
if k == 27:
cv2.destroyAllWindows()
return resize_pic,binary
def contrast_image(image, thresh1=180, thresh2=200, show=False):
image = imutils.resize(image, height=scale_factor)
# convert it to grayscale, and blur it slightly
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray2 = cv2.GaussianBlur(gray, (5, 5), 0)
# threshold the image, then perform a series of erosions + dilations to remove any small regions of noise
# (note thresh2 is passed to cv2.threshold as the output maxval, not as a second threshold)
thresh = cv2.threshold(gray2, thresh1, thresh2, cv2.THRESH_BINARY)[1]
thresh2 = cv2.erode(thresh, None, iterations=2)
thresh3 = cv2.dilate(thresh2, None, iterations=2)
if show is True: #this is for debugging puposes
cv2.imshow("Contrast", thresh3)
cv2.waitKey(0)
cv2.destroyAllWindows()
return thresh3
def process_letter(thresh,output):
# assign the kernel size
kernel = np.ones((2,1), np.uint8) # vertical
# use closing morph operation then erode to narrow the image
temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel,iterations=3)
# temp_img = cv2.erode(thresh,kernel,iterations=2)
letter_img = cv2.erode(temp_img,kernel,iterations=1)
# find contours
(contours, _) = cv2.findContours(letter_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# loop in all the contour areas
for cnt in contours:
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)
return output
#processing letter by letter boxing
def process_word(thresh,output):
# assign 2 rectangle kernel size 1 vertical and the other will be horizontal
kernel = np.ones((2,1), np.uint8)
kernel2 = np.ones((1,4), np.uint8)
# use closing morph operation but fewer iterations than the letter then erode to narrow the image
temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel,iterations=2)
#temp_img = cv2.erode(thresh,kernel,iterations=2)
word_img = cv2.dilate(temp_img,kernel2,iterations=1)
(contours, _) = cv2.findContours(word_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)
return output
#processing line by line boxing
def process_line(thresh,output):
# assign a rectangle kernel size 1 vertical and the other will be horizontal
kernel = np.ones((1,5), np.uint8)
kernel2 = np.ones((2,4), np.uint8)
# use closing morph operation but fewer iterations than the letter then erode to narrow the image
temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel2,iterations=2)
#temp_img = cv2.erode(thresh,kernel,iterations=2)
line_img = cv2.dilate(temp_img,kernel,iterations=5)
(contours, _) = cv2.findContours(line_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)
return output
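A usage sketch chaining the three boxing helpers on a binarized page (the input path is hypothetical; the two-value findContours unpacking above assumes OpenCV 2.4 or >= 4.x):

import cv2

page = cv2.imread('page.png')  # hypothetical scan
gray = cv2.cvtColor(page, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
letters = process_letter(thresh, page.copy())
words = process_word(thresh, page.copy())
lines = process_line(thresh, page.copy())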
#processing par by par boxing
def process_image(image):
"""
Args:
image: The image to process
Returns:
sub_image: The rotated and extracted sub-image.
"""
# Convert image to black and white - we cannot take the photos in black and white as we
# must first search for the red triangle.
if len(image.shape) == 3:
processed_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
else:
processed_img = image
if config.real_hardware:
num_iterations = 8
else:
num_iterations = 8
processed_img = cv2.GaussianBlur(processed_img, (21, 21), 0)
_, processed_img = cv2.threshold(processed_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Put a border around the image to stop the edges of the images creating artifacts.
padded_image = np.zeros((processed_img.shape[0] + 10, processed_img.shape[1] + 10), np.uint8)
padded_image[5:processed_img.shape[0]+5, 5:processed_img.shape[1]+5] = processed_img
kernel = np.array([[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0]], np.uint8)
padded_image = cv2.erode(padded_image, kernel, iterations=num_iterations)
processed_img = padded_image[25:padded_image.shape[0] - 25, 25:padded_image.shape[1] - 25]
#cv2.imshow('Padded Image', padded_image)
#cv2.imshow('Processed image', processed_img)
#cv2.waitKey(0)
# Debugging code - useful to show the images are being eroded correctly.
#spacer = processed_img[:, 0:2].copy()
#spacer.fill(100)
#combined_image = np.concatenate((processed_img, spacer), axis=1)
#combined_image = np.concatenate((combined_image, image), axis=1)
#cv2.imshow('PreProcessed and Processed Image', combined_image)
#cv2.waitKey(0)
# Save sub_image to debug folder if required.
if __debug__:
iadebug.save_processed_image(processed_img)
return processed_img
def detect_shirt(self):
#self.dst=cv2.inRange(self.norm_rgb,np.array([self.lb,self.lg,self.lr],np.uint8),np.array([self.b,self.g,self.r],np.uint8))
self.dst=cv2.inRange(self.norm_rgb,np.array([20,20,20],np.uint8),np.array([255,110,80],np.uint8))
_,self.dst=cv2.threshold(self.dst,0,255,cv2.THRESH_OTSU+cv2.THRESH_BINARY)
fg=cv2.erode(self.dst,None,iterations=2)
#cv2.imshow("fore",fg)
bg=cv2.dilate(self.dst,None,iterations=3)
_,bg=cv2.threshold(bg, 1,128,cv2.THRESH_BINARY_INV)
#cv2.imshow("back",bg)
mark=cv2.add(fg,bg)
mark32=np.int32(mark)
cv2.watershed(self.norm_rgb,mark32)
self.m=cv2.convertScaleAbs(mark32)
_,self.m=cv2.threshold(self.m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#cv2.imshow("final_tshirt",self.m)
cntr,h=cv2.findContours(self.m,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
return self.m,cntr
def movement(mat_1,mat_2):
mat_1_gray = cv2.cvtColor(mat_1.copy(),cv2.COLOR_BGR2GRAY)
mat_1_gray = cv2.blur(mat_1_gray,(blur1,blur1))
_,mat_1_gray = cv2.threshold(mat_1_gray,100,255,0)
mat_2_gray = cv2.cvtColor(mat_2.copy(),cv2.COLOR_BGR2GRAY)
mat_2_gray = cv2.blur(mat_2_gray,(blur1,blur1))
_,mat_2_gray = cv2.threshold(mat_2_gray,100,255,0)
mat_2_gray = cv2.bitwise_xor(mat_1_gray,mat_2_gray)
mat_2_gray = cv2.blur(mat_2_gray,(blur2,blur2))
_,mat_2_gray = cv2.threshold(mat_2_gray,70,255,0)
mat_2_gray = cv2.erode(mat_2_gray,np.ones((erodeval,erodeval)))
mat_2_gray = cv2.dilate(mat_2_gray,np.ones((4,4)))
_, contours,__ = cv2.findContours(mat_2_gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
if len(contours) > 0:
    return True # if there were any movements
return False # if not
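A minimal sketch driving movement() from a camera, defining the module-level tuning globals it reads (all values are assumptions):

import cv2
import numpy as np

blur1, blur2, erodeval = 5, 9, 7  # assumed tuning values read by movement()
cap = cv2.VideoCapture(0)
ok, prev = cap.read()
while ok:
    ok, frame = cap.read()
    if not ok:
        break
    if movement(prev, frame):
        print("movement detected")
    prev = frame
cap.release()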
#Pedestrian Recognition Thread