def foreground(self, image, smooth=False, grayscale=False):
    """
    Extract the foreground from the background.
    :param image: input frame (BGR or greyscale)
    :param smooth: apply smoothing before background subtraction
    :param grayscale: convert to greyscale before background subtraction
    :return: foreground mask
    """
    if smooth and grayscale:
        image = self.toGrayscale(image)
        image = self.smooth(image)
    elif smooth:
        image = self.smooth(image)
    elif grayscale:
        image = self.toGrayscale(image)
    fgmask = self.fgbg.apply(image)
    ret, mask = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY_INV)
    mask_inv = cv2.bitwise_not(mask)
    return mask_inv
def overlay_img(self):
    """Overlay the transparent, transformed image of the arc onto our CV image"""
    # overlay the arc on the image
    rows, cols, channels = self.transformed.shape
    roi = self.cv_image[0:rows, 0:cols]
    # change arc_image to grayscale
    arc2gray = cv2.cvtColor(self.transformed, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(arc2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # black out area of arc in ROI
    img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    img2_fg = cv2.bitwise_and(self.transformed, self.transformed, mask=mask)
    # put arc on ROI and modify the main image
    dst = cv2.add(img1_bg, img2_fg)
    self.cv_image[0:rows, 0:cols] = dst
def extract_color(src, h_th_low, h_th_up, s_th, v_th):
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    if h_th_low > h_th_up:
        # hue range wraps around 0 (e.g. red), so combine the two halves
        ret, h_dst_1 = cv2.threshold(h, h_th_low, 255, cv2.THRESH_BINARY)
        ret, h_dst_2 = cv2.threshold(h, h_th_up, 255, cv2.THRESH_BINARY_INV)
        dst = cv2.bitwise_or(h_dst_1, h_dst_2)
    else:
        ret, dst = cv2.threshold(h, h_th_low, 255, cv2.THRESH_TOZERO)
        ret, dst = cv2.threshold(dst, h_th_up, 255, cv2.THRESH_TOZERO_INV)
        ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY)
    ret, s_dst = cv2.threshold(s, s_th, 255, cv2.THRESH_BINARY)
    ret, v_dst = cv2.threshold(v, v_th, 255, cv2.THRESH_BINARY)
    dst = cv2.bitwise_and(dst, s_dst)
    dst = cv2.bitwise_and(dst, v_dst)
    return dst
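A minimal usage sketch for extract_color; the image path and threshold values below are illustrative assumptions. Extracting red, whose hue wraps around 0 on OpenCV's 0-179 hue scale, exercises the h_th_low > h_th_up branch:

import cv2

img = cv2.imread("input.jpg")                       # any BGR image
red_mask = extract_color(img, h_th_low=170, h_th_up=10, s_th=80, v_th=80)
cv2.imwrite("red_mask.png", red_mask)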
def calculate_entropy(image):
    entropy = image.copy()
    sum = 0
    i = 0
    j = 0
    while i < entropy.shape[0]:
        j = 0
        while j < entropy.shape[1]:
            # compute the entropy-like score of each 10 x 10 block from its histogram
            sub_image = entropy[i:i+10, j:j+10]
            histogram = cv2.calcHist([sub_image], [0], None, [256], [0, 256])
            sum = 0
            for k in range(256):
                if histogram[k] != 0:
                    sum = sum + (histogram[k] * math.log(histogram[k]))
            entropy[i:i+10, j:j+10] = sum
            j = j + 10
        i = i + 10
    ret2, th2 = cv2.threshold(entropy, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    newfin = cv2.erode(th2, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    return newfin
find_rect_and_transform.py (project: quadrilaterals-rectifier, author: michal2229)
def extract_rect(im):
    imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # finding contour with max area
    largest = None
    for cnt in contours:
        if largest is None or cv2.contourArea(cnt) > cv2.contourArea(largest):
            largest = cnt

    peri = cv2.arcLength(largest, True)
    appr = cv2.approxPolyDP(largest, 0.02 * peri, True)
    # cv2.drawContours(im, appr, -1, (0,255,0), 3)

    points_list = [[i[0][0], i[0][1]] for i in appr]
    left = sorted(points_list, key=lambda p: p[0])[0:2]
    right = sorted(points_list, key=lambda p: p[0])[2:4]
    print("l " + str(left))
    print("r " + str(right))

    lu = sorted(left, key=lambda p: p[1])[0]
    ld = sorted(left, key=lambda p: p[1])[1]
    ru = sorted(right, key=lambda p: p[1])[0]
    rd = sorted(right, key=lambda p: p[1])[1]
    print("lu " + str(lu))
    print("ld " + str(ld))
    print("ru " + str(ru))
    print("rd " + str(rd))

    lu_ = [(lu[0] + ld[0]) / 2, (lu[1] + ru[1]) / 2]
    ld_ = [(lu[0] + ld[0]) / 2, (ld[1] + rd[1]) / 2]
    ru_ = [(ru[0] + rd[0]) / 2, (lu[1] + ru[1]) / 2]
    rd_ = [(ru[0] + rd[0]) / 2, (ld[1] + rd[1]) / 2]
    print("lu_ " + str(lu_))
    print("ld_ " + str(ld_))
    print("ru_ " + str(ru_))
    print("rd_ " + str(rd_))

    src_pts = np.float32(np.array([lu, ru, rd, ld]))
    dst_pts = np.float32(np.array([lu_, ru_, rd_, ld_]))

    h, w, b = im.shape
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    print("H" + str(H))

    imw = cv2.warpPerspective(im, H, (w, h))
    # cropping image (indices cast to int so the slice also works under Python 3)
    return imw[int(lu_[1]):int(rd_[1]), int(lu_[0]):int(rd_[0])]
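A short, hedged usage sketch for extract_rect; the file names are assumptions. The input should contain one dominant, roughly rectangular shape, since the function rectifies the largest contour it finds:

import cv2

im = cv2.imread("photo_of_document.jpg")
rectified = extract_rect(im)
cv2.imwrite("rectified.jpg", rectified)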
def recognize_text(original):
    idcard = original
    gray = cv2.cvtColor(idcard, cv2.COLOR_BGR2GRAY)
    # Morphological gradient
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    opening = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, kernel)
    # Binarization
    ret, binarization = cv2.threshold(opening, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # Connect horizontally oriented regions
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
    connected = cv2.morphologyEx(binarization, cv2.MORPH_CLOSE, kernel)
    # find contours (three-value unpacking assumes OpenCV 3.x)
    _, contours, hierarchy = cv2.findContours(
        connected, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE
    )
    return contours, hierarchy
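A hedged usage sketch: recognize_text returns the contours of candidate text regions, so a typical next step is to draw their bounding boxes. The file names are assumptions:

import cv2

image = cv2.imread("idcard.jpg")
contours, hierarchy = recognize_text(image)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite("idcard_regions.png", image)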
def camera_gesture_trigger():
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, thresh1 = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    max_area = 0
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if area > max_area:
            max_area = area
            ci = i
    cnt = contours[ci]
    hull = cv2.convexHull(cnt)
    moments = cv2.moments(cnt)
    cnt = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    hull = cv2.convexHull(cnt, returnPoints=False)
    defects = cv2.convexityDefects(cnt, hull)
    if defects is not None:
        if defects.shape[0] >= 5:
            return 1
    return 0
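A minimal sketch of how this might be driven: the function reads frames from a module-level VideoCapture named cap, so one is opened here first (assuming a webcam at index 0):

import cv2

cap = cv2.VideoCapture(0)
if camera_gesture_trigger():
    print("gesture detected")
cap.release()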
def _extract_arm(self, img):
    # find center region of image frame (assume center region is 21 x 21 px)
    center_half = 10  # (=(21-1)/2)
    center = img[self.height // 2 - center_half: self.height // 2 + center_half,
                 self.width // 2 - center_half: self.width // 2 + center_half]
    # determine median depth value
    median_val = np.median(center)
    '''mask the image such that all pixels whose depth values
    lie within a particular range are gray and the rest are black
    '''
    img = np.where(abs(img - median_val) <= self.abs_depth_dev, 128, 0).astype(np.uint8)
    # Apply morphology operation to fill small holes in the image
    kernel = np.ones((5, 5), np.uint8)
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    # Find connected regions (to hand) to remove background objects
    # Use floodfill with a small image area (7 x 7 px) that is set to the gray color value
    kernel2 = 3
    img[self.height // 2 - kernel2: self.height // 2 + kernel2,
        self.width // 2 - kernel2: self.width // 2 + kernel2] = 128
    # a black mask to mask the 'non-connected' components black
    mask = np.zeros((self.height + 2, self.width + 2), np.uint8)
    floodImg = img.copy()
    # Use floodFill function to paint the connected regions white
    cv2.floodFill(floodImg, mask, (self.width // 2, self.height // 2), 255, flags=(4 | 255 << 8))
    # apply a binary threshold to keep only the connected hand region
    ret, floodedImg = cv2.threshold(floodImg, 129, 255, cv2.THRESH_BINARY)
    return floodedImg
def thresholding(img_grey):
    """
    This function creates a binary image using thresholding
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret3, img_binary = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    # invert so that black = 255
    ret, thresh1 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    return thresh1
def thresholding(img_grey):
    """
    This function creates a binary image using thresholding
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Global
    # ret1, thresh1 = cv.threshold(img_grey, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh1)
    #
    # # Adaptive Mean
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
    # ret2, thresh2 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh2)
    #
    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
    # ret3, thresh3 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh3)
    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret4, img_otsu = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    ret4, thresh4 = cv.threshold(img_otsu, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh4)
    return thresh4
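A minimal usage sketch for either thresholding variant above, assuming OpenCV is imported as cv (as in the function bodies) and a scanned page at "page.png":

import cv2 as cv

img_grey = cv.imread("page.png", cv.IMREAD_GRAYSCALE)
binary = thresholding(img_grey)   # ink becomes white (255), background black
cv.imwrite("page_binary.png", binary)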
Artificial-Potential-1.py (project: Artificial-Potential-Field, author: vampcoder)
def goal_force(arr, sx, sy, dx, dy, d_star):
    # sx, sy: source; dx, dy: destination; d_star: threshold distance from goal
    forcex = 0
    forcey = 0
    tau = 1  # constant
    printx('10')
    d = math.sqrt((dx - sx) * (dx - sx) + (dy - sy) * (dy - sy))
    if d > d_star:
        forcex += d_star * tau * math.sin(math.atan2(dx - sx, dy - sy))
        forcey += d_star * tau * math.cos(math.atan2(dx - sx, dy - sy))
    else:
        forcex += (dx - sx) * tau
        forcey += (dy - sy) * tau
    printx('11')
    return (forcex, forcey)
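A small, hedged check of the two branches. arr is unused by the function, and printx is a logging helper defined elsewhere in these scripts, so a no-op stand-in is assumed here purely to make the calls runnable:

import math

def printx(msg):          # stand-in for the project's logging helper
    pass

# within d_star of the goal the attractive force grows linearly with the offset
print(goal_force(None, sx=0, sy=0, dx=3, dy=4, d_star=10))    # (3, 4) with tau = 1
# beyond d_star its magnitude is capped at roughly d_star * tau
print(goal_force(None, sx=0, sy=0, dx=30, dy=40, d_star=10))  # approximately (6.0, 8.0)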
Artificial-potential-without-controller.py (project: Artificial-Potential-Field, author: vampcoder)
def goal_force(arr, sx, sy, dx, dy, d_star):
    # sx, sy: source; dx, dy: destination; d_star: threshold distance from goal
    forcex = 0
    forcey = 0
    tau = 1  # constant
    printx('10')
    d = math.sqrt((dx - sx) * (dx - sx) + (dy - sy) * (dy - sy))
    if d > d_star:
        forcex += d_star * tau * math.sin(math.atan2(dx - sx, dy - sy))
        forcey += d_star * tau * math.cos(math.atan2(dx - sx, dy - sy))
    else:
        forcex += (dx - sx) * tau
        forcey += (dy - sy) * tau
    printx('11')
    return (forcex, forcey)
Artificial-potential-without-controller.py (project: Artificial-Potential-Field, author: vampcoder)
def classify(img):
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)
    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)
    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # cv2.imshow('image', image)
    # k = cv2.waitKey(0)
    for i in range(len(contours)):
        cnt = contours[i]
        # note: as written this area filter can never be true (no area is both greater
        # than 35000 and less than 15000); the bounds appear transposed in the source
        if cv2.contourArea(cnt) > 35000 and cv2.contourArea(cnt) < 15000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    cv2.imshow('arr', arr)
    k = cv2.waitKey(0)
    return arr
Artificial-potential-controller.py (project: Artificial-Potential-Field, author: vampcoder)
def goal_force(arr, sx, sy, dx, dy, d_star):
    # sx, sy: source; dx, dy: destination; d_star: threshold distance from goal
    forcex = 0
    forcey = 0
    tau = 1000000  # constant
    printx('10')
    d = math.sqrt((dx - sx) * (dx - sx) + (dy - sy) * (dy - sy))
    if d > d_star:
        forcex += d_star * tau * math.sin(math.atan2(dx - sx, dy - sy))
        forcey += d_star * tau * math.cos(math.atan2(dx - sx, dy - sy))
    else:
        forcex += (dx - sx) * tau
        forcey += (dy - sy) * tau
    printx('11')
    return (forcex, forcey)
Artificial-potential-controller.py (project: Artificial-Potential-Field, author: vampcoder)
def classify(img):
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)
    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)
    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # cv2.imshow('image', image)
    # k = cv2.waitKey(0)
    for i in range(len(contours)):
        cnt = contours[i]
        if cv2.contourArea(cnt) > 3600 and cv2.contourArea(cnt) < 25000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    cv2.imshow('arr', arr)
    k = cv2.waitKey(0)
    return arr
Artificial-Potential-final.py (project: Artificial-Potential-Field, author: vampcoder)
def goal_force(arr, sx, sy, dx, dy, d_star):
    # sx, sy: source; dx, dy: destination; d_star: threshold distance from goal
    forcex = 0
    forcey = 0
    tau = 20  # constant
    printx('10')
    d = math.sqrt((dx - sx) * (dx - sx) + (dy - sy) * (dy - sy))
    if d > d_star:
        forcex += d_star * tau * math.sin(math.atan2(dx - sx, dy - sy))
        forcey += d_star * tau * math.cos(math.atan2(dx - sx, dy - sy))
    else:
        forcex += (dx - sx) * tau
        forcey += (dy - sy) * tau
    printx('11')
    return (forcex, forcey)
def getNextWindow(temp_p_map, threshold):
    p = WinProp()
    loc = np.argmax(temp_p_map)
    p.y = loc // temp_p_map.shape[1]   # integer division so (y, x) can index the map
    p.x = loc % temp_p_map.shape[1]
    p.val = temp_p_map[p.y, p.x]
    if p.val > threshold:
        p.have_max = True
    else:
        p.have_max = False
    return p
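A hedged usage sketch. WinProp is a small container class defined elsewhere in the project; a bare stand-in with no fields of its own is assumed here, since the function only sets attributes on it:

import numpy as np

class WinProp:            # assumed stand-in for the project's window-property container
    pass

p_map = np.zeros((4, 5), dtype=np.float32)
p_map[2, 3] = 0.9
win = getNextWindow(p_map, threshold=0.5)
print(win.y, win.x, win.val, win.have_max)   # 2 3 0.9 True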
# ================================================================================================
def get_image_xy_corner(self):
    """get Cartesian coordinates from raster"""
    import cv2
    if not self.image_path:
        return False
    image_xy_corners = []
    img = cv2.imread(self.image_path, cv2.IMREAD_GRAYSCALE)
    imagem = (255 - img)
    try:
        ret, thresh = cv2.threshold(imagem, 10, 128, cv2.THRESH_BINARY)
        try:
            contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        except Exception:
            # OpenCV 3.x returns an extra image value from findContours
            im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
        hierarchy = hierarchy[0]
        hierarhy_contours = [[] for _ in range(len(hierarchy))]
        for fry in range(len(contours)):
            currentContour = contours[fry]
            currentHierarchy = hierarchy[fry]
            cc = []
            # epsilon = 0.0005 * cv2.arcLength(contours[len(contours) - 1], True)
            approx = cv2.approxPolyDP(currentContour, self.epsilon, True)
            if len(approx) > 2:
                for c in approx:
                    cc.append([c[0][0], c[0][1]])
                parent_index = currentHierarchy[3]
                index = fry if parent_index < 0 else parent_index
                hierarhy_contours[index].append(cc)
        image_xy_corners = [c for c in hierarhy_contours if len(c) > 0]
        return image_xy_corners
    except Exception as ex:
        self.error(ex)
    return image_xy_corners
car_recognizer.py (project: Vision-based-parking-lot-availability-OpenCV, author: Saar1312)
def getEdges(gray, detector, min_thr=None, max_thr=None):
    """
    Where detector is in {1, 2, 3, 4, 5}
        1: Laplacian
        2: Sobel x
        3: Sobel y
        4: Canny
        5: Sobel x with positive and negative slope (in 2 the negative slopes are lost)
    """
    if min_thr is None:
        min_thr = 100
        max_thr = 200
    if detector == 1:
        return cv2.Laplacian(gray, cv2.CV_64F)
    elif detector == 2:
        return cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=-1)
    elif detector == 3:
        return cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=-1)
    elif detector == 4:
        # Canny thresholds apply to the intensity gradient (how different a pixel is
        # from its neighbours), not to the intensity itself
        return cv2.Canny(gray, min_thr, max_thr)
    elif detector == 5:
        sobelx64f = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
        abs_sobel64f = np.absolute(sobelx64f)
        return np.uint8(abs_sobel64f)
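A minimal usage sketch (the frame path is an assumption). Detector 4 returns a binary Canny edge map, while detectors 1-3 and 5 return gradient images:

import cv2

gray = cv2.imread("parking_frame.png", cv2.IMREAD_GRAYSCALE)
canny_edges = getEdges(gray, detector=4, min_thr=50, max_thr=150)   # uint8 edge map
sobel_x = getEdges(gray, detector=2)                                # CV_64F gradient image
cv2.imwrite("edges.png", canny_edges)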
def merge_recs(recs, threshold):
    filtered_recs = []
    while len(recs) > 0:
        r = recs.pop(0)
        recs.sort(key=lambda rec: rec.distance(r))
        merged = True
        while merged:
            merged = False
            i = 0
            for _ in range(len(recs)):
                if r.overlap(recs[i]) > threshold or recs[i].overlap(r) > threshold:
                    r = r.merge(recs.pop(i))
                    merged = True
                elif recs[i].distance(r) > r.w / 2 + recs[i].w / 2:
                    break
                else:
                    i += 1
        filtered_recs.append(r)
    return filtered_recs
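merge_recs expects rectangle objects exposing .w, .distance(), .overlap(), and .merge(); the Rect class used by the original project is not shown on this page, so the sketch below supplies a hypothetical minimal version of that interface. Note that the function consumes the input list (it pops from it):

class Rect:
    """Hypothetical minimal rectangle type matching the interface merge_recs relies on."""
    def __init__(self, x, y, w, h):
        self.x, self.y, self.w, self.h = x, y, w, h

    def distance(self, other):
        # distance between rectangle centres
        dx = (self.x + self.w / 2) - (other.x + other.w / 2)
        dy = (self.y + self.h / 2) - (other.y + other.h / 2)
        return (dx * dx + dy * dy) ** 0.5

    def overlap(self, other):
        # intersection area as a fraction of this rectangle's area
        iw = max(0, min(self.x + self.w, other.x + other.w) - max(self.x, other.x))
        ih = max(0, min(self.y + self.h, other.y + other.h) - max(self.y, other.y))
        return (iw * ih) / float(self.w * self.h)

    def merge(self, other):
        # bounding box of both rectangles
        x = min(self.x, other.x)
        y = min(self.y, other.y)
        w = max(self.x + self.w, other.x + other.w) - x
        h = max(self.y + self.h, other.y + other.h) - y
        return Rect(x, y, w, h)

detections = [Rect(0, 0, 10, 10), Rect(2, 2, 10, 10), Rect(50, 50, 10, 10)]
merged = merge_recs(detections, threshold=0.3)
print(len(merged))   # 2: the two overlapping boxes are merged, the distant one is kept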