def detect_shirt(self):
    # Threshold the normalized RGB frame to the expected shirt colour range.
    #self.dst = cv2.inRange(self.norm_rgb, np.array([self.lb, self.lg, self.lr], np.uint8), np.array([self.b, self.g, self.r], np.uint8))
    self.dst = cv2.inRange(self.norm_rgb, np.array([20, 20, 20], np.uint8), np.array([255, 110, 80], np.uint8))
    _, self.dst = cv2.threshold(self.dst, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    # Sure-foreground and sure-background markers for the watershed.
    fg = cv2.erode(self.dst, None, iterations=2)
    #cv2.imshow("fore", fg)
    bg = cv2.dilate(self.dst, None, iterations=3)
    _, bg = cv2.threshold(bg, 1, 128, cv2.THRESH_BINARY_INV)
    #cv2.imshow("back", bg)
    mark = cv2.add(fg, bg)
    mark32 = np.int32(mark)
    cv2.watershed(self.norm_rgb, mark32)
    self.m = cv2.convertScaleAbs(mark32)
    _, self.m = cv2.threshold(self.m, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #cv2.imshow("final_tshirt", self.m)
    cntr, h = cv2.findContours(self.m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return self.m, cntr
Example source code for Python cv2.dilate()
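Before the project snippets, a minimal self-contained sketch of what cv2.dilate does on its own (the file names, kernel size and iteration count below are illustrative, not taken from any of the projects):

import cv2
import numpy as np

img = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)       # illustrative input image
_, mask = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

# Dilation grows the white regions of the mask by the footprint of the
# structuring element on every iteration.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
dilated = cv2.dilate(mask, kernel, iterations=1)
cv2.imwrite("dilated.png", dilated)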
def movement(mat_1, mat_2):
    # blur1, blur2 and erodeval are module-level tuning parameters.
    mat_1_gray = cv2.cvtColor(mat_1.copy(), cv2.COLOR_BGR2GRAY)
    mat_1_gray = cv2.blur(mat_1_gray, (blur1, blur1))
    _, mat_1_gray = cv2.threshold(mat_1_gray, 100, 255, 0)
    mat_2_gray = cv2.cvtColor(mat_2.copy(), cv2.COLOR_BGR2GRAY)
    mat_2_gray = cv2.blur(mat_2_gray, (blur1, blur1))
    _, mat_2_gray = cv2.threshold(mat_2_gray, 100, 255, 0)
    # XOR the two binarised frames: only pixels that changed survive.
    mat_2_gray = cv2.bitwise_xor(mat_1_gray, mat_2_gray)
    mat_2_gray = cv2.blur(mat_2_gray, (blur2, blur2))
    _, mat_2_gray = cv2.threshold(mat_2_gray, 70, 255, 0)
    mat_2_gray = cv2.erode(mat_2_gray, np.ones((erodeval, erodeval)))
    mat_2_gray = cv2.dilate(mat_2_gray, np.ones((4, 4)))
    _, contours, __ = cv2.findContours(mat_2_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x signature
    if len(contours) > 0:
        return True   # there was movement
    return False      # no movement
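A minimal usage sketch for movement(), assuming the module-level parameters blur1, blur2 and erodeval that the function reads (the values and the webcam source are illustrative):

import cv2

blur1, blur2, erodeval = 5, 9, 4          # illustrative tuning values
cap = cv2.VideoCapture(0)                 # illustrative source: default webcam
ok, prev_frame = cap.read()
while ok:
    ok, frame = cap.read()
    if not ok:
        break
    if movement(prev_frame, frame):
        print("movement detected")
    prev_frame = frame
cap.release()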
#Pedestrian Recognition Thread
def get_init_process_img(roi_img):
    """
    Pre-process the ROI image: combine horizontal and vertical Sobel
    gradients, blur and threshold the result, clean it up with a few
    erode/dilate passes, and finally run edge detection.
    :param roi_img: ndarray
    :return: ndarray
    """
    h = cv2.Sobel(roi_img, cv2.CV_32F, 0, 1, -1)
    v = cv2.Sobel(roi_img, cv2.CV_32F, 1, 0, -1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)   # auto_canny is a helper defined elsewhere in the project
    return img
def dilate(im, iterations=1):
    return cv2.dilate(im, None, iterations=iterations)

def erode_dilate(im, iterations=1):
    return dilate(erode(im, iterations=iterations), iterations)

def dilate_erode(im, iterations=1):
    return erode(dilate(im, iterations=iterations), iterations)
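With the default 3x3 kernel that cv2.erode/cv2.dilate use when None is passed, erode_dilate amounts to a morphological opening and dilate_erode to a closing; a rough equivalence sketch using cv2.morphologyEx (the toy mask is illustrative):

import cv2
import numpy as np

mask = np.zeros((100, 100), np.uint8)
mask[40:60, 40:60] = 255                  # a solid white square
mask[10, 10] = 255                        # plus an isolated speck of noise

kernel = np.ones((3, 3), np.uint8)
opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)    # removes the speck, like erode_dilate
closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)   # fills small gaps, like dilate_erode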
def equalize(image, image_lower=0.0, image_upper=255.0):
    image_lower = int(image_lower * 2) / 2
    image_lower += 1
    image_lower = max(3, image_lower)
    # Subtract a heavily median-blurred copy to flatten uneven illumination.
    mean = cv2.medianBlur(image, 255)
    image = image - (mean - 100)
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
    # cv2.dilate(image, kernel, image, iterations=1)
    return image
def background_subtract(self, img_src):
    # self.fgbg is a cv2 background subtractor (e.g. MOG2) created elsewhere.
    fgmask = self.fgbg.apply(cv2.GaussianBlur(img_src, (25, 25), 0))
    kernel = np.ones((5, 5), np.uint8)
    fgmask = cv2.dilate(fgmask, kernel, iterations=2)
    #fgmask = self.fgbg.apply(cv2.medianBlur(img_src, 11))
    org_fg = cv2.bitwise_and(img_src, img_src, mask=fgmask)
    return org_fg
# Update Position of ROI
def alpha_image(img, points, blur=0, dilate=0):
    mask = mask_from_points(img.shape[:2], points)
    if dilate > 0:
        kernel = np.ones((dilate, dilate), np.uint8)
        mask = cv2.dilate(mask, kernel)
    if blur > 0:
        mask = cv2.blur(mask, (blur, blur))
    return np.dstack((img, mask))
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        img = obj.imageNode.ViewObject.Proxy.img.copy()
    print(obj.blockSize, obj.ksize, obj.k)
    try:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = np.float32(gray)
        print("normal")
    except cv2.error:
        im2 = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
        print("except")
    dst = cv2.cornerHarris(gray, obj.blockSize, obj.ksize*2+1, obj.k/10000)
    dst = cv2.dilate(dst, None)
    # Mark strong Harris corners in red on the original image.
    img[dst > 0.01*dst.max()] = [0, 0, 255]
    dst2 = img.copy()
    dst2[dst < 0.01*dst.max()] = [255, 255, 255]
    dst2[dst > 0.01*dst.max()] = [0, 0, 255]
    if not obj.matplotlib:
        cv2.imshow(obj.Label, img)
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst2, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    self.img = img
def execute_Morphing(proxy, obj):
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except Exception:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    ks = obj.kernel
    kernel = np.ones((ks, ks), np.uint8)
    if obj.filter == 'dilation':
        img = cv2.dilate(img, kernel, iterations=1)
    if obj.filter == 'erosion':
        img = cv2.erode(img, kernel, iterations=1)
    if obj.filter == 'opening':
        img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    if obj.filter == 'closing':
        img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    obj.Proxy.img = img
#
# property functions for HoughLines
#
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copy image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print("loaded")
    print(obj.blockSize, obj.ksize, obj.k)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = np.float32(gray)
    # dst = cv2.cornerHarris(gray, 3, 3, 0.00001)
    dst = cv2.cornerHarris(gray, obj.blockSize, obj.ksize*2+1, obj.k/10000)
    dst = cv2.dilate(dst, None)
    # Mark strong Harris corners in red on the original image.
    img[dst > 0.01*dst.max()] = [0, 0, 255]
    if True:
        print("showing")
        cv2.imshow(obj.Label, img)
        print("shown")
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("done")
    self.img = img
def checkForSkin(IMG10):
    high, widt = IMG10.shape[:2]
    B1 = np.reshape(np.float32(IMG10[:, :, 0]), high*widt)  # B
    G1 = np.reshape(np.float32(IMG10[:, :, 1]), high*widt)  # G
    R1 = np.reshape(np.float32(IMG10[:, :, 2]), high*widt)  # R
    #print high, widt
    h3 = np.zeros((high, widt, 3), np.uint8)
    #cv2.imshow("onetime", h)
    # Classic RGB skin-colour rule: R > 95, G > 40, B > 20, max-min > 15,
    # |R-G| > 15, R > G and R > B.
    tem = np.logical_and(np.logical_and(np.logical_and(np.logical_and(R1 > 95, G1 > 40), np.logical_and(B1 > 20, (np.maximum(np.maximum(R1, G1), B1) - np.minimum(np.minimum(R1, G1), B1)) > 15)), R1 > B1), np.logical_and(np.absolute(R1 - G1) > 15, R1 > G1))
    h5 = np.array(tem).astype(np.uint8, order='C', casting='unsafe')
    h5 = np.reshape(h5, (high, widt))
    h3[:, :, 0] = h5
    h3[:, :, 1] = h5
    h3[:, :, 2] = h5
    #cv2.imshow("thirdtime", h3)
    kernel1 = np.ones((3, 3), np.uint8)
    closedH3 = np.copy(h3)
    # Five erosions followed by five dilations: a morphological opening
    # that removes small false-positive specks.
    for i in range(5):
        closedH3 = cv2.erode(closedH3, kernel1)
    for i in range(5):
        closedH3 = cv2.dilate(closedH3, kernel1)
    #cv2.imshow("closedH3", closedH3)
    # closedH3 = cv2.cvtColor(closedH3, cv2.COLOR_BGR2RGB)
    return closedH3
ColoredObjectDetector.py, from project robot-camera-platform by danionescu0:
def find(self, image):
    hsv_frame = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_frame, self.__hsv_bounds[0], self.__hsv_bounds[1])
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # [-2] picks the contour list regardless of the OpenCV version.
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(contours) == 0:
        return (False, False)
    largest_contour = max(contours, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(largest_contour)
    M = cv2.moments(largest_contour)
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    return (center, radius)
def segment(self, im):
    # Squared per-pixel difference from the stored background image, scaled down.
    mask = np.square(im.astype('float32') - self.bgim).sum(axis=2) / 20
    mask = np.clip(mask, 0, 255).astype('uint8')
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel)
    mask = cv2.dilate(mask, self.dilate_k)
    mask = mask.astype('uint8')
    return (mask > 10).astype('float32') * 255
def convert_to_linedrawing(self, luminous_image_data):
    kernel = numpy.ones((3, 3), numpy.uint8)
    linedrawing = cv2.Canny(luminous_image_data, 5, 125)
    linedrawing = cv2.bitwise_not(linedrawing)
    linedrawing = cv2.erode(linedrawing, kernel, iterations=1)
    linedrawing = cv2.dilate(linedrawing, kernel, iterations=1)
    return linedrawing
def convert_to_linedrawing(self, luminous_image_data):
    # 5x5 all-ones kernel: the 24-neighbourhood of each pixel.
    neiborhood24 = numpy.array([[1, 1, 1, 1, 1],
                                [1, 1, 1, 1, 1],
                                [1, 1, 1, 1, 1],
                                [1, 1, 1, 1, 1],
                                [1, 1, 1, 1, 1]],
                               numpy.uint8)
    dilated = cv2.dilate(luminous_image_data, neiborhood24, iterations=1)
    diff = cv2.absdiff(dilated, luminous_image_data)
    linedrawing = cv2.bitwise_not(diff)
    return linedrawing
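The dilate/absdiff/bitwise_not sequence above is essentially an inverted morphological gradient; a minimal standalone sketch of the same idea (the file names are illustrative):

import cv2
import numpy as np

gray = cv2.imread("photo.png", cv2.IMREAD_GRAYSCALE)
kernel = np.ones((5, 5), np.uint8)

dilated = cv2.dilate(gray, kernel, iterations=1)
edges = cv2.absdiff(dilated, gray)        # bright where intensity changes sharply
linedrawing = cv2.bitwise_not(edges)      # dark lines on a white background
cv2.imwrite("linedrawing.png", linedrawing)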
def morph_single(y_out):
    """Morphological transform.

    Args:
        y_out: [T, H, W]
    """
    y_out_morph = np.zeros(y_out.shape)
    kernel = np.ones([5, 5])
    # Dilate each of the T channels independently.
    for ch in range(y_out.shape[0]):
        y_out_morph[ch] = cv2.dilate(y_out[ch], kernel)
    return y_out_morph
def border(self, alpha, size, kernel_type='RECT'):
    """
    alpha : alpha layer of the text
    size : size of the kernel
    kernel_type : one of [RECT, ELLIPSE, CROSS]
    @return : alpha layer of the border (color to be added externally).
    """
    # `cv` is cv2 imported under that alias in this module.
    kdict = {'RECT': cv.MORPH_RECT, 'ELLIPSE': cv.MORPH_ELLIPSE,
             'CROSS': cv.MORPH_CROSS}
    kernel = cv.getStructuringElement(kdict[kernel_type], (size, size))
    border = cv.dilate(alpha, kernel, iterations=1)  # - alpha
    return border
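As the trailing "# - alpha" comment hints, subtracting the original alpha from the dilated alpha leaves only the outline; a minimal standalone sketch, assuming cv is cv2 and using a synthetic text mask:

import cv2 as cv
import numpy as np

text_alpha = np.zeros((64, 256), np.uint8)    # synthetic single-channel text layer
cv.putText(text_alpha, "demo", (10, 48), cv.FONT_HERSHEY_SIMPLEX, 1.5, 255, 2)

kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))
dilated = cv.dilate(text_alpha, kernel, iterations=1)
outline = cv.subtract(dilated, text_alpha)    # border pixels only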
def build_mask(self, image):
    """ Build the mask to find the path edges """
    kernel = np.ones((3, 3), np.uint8)
    img = cv2.bilateralFilter(image, 9, 75, 75)
    img = cv2.erode(img, kernel, iterations=1)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, self.lower_gray, self.upper_gray)
    mask2 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    mask2 = cv2.erode(mask2, kernel)
    mask2 = cv2.dilate(mask2, kernel, iterations=1)
    return mask2