def edge_pixel_image(image, bv_image):
    edge_result = image.copy()
    edge_result = cv2.Canny(edge_result, 30, 100)
    # suppress edge pixels that coincide with blood-vessel pixels in bv_image
    edge_result[(edge_result == 255) & (bv_image == 255)] = 0
    newfin = cv2.dilate(edge_result, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    return newfin
def extract_bv(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # alternating sequential filtering: opening followed by closing at three scales
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    f4 = cv2.subtract(R3, contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)
    # remove very small contours (area-based noise removal)
    ret, f6 = cv2.threshold(f5, 15, 255, cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret, fin = cv2.threshold(im, 15, 255, cv2.THRESH_BINARY_INV)
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    # remove blobs (microaneurysms and other unwanted chunks): unlike blood vessels
    # they are not elongated, and their areas fall within a known interval
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and 100 <= cv2.contourArea(cnt) <= 3000:
            shape = "circle"
        else:
            shape = "veins"
        if shape == "circle":
            cv2.drawContours(xmask, [cnt], -1, 0, -1)
    finimage = cv2.bitwise_and(fundus_eroded, fundus_eroded, mask=xmask)
    blood_vessels = cv2.bitwise_not(finimage)
    eroded = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7)), iterations=1)
    # dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(eroded)
    return blood_vessels_1
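A minimal usage sketch combining the two functions above. The file name is hypothetical, and feeding extract_bv the green channel is an assumption suggested by the variable names, not stated in the snippet:

import cv2

fundus = cv2.imread("fundus.png", cv2.IMREAD_COLOR)   # hypothetical input image
b, green, r = cv2.split(fundus)                       # extract_bv expects a single-channel image

vessels = extract_bv(green)                           # blood-vessel mask
edges = edge_pixel_image(green, vessels)              # edge map with vessel pixels suppressed
cv2.imwrite("vessels.png", vessels)
cv2.imwrite("edges.png", edges)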
def skin_filter(cfg, vd):
    df = pd.read_csv(vd.photo_csv, index_col=0)
    numbers = df.number.tolist()
    notface = []
    # HSV bounds for skin tones
    lower = np.array([0, 48, 80], dtype="uint8")
    upper = np.array([13, 255, 255], dtype="uint8")
    for number in numbers:
        image = cv2.imread('%s/%d.png' % (vd.photo_dir, number), cv2.IMREAD_COLOR)
        converted = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        skinMask = cv2.inRange(converted, lower, upper)
        # apply a series of erosions and dilations to the mask using an elliptical kernel
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
        skinMask = cv2.erode(skinMask, kernel, iterations=2)
        skinMask = cv2.dilate(skinMask, kernel, iterations=2)
        # blur the mask to help remove noise, then apply the mask to the frame
        skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
        skin = cv2.bitwise_and(image, image, mask=skinMask)
        if len(skin.nonzero()[0]) < cfg.min_skin_pixels:
            notface.append(number)
    print('%d/%d are faces' % (len(df) - len(notface), len(df)))
    df['face'] = 1
    df.loc[df.number.isin(notface), 'face'] = -99
    df.to_csv(vd.photo_csv)
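skin_filter expects a cfg object carrying a min_skin_pixels threshold and a vd object pointing at the photo CSV and directory. A hypothetical invocation using simple namespaces; the threshold and paths are illustrative, not values from the original project:

from types import SimpleNamespace

cfg = SimpleNamespace(min_skin_pixels=4000)           # illustrative threshold
vd = SimpleNamespace(photo_csv='photos.csv',          # CSV with a "number" column
                     photo_dir='photos')              # directory of <number>.png frames
skin_filter(cfg, vd)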
# HandRecognition.py, from the hand-gesture-recognition-opencv project by mahaveerverma
def hand_threshold(frame_in, hand_hist):
    frame_in = cv2.medianBlur(frame_in, 3)
    hsv = cv2.cvtColor(frame_in, cv2.COLOR_BGR2HSV)
    hsv[0:int(cap_region_y_end * hsv.shape[0]), 0:int(cap_region_x_begin * hsv.shape[1])] = 0  # Right half screen only
    hsv[int(cap_region_y_end * hsv.shape[0]):hsv.shape[0], 0:hsv.shape[1]] = 0
    back_projection = cv2.calcBackProject([hsv], [0, 1], hand_hist, [0, 180, 0, 256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_elem_size, morph_elem_size))
    cv2.filter2D(back_projection, -1, disc, back_projection)
    back_projection = cv2.GaussianBlur(back_projection, (gaussian_ksize, gaussian_ksize), gaussian_sigma)
    back_projection = cv2.medianBlur(back_projection, median_ksize)
    ret, thresh = cv2.threshold(back_projection, hsv_thresh_lower, 255, 0)
    return thresh
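hand_threshold relies on module-level tuning constants that this snippet does not show. The values below are illustrative placeholders, not the project's actual settings:

# hypothetical values for the globals assumed by hand_threshold
cap_region_x_begin = 0.6   # fraction of the width where the capture region starts
cap_region_y_end = 0.6     # fraction of the height where the capture region ends
morph_elem_size = 13       # size of the elliptical disc convolved with the back projection
gaussian_ksize = 11
gaussian_sigma = 0
median_ksize = 9
hsv_thresh_lower = 50      # threshold applied to the back projection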
# 3. Find hand contour
def outlining(img):
    # kernel size
    kernel_size = 3
    # bilateral filter, sharpen, threshold image
    biblur = cv2.bilateralFilter(img, 20, 175, 175)
    sharp = cv2.addWeighted(img, 1.55, biblur, -0.5, 0)
    ret1, thresh1 = cv2.threshold(sharp, 127, 255, cv2.THRESH_OTSU)
    # negative and closed image
    inv = cv2.bitwise_not(thresh1)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    closed = cv2.morphologyEx(inv, cv2.MORPH_CLOSE, kernel)
    return closed
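The closed image returned by outlining is typically fed to cv2.findContours. A minimal sketch; the input file and the largest-contour pick are assumptions, not part of the original snippet:

import cv2

gray = cv2.imread("hand.png", cv2.IMREAD_GRAYSCALE)   # hypothetical input
closed = outlining(gray)
contours = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]  # works on OpenCV 3 and 4
hand_contour = max(contours, key=cv2.contourArea)     # assume the largest contour is the hand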
def DarkChannel(im, sz):
    # dark channel: per-pixel minimum over the color channels, then a sz x sz minimum filter (erosion)
    b, g, r = cv2.split(im)
    dc = cv2.min(cv2.min(r, g), b)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (sz, sz))
    dark = cv2.erode(dc, kernel)
    return dark
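A minimal usage sketch for the dark channel. The 15x15 patch size and the conversion to floats in [0, 1] are conventional choices in dehazing code, not values taken from this snippet:

import cv2

img = cv2.imread("hazy.png")             # hypothetical input
I = img.astype('float64') / 255.0        # work on normalised floats
dark = DarkChannel(I, 15)                # 15x15 minimum filter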
def dealImage(image, thresh):
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (2, 1))
    dilate = cv2.dilate(image, kernel)
    gray = cv2.cvtColor(dilate, cv2.COLOR_RGB2GRAY)
    return cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]
def img_contour_extra(im):
    # close small gaps so nearby foreground regions merge into blocks
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 7))
    bgmask = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)
    img_show_hook("bgmask", bgmask)
    # keep only the external contours of the merged blocks
    im2, contours, hierarchy = cv2.findContours(bgmask.copy(), cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
    return contours
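img_show_hook is not defined in this snippet; the stub below is a plausible stand-in for the project's debug helper, shown purely as an assumption:

import cv2

def img_show_hook(title, image):
    # hypothetical debug hook: display the intermediate image and wait for a key press
    cv2.imshow(title, image)
    cv2.waitKey(0)
    cv2.destroyWindow(title)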
def find_contours(self, image):
    image = qimage_to_numpy(image)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # _, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV)
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    # dilated = cv2.dilate(gray, kernel, iterations=13)
    contours, hierarchy = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return contours
def __init__(self):
    super(TargetFilterBrightness, self).__init__()
    # elements to reuse in erode/dilate
    # CROSS eliminates more horizontal/vertical lines and leaves more
    # blobs with extent in both axes [than RECT].
    self._element_shrink = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    self._element_grow = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
from operator import itemgetter

def resize_digits(digits):
    # sort the digit crops left to right by x, keeping only the image arrays
    digits = map(itemgetter('image'), sorted(digits, key=itemgetter('x')))
    blur_kernel = np.ones((4, 4), np.float32) / (4 * 4)
    erode_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    return [
        cv2.resize(
            cv2.bitwise_not(
                cv2.filter2D(
                    cv2.erode(digit, erode_kernel, iterations=1),
                    -1, blur_kernel)
            ),
            (20, 20))
        for digit in digits]
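The expected input format, a list of dicts each holding an 'image' crop and its 'x' position, can be inferred from the itemgetter calls. A hypothetical call with made-up file names:

import cv2

digits = [
    {'image': cv2.imread('digit_b.png', cv2.IMREAD_GRAYSCALE), 'x': 120},
    {'image': cv2.imread('digit_a.png', cv2.IMREAD_GRAYSCALE), 'x': 40},
]
normalised = resize_digits(digits)   # 20x20 crops, ordered left to right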
def extractEdges(hue, intensity):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    edges = cv2.Canny(intensity, 120, 140)
    hue_edges = cv2.Canny(cv2.GaussianBlur(hue, (5, 5), 0), 0, 255)
    combined_edges = cv2.bitwise_or(hue_edges, edges)
    _, mask = cv2.threshold(combined_edges, 40, 255, cv2.THRESH_BINARY)
    return cv2.erode(cv2.GaussianBlur(mask, (3, 3), 0), kernel, iterations=1)
def roiFromEdges(edges):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    small_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # close gaps in edges to create continuous regions
    roi = cv2.dilate(edges, small_kernel, iterations=14)
    return cv2.erode(roi, kernel, iterations=4)
def roiMask(image, boundaries):
    scale = max([1.0, np.average(np.array(image.shape)[0:2] / 400.0)])
    shape = (int(round(image.shape[1] / scale)), int(round(image.shape[0] / scale)))
    small_color = cv2.resize(image, shape, interpolation=cv2.INTER_LINEAR)
    # reduce details and remove noise for better edge detection
    small_color = cv2.bilateralFilter(small_color, 8, 64, 64)
    small_color = cv2.pyrMeanShiftFiltering(small_color, 8, 64, maxLevel=1)
    small = cv2.cvtColor(small_color, cv2.COLOR_BGR2HSV)
    hue = small[::, ::, 0]
    intensity = cv2.cvtColor(small_color, cv2.COLOR_BGR2GRAY)
    edges = extractEdges(hue, intensity)
    roi = roiFromEdges(edges)
    weight_map = weightMap(hue, intensity, edges, roi)
    _, final_mask = cv2.threshold(roi, 5, 255, cv2.THRESH_BINARY)
    small = cv2.bitwise_and(small, small, mask=final_mask)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    for (lower, upper) in boundaries:
        lower = np.array([lower, 80, 50], dtype="uint8")
        upper = np.array([upper, 255, 255], dtype="uint8")
        # find the colors within the specified boundaries and apply the mask
        mask = cv2.inRange(small, lower, upper)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=3)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
        final_mask = cv2.bitwise_and(final_mask, mask)
    # blur the mask for better contour extraction
    final_mask = cv2.GaussianBlur(final_mask, (5, 5), 0)
    return (final_mask, weight_map, scale)
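roiMask also depends on a weightMap helper that is not included here. Assuming it is available, a minimal call might look like the sketch below; the image path and the hue boundaries are illustrative, not values from the original project:

import cv2

img = cv2.imread("photo.jpg")          # hypothetical input
boundaries = [(0, 20)]                 # illustrative hue range (OpenCV hue spans 0-179)
final_mask, weight_map, scale = roiMask(img, boundaries)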
def process(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gau = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, thre = cv2.threshold(gau, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    med = cv2.medianBlur(thre, 5)
    canny = cv2.Canny(thre, 100, 200)
    # sobel = cv2.Sobel(thre, cv2.CV_8U, 1, 0, ksize=3)
    dilation = cv2.dilate(canny, element2, iterations=1)
    dst = cv2.erode(dilation, element1, iterations=1)
    return dst
def check_if_good_boundary(self, boundary, norm_height, norm_width, color_img):
    preprocess_bg_mask = PreprocessBackgroundMask(boundary)
    char_w = norm_width // 20
    remove_noise = PreprocessRemoveNonCharNoise(char_w)
    id_card_img_mask = preprocess_bg_mask.do(color_img)
    # zero out a 5% border on every side
    id_card_img_mask[0:int(norm_height * 0.05), :] = 0
    id_card_img_mask[int(norm_height * 0.95):, :] = 0
    id_card_img_mask[:, 0:int(norm_width * 0.05)] = 0
    id_card_img_mask[:, int(norm_width * 0.95):] = 0
    remove_noise.do(id_card_img_mask)
    # se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    # se2 = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    # mask = cv2.morphologyEx(id_card_img_mask, cv2.MORPH_CLOSE, se1)
    # id_card_img_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, se2)

    # remove the head-shot profile on the right half of the card
    left_half_id_card_img_mask = np.copy(id_card_img_mask)
    left_half_id_card_img_mask[:, norm_width // 2:] = 0
    # try to find text lines and characters
    horizontal_sum = np.sum(left_half_id_card_img_mask, axis=1)
    line_ranges = extract_peek_ranges_from_array(horizontal_sum)
    return 5 <= len(line_ranges) <= 7
def contour_plot_on_text_in_image(inv_img):
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 2))
    dilated = cv2.dilate(inv_img, kernel, iterations=7)  # dilate
    _, contours, hierarchy = cv2.findContours(
        dilated,
        cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_NONE)  # get contours
    return contours
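A hedged usage sketch, assuming inv_img is a binarised image with white text on black and the OpenCV 3.x findContours signature used in the snippet; the file names and the box-drawing step are illustrative additions:

import cv2

img = cv2.imread("page.png")                                    # hypothetical input
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
inv = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
for cnt in contour_plot_on_text_in_image(inv):
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)  # box each text block
cv2.imwrite("page_boxes.png", img)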