def __process_image(self, image):
    # Our operations on the frame come here
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hsv = cv2.medianBlur(hsv, 5)
    draw_col = (0, 0, 255)
    p1 = (LEFT_EDGE, BOTTOM_EDGE)
    p2 = (RIGHT_EDGE, TOP_EDGE)
    cv2.rectangle(hsv, p1, p2, draw_col)  # bounding rectangle
    vert_spacing = (RIGHT_EDGE - LEFT_EDGE) / 31.  # vertical lines
    for i in range(1, 31):
        x_pos = int(LEFT_EDGE + i * vert_spacing)
        p1 = (x_pos, BOTTOM_EDGE)
        p2 = (x_pos, TOP_EDGE)
        cv2.line(hsv, p1, p2, draw_col)
    horiz_spacing = (BOTTOM_EDGE - TOP_EDGE) / 28.  # horizontal lines
    for i in range(1, 28):
        y_pos = int(TOP_EDGE + i * horiz_spacing)
        p1 = (LEFT_EDGE, y_pos)
        p2 = (RIGHT_EDGE, y_pos)
        cv2.line(hsv, p1, p2, draw_col)
    # cv2.imshow('Grid', hsv)
    # cv2.waitKey(1)
    # box around target pixel for testing
    # pt = (350, 600)
    # cv2.circle(hsv, pt, 3, draw_col, thickness=1)
    # print(hsv[600][350])
    return hsv
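`cv2.medianBlur` requires an odd `ksize` greater than 1 and smooths each channel independently, so the call above filters the hue, saturation, and value planes separately. A minimal sketch with a synthetic stand-in frame (the frame and size here are assumptions, not from the original module):

import cv2
import numpy as np

frame = np.random.randint(0, 256, (480, 640, 3), np.uint8)  # stand-in BGR frame
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
smoothed = cv2.medianBlur(hsv, 5)  # ksize=5: odd, > 1, applied per channel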
def __detect_bot(self, hsv_image):
    # Experimentally determined LED thresholds
    BOT_MIN = np.array([28, 8, 100], np.uint8)
    BOT_MAX = np.array([32, 255, 255], np.uint8)
    thresholded_image = cv2.inRange(hsv_image, BOT_MIN, BOT_MAX)
    thresholded_image = cv2.medianBlur(thresholded_image, 15)
    # cv2.imshow('Yellow Thresh', thresholded_image)
    # cv2.waitKey(1)
    contours, hierarchy = cv2.findContours(thresholded_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        (bot_x, bot_y) = (-1000, -1000)
    else:
        bot = contours[0]
        M = cv2.moments(bot)
        if len(bot) > 2:
            bot_x = int(M['m10'] / M['m00'])
            bot_y = int(M['m01'] / M['m00'])
        else:
            bot_x = self.current_location[0]
            bot_y = self.current_location[1]
    return thresholded_image, (bot_x, bot_y)
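Note that `cv2.findContours` returns (contours, hierarchy) in OpenCV 2.4 and 4.x but (image, contours, hierarchy) in 3.x, so the two-value unpacking above only works on some versions. A small version-agnostic wrapper, offered as a sketch rather than part of the original project:

def find_contours_compat(binary_image):
    # Take the last two return values to cover both OpenCV return signatures.
    result = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return result[-2], result[-1]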
HandRecognition.py (project: hand-gesture-recognition-opencv, author: mahaveerverma)
def hand_threshold(frame_in, hand_hist):
    frame_in = cv2.medianBlur(frame_in, 3)
    hsv = cv2.cvtColor(frame_in, cv2.COLOR_BGR2HSV)
    hsv[0:int(cap_region_y_end * hsv.shape[0]), 0:int(cap_region_x_begin * hsv.shape[1])] = 0  # Right half screen only
    hsv[int(cap_region_y_end * hsv.shape[0]):hsv.shape[0], 0:hsv.shape[1]] = 0
    back_projection = cv2.calcBackProject([hsv], [0, 1], hand_hist, [0, 180, 0, 256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_elem_size, morph_elem_size))
    cv2.filter2D(back_projection, -1, disc, back_projection)
    back_projection = cv2.GaussianBlur(back_projection, (gaussian_ksize, gaussian_ksize), gaussian_sigma)
    back_projection = cv2.medianBlur(back_projection, median_ksize)
    ret, thresh = cv2.threshold(back_projection, hsv_thresh_lower, 255, 0)
    return thresh
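A hedged usage sketch for the function above: `hand_hist` is a 2-D hue/saturation histogram built from a sampled skin patch. The parameter values and the `skin_sample_bgr`/`frame_bgr` images are assumptions; the original module defines its own globals:

# Assumed module-level parameters (the original file defines its own values).
cap_region_x_begin, cap_region_y_end = 0.6, 0.6
morph_elem_size, median_ksize = 13, 3
gaussian_ksize, gaussian_sigma = 11, 0
hsv_thresh_lower = 30

roi = cv2.cvtColor(skin_sample_bgr, cv2.COLOR_BGR2HSV)  # skin_sample_bgr: assumed skin patch
hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
mask = hand_threshold(frame_bgr, hand_hist)  # frame_bgr: assumed camera frame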
# 3. Find hand contour
def xmedian(ref, mwid):
    # Fill NaNs with the global mean, then refine them with a box-blur estimate
    temp = np.isnan(ref)
    tmean = np.nanmean(ref)
    ref[temp] = tmean
    ref2 = cv2.blur(ref, (mwid, mwid))
    ref[temp] = ref2[temp]
    # Scale [0, 1] floats to 8-bit so medianBlur accepts apertures > 5
    tempx = np.uint8(255 * ref)
    return cv2.medianBlur(tempx, mwid) / 255.0
def ymedian0(aero, cls, mwid):
    # Same NaN fill-in as xmedian, but scaled by 100 instead of 255
    temp = np.isnan(aero)
    tmean = np.nanmean(aero)
    aero[temp] = tmean
    aero2 = cv2.blur(aero, (mwid, mwid))
    aero[temp] = aero2[temp]
    tempx = np.uint8(100 * aero)
    aerox = cv2.medianBlur(tempx, mwid) / 100.0
    return aerox
def ymedian(aero, cls, mwid, twid):
    temp = np.isnan(aero)
    tmean = np.nanmean(aero)
    aero[temp] = tmean
    aero2 = cv2.blur(aero, (mwid, mwid))
    aero[temp] = aero2[temp]
    # 4/28/2016
    # tempx = np.uint8(255 * aero)
    tempx = np.uint8(100 * aero)
    # aerox = cv2.medianBlur(tempx, mwid) / 255.0
    aerox = cv2.medianBlur(tempx, mwid) / 100.0
    # Flag pixels whose value moved more than twid under the median filter
    ptemp = np.where(np.abs(aero - aerox) > twid)
    cls[ptemp] = -1
    return aerox
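These helpers quantize float arrays to 8-bit before filtering because of a `cv2.medianBlur` constraint: with a ksize of 3 or 5 the input may be 8-bit, 16-bit, or float32, but larger apertures accept only 8-bit images. A minimal round-trip sketch:

import cv2
import numpy as np

field = np.random.rand(64, 64).astype(np.float32)  # values in [0, 1]
as_u8 = np.uint8(255 * field)                      # quantize to 8-bit
smoothed = cv2.medianBlur(as_u8, 7) / 255.0        # 7x7 aperture needs uint8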
def blur(self):
    px = 5
    self.data = cv2.blur(self.data, (px, px))
    # self.data = cv2.medianBlur(self.data, px)
def getDepth(self):
    """
    Return a median-smoothed depth image
    :return: a (valid, depth) tuple: True if any depth pixel is non-zero,
        and the depth map as a float32 numpy array
    """
    if self.mirror:
        depth = dsc.getDepthMap()[:, ::-1]
    else:
        depth = dsc.getDepthMap()
    depth = cv2.medianBlur(depth, 3)
    return (numpy.count_nonzero(depth) != 0), numpy.asarray(depth, numpy.float32)
def main():
    data = pd.read_csv(
        'Leon_group1_densified_point_cloud.xyz',
        names=['X', 'Y', 'Z', 'C_R', 'C_G', 'C_B'],
        delim_whitespace=True)
    # Calculate GeoTIFF information
    Auto = True
    # If it is auto
    if Auto:
        # spacing could be changed
        spacing = 1.6 * get_space(data)
        w = int((data.X.max() - data.X.min()) / spacing)
        h = int((data.Y.max() - data.Y.min()) / spacing)
        affine_par = [spacing, 0, 0, -spacing, data.X.min(), data.Y.max()]
    else:
        affine_name = ''  # input the affine file name
        affine_par = np.loadtxt(affine_name)
        h = 1792
        w = 1053
    print(affine_par)
    print(h, w)
    # Generate DEM
    ortho = GEM_Dsm(data, h, w, 3, 0.15, affine_par)
    # save to tif
    ortho = ortho.astype(np.uint8)
    # ortho = cv2.medianBlur(ortho, 3)
    cv2.imwrite('ortho.tif', ortho)
    array2Raster(ortho, affine_par, 'test.tif')
def getDisparity(stereo, img1, img2, mapx1, mapy1, mapx2, mapy2):
    dst1 = cv2.remap(img1, mapx1, mapy1, cv2.INTER_LINEAR)
    dst2 = cv2.remap(img2, mapx2, mapy2, cv2.INTER_LINEAR)
    gray1 = cv2.cvtColor(dst1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(dst2, cv2.COLOR_BGR2GRAY)
    # compute() returns fixed-point disparity with 4 fractional bits
    disparity = stereo.compute(gray1, gray2) / 16
    # disparity = cv2.medianBlur(disparity, 5)
    return disparity
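A hedged sketch of where the `stereo` matcher and remap tables might come from; the calibration outputs (K1, D1, K2, D2, R, T) and image size (w, h) are assumed to exist from a prior cv2.stereoCalibrate run:

R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K1, D1, K2, D2, (w, h), R, T)
mapx1, mapy1 = cv2.initUndistortRectifyMap(K1, D1, R1, P1, (w, h), cv2.CV_32FC1)
mapx2, mapy2 = cv2.initUndistortRectifyMap(K2, D2, R2, P2, (w, h), cv2.CV_32FC1)
stereo = cv2.StereoSGBM_create(minDisparity=0, numDisparities=64, blockSize=5)
disparity = getDisparity(stereo, left_bgr, right_bgr, mapx1, mapy1, mapx2, mapy2)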
def Median(self, img, size):
    # size must be an odd integer greater than 1
    dImg = cv2.medianBlur(img, size)
    return dImg
def preprocess(img):
    """Convert to grayscale, median-blur, and equalize the histogram."""
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # img = cv2.GaussianBlur(img, (3, 3), 0)
    img = cv2.medianBlur(img, 5)
    img = cv2.equalizeHist(img)
    return img
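`cv2.equalizeHist` only accepts 8-bit single-channel input, which is why the grayscale conversion comes first. For color images, a common alternative (a sketch, not from the original code) equalizes only the luma channel so hues are preserved:

ycrcb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YCrCb)  # img_bgr: assumed BGR input
ycrcb[:, :, 0] = cv2.equalizeHist(ycrcb[:, :, 0])
result = cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)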
def process(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gau = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, thre = cv2.threshold(gau, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    med = cv2.medianBlur(thre, 5)
    canny = cv2.Canny(thre, 100, 200)
    # sobel = cv2.Sobel(thre, cv2.CV_8U, 1, 0, ksize=3)
    dilation = cv2.dilate(canny, element2, iterations=1)
    dst = cv2.erode(dilation, element1, iterations=1)
    return dst
def threshold_image_for_tape(image):
    """
    Thresholds image for reflective tape with light shined on it. This means it
    looks for pixels that are almost white, makes them white, and makes
    everything else black.

    Parameters:
        :param: `image` - the source image to threshold from
    """
    orig_image = numpy.copy(image)
    # print(orig_image.size)
    orig_image = cv2.medianBlur(orig_image, 3)
    # orig_image[orig_image > 100] = 255
    # return orig_image[orig_image > 100]
    height, width = orig_image.shape[0], orig_image.shape[1]
    eight_bit_image = numpy.zeros((height, width, 1), numpy.uint8)
    cv2.inRange(orig_image,
                (B_RANGE[0], G_RANGE[0], R_RANGE[0], 0),
                (B_RANGE[1], G_RANGE[1], R_RANGE[1], 100),
                eight_bit_image)
    # eight_bit_image = cv2.adaptiveThreshold(orig_image, 255,
    #                                         cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
    #                                         cv2.THRESH_BINARY, 8, 0)
    # cv2.medianBlur(eight_bit_image, 9)
    return eight_bit_image
def th2(self, img):
    # Adaptive thresholding
    # Optional pre-smoothing:
    # median = cv2.medianBlur(thresh, 3)
    # img_blur = cv2.GaussianBlur(img_gray, (m_blurBlock, m_blurBlock), 0)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thresh = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 11, 19)
    return thresh
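`cv2.adaptiveThreshold` derives a per-pixel threshold from each pixel's 11x11 neighborhood (Gaussian-weighted here) minus the constant 19; the block size must be odd and the input 8-bit single-channel. A quick comparison sketch of the two adaptive methods (`img_bgr` is an assumed input):

gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)  # img_bgr: assumed BGR input
mean_th = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                cv2.THRESH_BINARY, 11, 19)
gauss_th = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 11, 19)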
# Median blur
def get_median_blur(gray_frame):
    return cv2.medianBlur(gray_frame, 5)
# Canny edge detection
def videoToImageArray(filename, time_start, time_stop):
    vidcap = cv2.VideoCapture(filename)
    pictures = [[], []]
    for time in range(time_start, time_stop):
        vidcap.set(cv2.CAP_PROP_POS_MSEC, time * 1000)  # seek to the `time`-second mark
        success, image = vidcap.read()
        image = cv2.medianBlur(image, 7)
        resized = imutils.resize(image, width=800)
        p1 = resized[370:430, 220:300]
        p2 = resized[370:430, 520:600]
        # Canny seeds edges at the larger threshold and links at the smaller
        p1 = cv2.Canny(p1, 400, 100)
        p2 = cv2.Canny(p2, 400, 100)
        pictures[0].append(p1)
        pictures[1].append(p2)
    return pictures
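A hedged usage sketch; the file name and the 10 to 15 second window are assumptions:

frames_left, frames_right = videoToImageArray('clip.mp4', 10, 15)
for edge_img in frames_left:
    cv2.imshow('left patch edges', edge_img)
    cv2.waitKey(200)
cv2.destroyAllWindows()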
image_transformation.py (project: Sign-Language-Recognition, author: Anmol-Singh-Jaggi)
def make_background_black(frame):
    """
    Makes everything apart from the main object of interest black.
    """
    print("Making background black...")
    # Convert from BGR to HSV
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Prepare the first mask.
    # Tuned parameters to match the skin color of the input images...
    lower_boundary = np.array([0, 40, 30], dtype="uint8")
    upper_boundary = np.array([43, 255, 254], dtype="uint8")
    skin_mask = cv2.inRange(frame, lower_boundary, upper_boundary)
    # Apply a series of erosions and dilations to the mask using an
    # elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    skin_mask = cv2.erode(skin_mask, kernel, iterations=2)
    skin_mask = cv2.dilate(skin_mask, kernel, iterations=2)
    # Prepare the second mask
    lower_boundary = np.array([170, 80, 30], dtype="uint8")
    upper_boundary = np.array([180, 255, 250], dtype="uint8")
    skin_mask2 = cv2.inRange(frame, lower_boundary, upper_boundary)
    # Combine the effect of both the masks to create the final frame.
    skin_mask = cv2.addWeighted(skin_mask, 0.5, skin_mask2, 0.5, 0.0)
    # Blur the mask to help remove noise.
    # skin_mask = cv2.medianBlur(skin_mask, 5)
    frame_skin = cv2.bitwise_and(frame, frame, mask=skin_mask)
    frame = cv2.addWeighted(frame, 1.5, frame_skin, -0.5, 0)
    frame_skin = cv2.bitwise_and(frame, frame, mask=skin_mask)
    print("Done!")
    return frame_skin
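Skin hue straddles the red wrap-around point (OpenCV stores hue as 0-179), which is why the function combines one mask near 0 with a second near 180. A minimal hedged usage sketch; 'hand.jpg' is an assumed input:

frame = cv2.imread('hand.jpg')  # assumed input image
result = make_background_black(frame)
cv2.imshow('skin only', result)
cv2.waitKey(0)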
def pre_process_debug(image):
    cv2.imshow("Image", image)
    image_eqhist = equalize_hist(image)
    cv2.imshow("Equalize Hist", image_eqhist)
    image_blur = cv2.medianBlur(image, 5)
    cv2.imshow("Blur", image_blur)
    image_blur_eqhist = equalize_hist(image_blur)
    cv2.imshow("Blur + Equalize Hist", image_blur_eqhist)
    image_eqhist_blur = cv2.medianBlur(image_eqhist, 5)
    cv2.imshow("Equalize Hist + Blur", image_eqhist_blur)
    cv2.waitKey(0)
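`equalize_hist` is defined elsewhere in the original project; a plausible stand-in so the snippet runs, with the BGR-to-gray conversion an assumption about what the helper does:

def equalize_hist(image):
    # Hypothetical stand-in: cv2.equalizeHist requires an 8-bit
    # single-channel image, so convert color input first.
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.equalizeHist(image)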