def __init__(self, video_src):
self.cam = cv2.VideoCapture(video_src)
ret, self.frame = self.cam.read()
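# (note: ret is not checked here; cam.read() returns ret == False when the capture fails)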
cv2.namedWindow('gesture_hci')
# set channel range of skin detection
self.mask_lower_yrb = np.array([44, 131, 80]) # [54, 131, 110]
self.mask_upper_yrb = np.array([163, 157, 155]) # [163, 157, 135]
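# NOTE (sketch, assumption): these bounds are meant for cv2.inRange() on a
# colour-converted frame, roughly:
#   ycrcb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2YCR_CB)
#   skin_mask = cv2.inRange(ycrcb, self.mask_lower_yrb, self.mask_upper_yrb)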
# switch for trackbar-based skin calibration
self.calib_switch = False
# create background subtractor (OpenCV 2.x API; OpenCV 3+/4 renamed this to cv2.createBackgroundSubtractorMOG2 with detectShadows=True)
self.fgbg = cv2.BackgroundSubtractorMOG2(history=120, varThreshold=50, bShadowDetection=True)
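# NOTE (sketch, assumption): the subtractor is applied per frame in the processing
# loop, roughly:
#   fgmask = self.fgbg.apply(frame)
#   frame = cv2.bitwise_and(frame, frame, mask=fgmask)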
# define dynamic ROI area
self.ROIx, self.ROIy = 200, 200
self.track_switch = False
# record the previous position of the ROI centroid
self.preCX = None
self.preCY = None
# a queue recording the last few gesture commands
self.last_cmds = FixedQueue()
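# (assumption: FixedQueue is a project-specific bounded FIFO holding the most recent commands)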
# load reference images for detecting the single-finger (index) gesture
self.fin1 = cv2.imread('./test_data/index1.jpg')
self.fin2 = cv2.imread('./test_data/index2.jpg')
self.fin3 = cv2.imread('./test_data/index3.jpg')
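# (assumption: these images serve as reference shapes to compare against the detected hand contour)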
# switch to turn on mouse input control
self.cmd_switch = False
# frame counter, for debugging
self.n_frame = 0
# on-line calibration for skin detection (buggy, not stable)