# Gesture-to-mouse mapping (method excerpt from a hand-tracking class).
# Requires OpenCV (cv2) and pyautogui; self.ROIx/ROIy hold the current hand
# position, self.preCX/preCY the position from the previous frame.
def input_control(self, count_defects, img_src):
    # position difference from the previous frame (used to move the mouse)
    d_x, d_y = 0, 0
    if self.preCX is not None:
        d_x = self.ROIx - self.preCX
        d_y = self.ROIy - self.preCY
    # determine the current command, filtering out unstable hand gestures:
    # a gesture is accepted only once it holds a majority of recent frames
    cur_cmd = 0
    if self.cmd_switch:
        if self.last_cmds.count(count_defects) >= self.last_cmds.n_maj:
            cur_cmd = count_defects
            # print('major command is', cur_cmd)
        else:
            cur_cmd = 0  # self.last_cmds.major()
    else:
        cur_cmd = count_defects
    # send a mouse input event depending on the hand gesture
    if cur_cmd == 1:
        str1 = '2, move mouse dx,dy = ' + str(d_x * 3) + ', ' + str(d_y * 3)
        cv2.putText(img_src, str1, (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            pyautogui.moveRel(d_x * 3, d_y * 3)
            self.last_cmds.push(count_defects)
            # pyautogui.mouseDown(button='left')
            # pyautogui.moveRel(d_x, d_y)
        # else:
        #     pyautogui.mouseUp(button='left')
    elif cur_cmd == 2:
        cv2.putText(img_src, '3 Left (rotate)', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            pyautogui.dragRel(d_x, d_y, button='left')
            self.last_cmds.push(count_defects)
            # pyautogui.scroll(d_y, pause=0.2)
    elif cur_cmd == 3:
        cv2.putText(img_src, '4 middle (zoom)', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            pyautogui.dragRel(d_x, d_y, button='middle')
            self.last_cmds.push(count_defects)
    elif cur_cmd == 4:
        cv2.putText(img_src, '5 right (pan)', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            pyautogui.dragRel(d_x, d_y, button='right')
            self.last_cmds.push(count_defects)
    elif cur_cmd == 5:
        cv2.putText(img_src, '1 fingertip show up', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            self.last_cmds.push(count_defects)
    else:
        cv2.putText(img_src, 'No finger detected!', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            self.last_cmds.push(count_defects)  # no finger detected, or an unrecognized gesture
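The self.last_cmds object used above is a small majority-vote filter over the most recent gesture codes: a command is only accepted once it appears in at least n_maj of the buffered frames, which suppresses flicker between unstable defect counts. Its class is not shown on this page; a minimal sketch consistent with the push/count/n_maj calls above (the class name FixedCommandQueue and the window sizes are assumptions) could look like this:

from collections import deque

class FixedCommandQueue:
    # Hypothetical stand-in for the last_cmds helper: a fixed-size window
    # of recent gesture codes with a majority threshold (names assumed).
    def __init__(self, maxlen=10, n_maj=6):
        self.queue = deque(maxlen=maxlen)  # oldest entries drop out automatically
        self.n_maj = n_maj                 # votes needed before a gesture fires

    def push(self, cmd):
        # record the gesture code observed in the current frame
        self.queue.append(cmd)

    def count(self, cmd):
        # number of recent frames that reported this gesture
        return self.queue.count(cmd)

With, say, 6 votes required in a 10-frame window, a gesture has to persist for several consecutive frames before the mouse reacts, trading a short input delay for stability.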
Python cv2.FONT_HERSHEY_TRIPLEX example source code
# Requires OpenCV (cv2), NumPy (np), TensorFlow 1.x (tf), and the
# project-local mtcnn, csair_classifier, and db_utils modules.
def main(args):
    infos = _get_classifier_model_info(args.model_version)
    # first graph/session: the MTCNN face detector
    with tf.Graph().as_default():
        sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = mtcnn.create_mtcnn(sess, args.caffe_model_dir)
    # second graph/session: the face classifier
    with tf.Graph().as_default():
        sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
        with sess.as_default():
            recognize = csair_classifier.create_classifier(
                sess, model_def=infos['model_def'],
                image_size=int(infos['image_size']),
                embedding_size=int(infos['embedding_size']),
                nrof_classes=int(infos['nrof_classes']),
                ckpt_dir=args.ckpt_dir)
    conn = db_utils.open_connection()
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        bounding_boxes, points = mtcnn.detect_face(frame, 20, pnet, rnet, onet,
                                                   args.threshold, args.factor)
        for i in range(len(bounding_boxes)):
            box = bounding_boxes[i].astype(int)
            # mark = np.reshape(points[:, i].astype(int), (2, 5)).T
            cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2)
            # crop the detected face region and resize it to the classifier input
            crop = frame[max(box[1], 0):box[3], max(box[0], 0):box[2]]
            crop = cv2.resize(crop, (160, 160), interpolation=cv2.INTER_CUBIC)
            crop = np.expand_dims(crop, 0)
            value, index = csair_classifier.classify(crop, recognize)
            font = cv2.FONT_HERSHEY_TRIPLEX
            name = db_utils.get_candidate_info(conn, int(index[0][0]))[0]
            text = 'person: ' + name + ' probability: ' + str(value[0][0])
            # print('text: ', text)
            cv2.putText(frame, text, (box[0], box[1]), font, 0.42, (255, 255, 0))
            # for p in mark:
            #     cv2.circle(frame, (p[0], p[1]), 3, (0, 0, 255))
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    db_utils.close_connection(conn)
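main() reads every parameter from args; a minimal command-line entry point wiring up the fields referenced above (model_version, caffe_model_dir, ckpt_dir, threshold, factor) might look like the following sketch. The argument layout and default values are assumptions: in facenet-style MTCNN code, threshold is a three-stage list and factor is the image-pyramid scale.

import argparse
import sys

def parse_arguments(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('model_version',
                        help='classifier model version passed to _get_classifier_model_info')
    parser.add_argument('caffe_model_dir',
                        help='directory holding the MTCNN weight files')
    parser.add_argument('ckpt_dir',
                        help='checkpoint directory for the face classifier')
    parser.add_argument('--threshold', type=float, nargs=3, default=[0.6, 0.7, 0.7],
                        help='per-stage MTCNN detection thresholds (assumed defaults)')
    parser.add_argument('--factor', type=float, default=0.709,
                        help='MTCNN image-pyramid scale factor (assumed default)')
    return parser.parse_args(argv)

if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))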