def detectFace(image):
    """Return True when the image contains at least one plausibly-shaped face.

    A detection is accepted when min(w, h) / max(w, h) >= FACE_SHAPE, i.e. the
    bounding box is close enough to square.

    :param image: BGR image (numpy array) as returned by cv2.imread.
    :returns: True if any detection passes the shape filter, else False.
    """
    cascadePath = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
    FACE_SHAPE = 0.45
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascadePath)
    faceRect = cascade.detectMultiScale(imageGray, scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))
    if len(faceRect) <= 0:
        return False
    for faceR in faceRect:
        # float() is required: under Python 2 the int/int division floored to 0,
        # which made the original shape test reject nearly every face. The
        # original also used `break` here, which abandoned all remaining
        # candidates as soon as one malformed rectangle was seen.
        aspect = min(faceR[2], faceR[3]) / float(max(faceR[2], faceR[3]))
        if aspect >= FACE_SHAPE:
            return True
    return False
Python example source code for the CascadeClassifier() class.
def face_detect(self, img):
""" Detect the face location of the image img, using Haar cascaded face detector of OpenCV.
return : x,y w, h of the bouning box.
"""
face_cascade = cv2.CascadeClassifier('../haarcascades/haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(img, 1.3, 5)
x = -1
y = -1
w = -1
h = -1
if len(faces) == 1: # we take only when we have 1 face, else, we return nothing.
x,y,w,h = faces[0]
else:
## for (x_,y_,w_,h_) in faces:
## x = x_
## y = y_
## w = w_
## h = h_
## break # we take only the first face,
print "More than one face!!!!!!!!!"
return x,y,w,h
def index():
    """web2py action: train an LBPH face recognizer on every uploaded face
    image in the `faces` table and return the saved model file as a download.
    """
    # NOTE: this classifier is created but never used below; CascadeClassifier
    # cannot load from a URL anyway (kept for behavioral parity).
    face_cascade = cv2.CascadeClassifier("https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_alt.xml")
    recognizer = cv2.createLBPHFaceRecognizer()
    images = []
    labels = []
    for record in db(db.faces.id > 0).select():
        image_path = os.path.join(request.folder, 'uploads', record.file)
        # Read as grayscale (flag 0); LBPH training expects single-channel images.
        images.append(cv2.imread(image_path, 0))
        labels.append(record.user_id)
    recognizer.train(images, np.array(labels))
    recognizer.save(os.path.join(request.folder, 'private', "trained_recognizer.xml"))
    return response.download("trained_recognizer.xml")
def find_eyes(img):
    """Locate eye centres in a PIL image.

    Faces are detected first; eyes are then searched only within each face
    region. Returns a list of (x, y) eye-centre coordinates in full-image
    space (empty list when nothing is found).
    """
    face_cascade = cv2.CascadeClassifier('./classifiers/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('./classifiers/haarcascade_eye.xml')
    # Convert the PIL image to a grayscale numpy array for OpenCV.
    gray = np.array(img.convert("L"))
    centres = []
    for (fx, fy, fw, fh) in face_cascade.detectMultiScale(gray, 1.3, 5):
        face_region = gray[fy:fy + fh, fx:fx + fw]
        for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(face_region):
            # Translate the eye box back to full-image coordinates.
            centres.append((fx + ex + ew / 2, fy + ey + eh / 2))
    return centres
def process_image(img=None):
    """
    Extracts faces from the image using haar cascade, resizes and applies filters.

    :param img: image matrix. Must be grayscale.
    :returns: list containing the cropped, filtered face images.
    """
    face_cascade = cv2.CascadeClassifier('/Users/mehul/opencv-3.0.0/build/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
    faces_location = face_cascade.detectMultiScale(img, 1.3, 5)
    faces = []
    for (x, y, w, h) in faces_location:
        # Crop from the ORIGINAL image into a new variable. The original code
        # reassigned `img` here, so every face after the first was cropped
        # from the previous crop with now-meaningless coordinates.
        face = img[y:(y + h), x:(x + w)]
        try:
            face = cv2.resize(face, (256, 256))
        except cv2.error:
            # A degenerate crop shouldn't kill the whole process (the original
            # called exit(1) from a bare except); just skip this detection.
            continue
        face = cv2.bilateralFilter(face, 15, 10, 10)
        face = cv2.fastNlMeansDenoising(face, None, 4, 7, 21)
        faces.append(face)
    return faces
def __init__(self, scale=1.08):
    """Load the frontal/profile Haar cascades, the HOG descriptor and the two
    hard-negative SVM models used by this detector.

    :param scale: image pyramid scale factor used during detection.
    """
    base = common.get_script_path()
    self.scale = scale
    self.cascade = cv2.CascadeClassifier(base + "/haarcascade_frontalface_alt.xml")
    self.cascade_profile = cv2.CascadeClassifier(base + '/haarcascade_profileface.xml')
    self.hog = cv2.HOGDescriptor()
    self.hog.load(base + '/hard_negative_svm/hog.xml')
    self.svm = cv2.ml.SVM_load(base + '/hard_negative_svm/output_frontal.xml')
    self.svm_profile = cv2.ml.SVM_load(base + '/hard_negative_svm/output_profile.xml')
def main(args):
    """Capture webcam frames until a face appears (or 'q' is pressed), save the
    frame to disk, then run facenet over every image file in the data folder.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    cap = cv2.VideoCapture(0)
    captured = None
    while True:
        ret, frame = cap.read()
        if len(face_cascade.detectMultiScale(frame, 1.3, 5)) > 0:
            captured = frame
            break  # stop as soon as a face shows up
        # No face yet: show the live preview and allow quitting with 'q'.
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    cv2.imwrite('C:/Users/USER/Desktop/facenet-RealTime/src/face_data/saveFace.jpg', frame)
    # Collect every real image file (checked via imghdr) in the data folder.
    data_dir = 'C:/Users/USER/Desktop/facenet-RealTime/src/face_data'
    image_files = []
    for name in listdir(data_dir):
        path = data_dir + '/' + name
        if isfile(path) and imghdr.what(path) is not None:
            image_files.append(path)
    # begin facenet
    cp.main(args, image_files)
Source file: facegroup.py
Project: Automatic_Group_Photography_Enhancement
Author: Yuliang-Zou
Views: 29 · Favorites: 0 · Likes: 0 · Comments: 0
def getFaceData(img):
    """Return the cropped BGR region of the first face found in an image file.

    :param img: path to an image file on disk.
    :returns: the face sub-image (numpy array), or None when no face is found.
    """
    # Create the haar cascade
    faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    # Read the image
    image = cv2.imread(img)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Detect faces in the image. cv2.CASCADE_SCALE_IMAGE replaces the
    # cv2.cv.CV_HAAR_SCALE_IMAGE constant removed in OpenCV 3 (and matches
    # the flag used elsewhere in this file).
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        # First detection only, as before.
        return image[y:y + h, x:x + w]
    # Previously fell through and raised NameError when nothing was detected.
    return None
def center_from_faces(matrix):
    """Compute the area-weighted centroid of all detected faces.

    :param matrix: image (numpy array) to run the face detector on.
    :returns: dict with 'x', 'y' (weighted centre) and 'count', or False when
              no face is detected.
    """
    face_cascade = cv2.CascadeClassifier(cascade_path)
    faces = face_cascade.detectMultiScale(matrix, FACE_DETECT_REJECT_LEVELS, FACE_DETECT_LEVEL_WEIGHTS)
    if len(faces) == 0:
        return False
    # Accumulators MUST be distinct from the loop variables: the original code
    # used `x`/`y` for both, so each iteration clobbered the running totals
    # and the returned centre was garbage.
    cx = 0
    cy = 0
    weight = 0
    # iterate over our faces array
    for (x, y, w, h) in faces:
        print('Face detected at ', x, y, w, h)
        area = w * h
        weight += area
        cx += (x + w / 2) * area
        cy += (y + h / 2) * area
    return {
        'x': cx / weight,
        'y': cy / weight,
        'count': len(faces)
    }
def faceDetect(path, fileName):
    """Detect faces in the image at *path*, draw a rectangle around each and
    save the annotated copy into facesDirectory.

    :param path: path of the source image.
    :param fileName: file name used when saving the annotated image.
    :returns: True when at least one face was found, else False.
    """
    # cv2.read() does not exist -- the image must be loaded with cv2.imread().
    img = cv2.imread(path)
    cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    # The original call passed the mangled tokens `cv2.cv.CV_HAAR, SCALE_IMAGE`;
    # the intended flag is the scale-image one, spelled cv2.CASCADE_SCALE_IMAGE
    # in the OpenCV 3 API.
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4,
                                     flags=cv2.CASCADE_SCALE_IMAGE, minSize=(20, 20))
    if len(rects) == 0:
        return False
    # Convert (x, y, w, h) boxes to (x1, y1, x2, y2) corner form in place.
    rects[:, 2:] += rects[:, :2]
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)
    cv2.imwrite("%s/%s-%s" % (facesDirectory, pcapFile, fileName), img)
    return True
Source file: Modules.py
Project: apparent-age-gender-classification
Author: danielyou0230
Views: 24 · Favorites: 0 · Likes: 0 · Comments: 0
def debug_face_classifier(file):
face_cascade = cv2.CascadeClassifier(xml_face_classifier)
image = cv2.imread(file)
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(image, 1.07, 3)
print faces
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
#roi_gray = gray[y:y+h, x:x+w]
#roi_color = image[y:y+h, x:x+w]
cv2.imshow('Image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def __init__(self):
    # ROS node that recognises faces with a Fisherfaces model and publishes
    # the recognised names on two topics.
    self.node_name = "face_recog_fisher"
    rospy.init_node(self.node_name)
    rospy.on_shutdown(self.cleanup)
    self.bridge = CvBridge()  # converts ROS Image messages <-> OpenCV images
    self.face_names = StringArray()
    self.all_names = StringArray()
    self.size = 4  # NOTE(review): presumably a downscale factor used during detection -- confirm in img_callback
    face_haar = 'haarcascade_frontalface_default.xml'
    self.haar_cascade = cv2.CascadeClassifier(face_haar)
    self.face_dir = 'face_data_fisher'  # directory holding the training data
    self.model = cv2.createFisherFaceRecognizer()
    # self.model = cv2.createEigenFaceRecognizer()
    (self.im_width, self.im_height) = (112, 92)  # size the training images use
    rospy.loginfo("Loading data...")
    # self.fisher_train_data()
    self.load_trained_data()
    rospy.sleep(3)
    # self.img_sub = rospy.Subscriber("/asus/rgb/image_raw", Image, self.img_callback)
    self.img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback)
    # self.img_pub = rospy.Publisher('face_img', Image, queue_size=10)
    self.name_pub = rospy.Publisher('face_names', StringArray, queue_size=10)
    self.all_names_pub = rospy.Publisher('all_face_names', StringArray, queue_size=10)
    rospy.loginfo("Detecting faces...")
def __init__(self):
    # ROS node that captures face images from the camera into a per-person
    # directory, to later train an Eigenfaces model.
    self.node_name = "train_faces_eigen"
    rospy.init_node(self.node_name)
    rospy.on_shutdown(self.cleanup)
    self.bridge = CvBridge()  # converts ROS Image messages <-> OpenCV images
    self.size = 4  # NOTE(review): presumably a downscale factor used during capture -- confirm in img_callback
    face_haar = 'haarcascade_frontalface_default.xml'
    self.haar_cascade = cv2.CascadeClassifier(face_haar)
    self.face_dir = 'face_data_eigen'
    self.face_name = sys.argv[1]  # person's name, passed on the command line
    self.path = os.path.join(self.face_dir, self.face_name)
    # self.model = cv2.createFisherFaceRecognizer()
    self.model = cv2.createEigenFaceRecognizer()
    self.cp_rate = 5  # NOTE(review): looks like a capture rate -- confirm in img_callback
    if not os.path.isdir(self.path):
        os.mkdir(self.path)
    self.count = 0  # number of images captured so far
    self.train_img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback)
    # self.train_img_pub = rospy.Publisher('train_face', Image, queue_size=10)
    rospy.loginfo("Capturing data...")
def __init__(self):
    # ROS node that captures face images from the camera into a per-person
    # directory, to later train a Fisherfaces model.
    self.node_name = "train_faces_fisher"
    rospy.init_node(self.node_name)
    rospy.on_shutdown(self.cleanup)
    self.bridge = CvBridge()  # converts ROS Image messages <-> OpenCV images
    self.size = 4  # NOTE(review): presumably a downscale factor used during capture -- confirm in img_callback
    face_haar = 'haarcascade_frontalface_default.xml'
    self.haar_cascade = cv2.CascadeClassifier(face_haar)
    self.face_dir = 'face_data_fisher'
    self.face_name = sys.argv[1]  # person's name, passed on the command line
    self.path = os.path.join(self.face_dir, self.face_name)
    self.model = cv2.createFisherFaceRecognizer()
    # self.model = cv2.createEigenFaceRecognizer()
    self.cp_rate = 5  # NOTE(review): looks like a capture rate -- confirm in img_callback
    if not os.path.isdir(self.path):
        os.mkdir(self.path)
    self.count = 0  # number of images captured so far
    self.train_img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback)
    # self.train_img_pub = rospy.Publisher('train_face', Image, queue_size=10)
    rospy.loginfo("Capturing data...")
def __init__(self):
    # ROS node that recognises faces with an Eigenfaces model and publishes
    # the recognised names on two topics.
    self.node_name = "face_recog_eigen"
    rospy.init_node(self.node_name)
    rospy.on_shutdown(self.cleanup)
    self.bridge = CvBridge()  # converts ROS Image messages <-> OpenCV images
    self.face_names = StringArray()
    self.size = 4  # NOTE(review): presumably a downscale factor used during detection -- confirm in img_callback
    face_haar = 'haarcascade_frontalface_default.xml'
    self.haar_cascade = cv2.CascadeClassifier(face_haar)
    self.face_dir = 'face_data_eigen'  # directory holding the training data
    # self.model = cv2.createFisherFaceRecognizer()
    self.model = cv2.createEigenFaceRecognizer()
    (self.im_width, self.im_height) = (112, 92)  # size the training images use
    rospy.loginfo("Loading data...")
    # self.fisher_train_data()
    self.load_trained_data()
    rospy.sleep(3)
    # self.img_sub = rospy.Subscriber("/asus/rgb/image_raw", Image, self.img_callback)
    self.img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback)
    # self.img_pub = rospy.Publisher('face_img', Image, queue_size=10)
    self.name_pub = rospy.Publisher('face_names', StringArray, queue_size=10)
    self.all_names_pub = rospy.Publisher('all_face_names', StringArray, queue_size=10)
    rospy.loginfo("Detecting faces...")
def load_cascades():
    """Load the Haar/LBP cascade files used for face detection.

    :returns: list of cv.CascadeClassifier objects, one per cascade file.
    """
    # Load Haar cascade files containing features.
    # NOTE: a missing comma after the alt_tree entry previously fused the last
    # two paths into one bogus string, silently dropping the LBP cascade.
    cascPaths = ['models/haarcascades/haarcascade_frontalface_default.xml',
                 'models/haarcascades/haarcascade_frontalface_alt.xml',
                 'models/haarcascades/haarcascade_frontalface_alt2.xml',
                 'models/haarcascades/haarcascade_frontalface_alt_tree.xml',
                 'models/lbpcascades/lbpcascade_frontalface.xml']
    faceCascades = []
    for casc in cascPaths:
        faceCascades.append(cv.CascadeClassifier(casc))
    return faceCascades
# Do Haar cascade face detection on a single image
# Face detection returns a list of faces
# Where each face is the coordinates of a rectangle containing a face:
# (x,y,w,h)
def getFaceArray(img):
    """Detect faces in *img* and return their boxes as (x1, y1, x2, y2) tuples.

    Accepts either a BGR image (converted to grayscale first) or an image that
    is already single-channel.
    """
    cascade = cv2.CascadeClassifier("/home/jiangwei/??/faceRead/haarcascade_frontalface_default.xml")
    # Only convert when the image actually has 3 channels.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if img.ndim == 3 else img
    boxes = []
    for (x, y, width, height) in cascade.detectMultiScale(gray, 1.2, 5):
        boxes.append((x, y, x + width, y + height))
    return boxes
# if(len(result)>0):
# # for r in result:
# # img2=cv2.rectangle(img, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
# # img3=img[r[1]:r[3], r[0]:r[2]] # ?????????????
#
# return result
#
# return []
#??????
def getFaceImg(img):
face_cascade=cv2.CascadeClassifier("/home/jiangwei/??/faceRead/haarcascade_frontalface_default.xml")
if img.ndim == 3:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray = img #if?????img???3????????????????gray?????3????2????????
faces = face_cascade.detectMultiScale(gray, 1.2, 5)#1.3?5?????????????????????????
result = []
for (x,y,width,height) in faces:
result.append((x,y,x+width,y+height))
print result
if(len(result)>0):
for r in result:
img2=cv2.rectangle(img, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
img3=img[r[1]:r[3], r[0]:r[2]] # ?????????????
return [img3,img2]
return []
#??????
Source file: FaceRecognitionWebStreaming.py
Project: CodeLabs
Author: TheIoTLearningInitiative
Views: 27 · Favorites: 0 · Likes: 0 · Comments: 0
def get_frame(self):
ret, self.image = self.cap.read()
cv2.imwrite(self.temporal, self.image)
faceCascade = cv2.CascadeClassifier("classifier/haarcascade_frontalface_alt.xml")
image = cv2.imread(self.temporal)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
print "Found {0} faces!".format(len(faces))
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imwrite(self.faces,np.hstack((self.image,image)))
return open(self.faces, 'rb').read()
def detect(self):
faceCascade = cv2.CascadeClassifier(self.cascPath)
image = cv2.imread(self.imageinput)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
print "Found {0} faces!".format(len(faces))
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imwrite(self.imageoutput, image)
cv2.waitKey(0)
def __init__(self, facePredictor):
    """
    Instantiate an 'AlignDlib' object.

    :param facePredictor: The path to dlib's facial landmark detector
    :type facePredictor: str

    Also loads two additional face detectors from fixed paths (they are NOT
    constructor parameters, despite what the original docstring claimed):
    an OpenCV Haar cascade frontal-face detector and dlib's built-in HOG
    frontal-face detector.
    """
    assert facePredictor is not None
    # Hard-coded Raspberry Pi install path for the OpenCV cascade.
    self.OPENCV_Detector = cv2.CascadeClassifier("/home/pi/opencv-3.1.0/data/haarcascades/haarcascade_frontalface_default.xml")
    self.HOG_Detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor(facePredictor)
def check_opencv_accuracy(image_paths, bounding_boxes_map):
    """Score OpenCV's Haar face detector against ground-truth boxes and print
    the mean detection accuracy.

    :param image_paths: iterable of image file paths.
    :param bounding_boxes_map: maps image basename -> ground-truth face box.
    """
    filters_path = os.path.expanduser("~/anaconda3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml")
    cascade_classifier = cv2.CascadeClassifier(filters_path)
    detection_scores = []
    for path in tqdm.tqdm(image_paths):
        image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
        image_box = shapely.geometry.box(0, 0, image.shape[1], image.shape[0])
        face_box = bounding_boxes_map[os.path.basename(path)]
        # Only try to search for faces if they are larger than 1% of image.
        # If they are smaller, the ground truth box is probably incorrect.
        if face.geometry.get_intersection_over_union(image_box, face_box) > 0.01:
            ok = does_opencv_detect_face_correctly(image, face_box, cascade_classifier)
            detection_scores.append(1 if ok else 0)
    print("OpenCV accuracy is {}".format(np.mean(detection_scores)))
def facedetect(file):
    """Detect faces and eyes in an image with Haar cascades and display the
    annotated result with matplotlib.

    Args:
        file: path of the image file to process.
    """
    face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
    img = cv2.imread(file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Search for eyes only inside each detected face region.
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for(ex, ey, ew, eh) in eyes:
            # roi_color is a view into img, so this draws on the full image.
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    # Convert BGR -> RGB for correct colors under matplotlib.
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.show()
def __init__(self):
    """Set up the face detection/recognition models, known names, and the
    DynamoDB connection used by this smart-camera service.
    """
    cfg = Config()
    # set up face detection models (paths come from the config file; a
    # hard-coded cascadePath local was previously assigned here but never used)
    opencv_home = cfg.get("face_detection", "opencv_home")
    haarcascade = cfg.get("face_detection", "haarcascade")
    self.faceCascade = cv2.CascadeClassifier('{0}/{1}'.format(opencv_home, haarcascade))
    self.recognizer = cv2.face.createLBPHFaceRecognizer()
    #self.recognizer = cv2.face.createEigenFaceRecognizer()
    #self.recognizer = cv2.face.createFisherFaceRecognizer()
    # the faces and Raspberry Pi locations we'll use
    self.names = ["james", "juanjo", "sayantan", "vineet"]
    self.rasp_names = ["FrontDoor", "Entrance", "Garage"]
    access = cfg.get("aws", "access_key_id")
    secret = cfg.get("aws", "secret_access_key")
    # connect to dynamo
    self.conn = boto.dynamodb2.connect_to_region('us-west-1', aws_access_key_id=access, aws_secret_access_key=secret)
    self.sc = Table('SMARTCAM', connection=self.conn)
    # read in training set and train the model
Source file: Object_Detection_Haar_Cascade.py
Project: Face-Detection-using-Haarcascade
Author: KrUciFieR-Jr
Views: 71 · Favorites: 0 · Likes: 0 · Comments: 0
def detect():
    """Live webcam face + eye detection loop.

    Draws blue boxes around faces and green boxes around eyes within each
    face; displays the annotated stream until ESC is pressed.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    cap = cv2.VideoCapture(0)
    # Warn when the cascade failed to load. The original compared the
    # classifier object to the string '0', which could never be true.
    if face_cascade.empty():
        print("Hello This is NUll")
    while True:
        ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
            # Search for eyes only within the face region.
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        cv2.imshow('img', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # ESC quits
            break
    cv2.destroyAllWindows()
    cap.release()
def detectFaces(image_path):
    """
    Open the image at *image_path* and find all faces in it.

    Returns a list of (x1, y1, x2, y2) corner rectangles, one per face.
    """
    img = cv2.imread(image_path)
    cascade = cv2.CascadeClassifier("cvdata\\haarcascades\\haarcascade_frontalface_default.xml")
    # Convert to grayscale only when the image has color channels.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if img.ndim == 3 else img
    detections = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3,
                                          minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
    return [(x, y, x + width, y + height) for (x, y, width, height) in detections]
def detect_faces(image):
    """Return (x1, y1, x2, y2) corner boxes for every face found in *image*.

    Works on either color (converted to grayscale) or single-channel input.
    """
    cascade = cv2.CascadeClassifier(XML_PATH1)
    gray = image if image.ndim != 3 else cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    boxes = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3,
                                     minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
    return [(x, y, x + width, y + height) for (x, y, width, height) in boxes]
def detect(img_file, detector_xml_path, dest_img_file):
    """Run a cascade detector over an image file, print the raw hits and save
    an annotated copy to *dest_img_file*.
    """
    img = cv2.imread(img_file)
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detector = cv2.CascadeClassifier(detector_xml_path)
    # Minimum window: 50 px or a tenth of the image, whichever is smaller.
    min_size = (min(50, gray_img.shape[0] // 10), min(50, gray_img.shape[1] // 10))
    hits = detector.detectMultiScale(gray_img, 1.1, 4, 0, min_size)
    #cv2.groupRectangles(hits, 2)
    print(hits)
    # Draw on a copy so the loaded image stays untouched.
    annotated = np.copy(img)
    for (x, y, w, h) in hits:
        cv2.rectangle(annotated, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imwrite(dest_img_file, annotated)
def detect(cls,
           image,
           min_size=(50, 50),
           scale_factor=1.1,
           min_neighbors=5,
           cascade_file=_current_cascade):
    """ Return list of objects detected.
    image -- The image (numpy matrix) read by readImage function.
    min_size -- Minimum possible object size. Objects smaller than that are ignored (default (50,50)).
    scale_factor -- Specifying how much the image size is reduced at each image scale (default 1.1).
    min_neighbors -- Specifying how many neighbors each candidate rectangle should have to retain it (default 5).
    cascade_file -- The path of cascade xml file use for detection (default current value)
    """
    if cascade_file != cls._current_cascade:
        # Cache the new classifier on the class so repeated calls with the
        # same cascade don't reload the XML every time. The original code
        # created the classifier but never stored it (or the path) back,
        # so the cache check above never succeeded after a switch.
        cls._classifier = cv2.CascadeClassifier(cascade_file)
        cls._current_cascade = cascade_file
    gray_image = cls.bgr_to_gray(image)
    return cls._classifier.detectMultiScale(gray_image,
                                            scaleFactor=scale_factor,
                                            minNeighbors=min_neighbors,
                                            minSize=min_size)
def test_file():
count = 1
face_cascade = cv2.CascadeClassifier(
'/usr/local/opt/opencv3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
argvs = sys.argv
for argv in argvs[1:]:
img = cv2.imread(argv)
if type(img) != str:
try:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print('convert succeed')
except:
print('can not convert to gray image')
continue
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
f = cv2.resize(gray[y:(y + h), x:(x + w)], (128, 128))
model = load_model('/Users/songheqi/model/model.h5')
num, acc = predict(model, f, 128)
name_list = read_name_list('/Users/songheqi/train_set/')
print('The {} picture is '.format(count) +
name_list[num] + ' acc : ', acc)
count += 1