def get_points():
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('calibration_wide/GO*.jpg')
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (8,6), None)
# If found, add object points, image points
        if ret:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
cv2.drawChessboardCorners(img, (8,6), corners, ret)
#write_name = 'corners_found'+str(idx)+'.jpg'
#cv2.imwrite(write_name, img)
cv2.imshow('img', img)
cv2.waitKey(500)
cv2.destroyAllWindows()
return objpoints, imgpoints
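# A hedged usage sketch (not part of the original snippet): the collected point
# lists are exactly what cv2.calibrateCamera expects. `image_size` is the
# (width, height) of the calibration images, e.g. gray.shape[::-1].
def calibrate_from_points(objpoints, imgpoints, image_size):
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, image_size, None, None)
    return mtx, dist  # camera matrix and distortion coefficients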
def CaptureImage():
    imageName = 'DontCare.jpg'  # placeholder; overwritten with a timestamped name on capture
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #For capture image in monochrome
        rgbImage = frame  # keep the raw frame (note: OpenCV frames are BGR, despite the name)
# Display the resulting frame
cv2.imshow('Webcam',rgbImage)
#Wait to press 'q' key for capturing
if cv2.waitKey(1) & 0xFF == ord('q'):
#Set the image name to the date it was captured
imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
#Save the image
cv2.imwrite(imageName, rgbImage)
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
#Returns the captured image's name
return imageName
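# Example call (hypothetical): blocks until 'q' is pressed in the preview
# window, then returns the timestamped filename it saved.
# captured = CaptureImage()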
def do_key_press(symbol, modifiers):
global cur_vector
print("SO: {}".format(symbol))
if(symbol == key.R):
if theApp.use_camera:
theApp.set_camera_recording(not theApp.camera_recording)
if(symbol == key.T):
theApp.show_camera = not theApp.show_camera
elif(symbol == key.SPACE):
print("SPACEBAR")
        snapshot(None)
elif(symbol == key.ESCAPE):
print("ESCAPE")
cv2.destroyAllWindows()
if theApp.use_camera:
            # this opens a fresh capture handle and releases it immediately;
            # releasing the app's own capture object would be more reliable
            cv2.VideoCapture(0).release()
sys.exit(0)
def videoize(func, args, src = 0, win_name = "Cam", delim_wait = 1, delim_key = 27):
cap = cv2.VideoCapture(src)
    while True:
ret, frame = cap.read()
# To speed up processing; Almost real-time on my PC
frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
frame = cv2.flip(frame, 1)
out = func(frame, args)
if out is None:
continue
out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
cv2.imshow(win_name, out)
        cv2.moveWindow(win_name, (s_w - out.shape[1]) // 2, (s_h - out.shape[0]) // 2)  # s_w/s_h are screen-size globals; window coordinates must be ints
k = cv2.waitKey(delim_wait)
if k == delim_key:
cv2.destroyAllWindows()
cap.release()
return
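# Illustrative usage (an assumption, not from the source project): any callable
# with the signature func(frame, args) -> image can be streamed this way.
# The Canny thresholds below are arbitrary placeholders.
def edge_filter(frame, args):
    lo_thresh, hi_thresh = args
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cv2.Canny(gray, lo_thresh, hi_thresh)

# videoize(edge_filter, (50, 150), src=0, win_name="Edges")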
def cvCaptureVideo():
capture = cv2.VideoCapture(0)
    if not capture.isOpened():
        raise IOError("Camera could not be opened")
cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)
while True:
ret, image = capture.read()
        if not ret:
            continue
cv2.imshow("Capture", image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
# Display the capture with Matplotlib (e.g., in a web/notebook view)
def MoG2(vid, min_thresh=800, max_thresh=10000):
'''
Args : Video object and threshold parameters
Returns : None
'''
cap = cv2.VideoCapture(vid)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
fgbg = cv2.createBackgroundSubtractorMOG2()
connectivity = 4
while(cap.isOpened()):
ret, frame = cap.read()
if not ret:
break
fgmask = fgbg.apply(frame)
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
            fgmask, connectivity, cv2.CV_32S)
        for i in range(1, num_labels):  # label 0 is the background component
            x, y, w, h, area = stats[i]
            if min_thresh <= area <= max_thresh:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('detection', frame)
        if cv2.waitKey(30) & 0xFF == 27:  # imshow needs waitKey to refresh; Esc exits
            break
cap.release()
cv2.destroyAllWindows()
def get_fps(source, Videolength):
cap = cv2.VideoCapture(source)
frame_counter = 0
print "Calculating Frames per second . . . "
while (True):
# Capture frame-by-frame
ret, frame = cap.read()
if not ret:
break
frame_counter += 1
cap.release()
cv2.destroyAllWindows()
    fps = frame_counter / float(Videolength)
    print("\nFPS is {}\n".format(fps))
return fps
# Algorithm to check intersection of line segments: it iteratively tests the
# segment formed by the vehicle's last two locations against each consecutive
# pair of points along the pedestrian path (see the sketch below).
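# A minimal sketch of that test (an illustration, not the project's actual
# implementation): two segments properly intersect when each segment's
# endpoints fall on strictly opposite sides of the other, determined with 2D
# cross products. Collinear/touching cases are deliberately ignored here.
def segments_intersect(p1, p2, q1, q2):
    def side(a, b, c):
        # sign of the cross product (b - a) x (c - a)
        return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])
    return (side(p1, p2, q1) * side(p1, p2, q2) < 0 and
            side(q1, q2, p1) * side(q1, q2, p2) < 0)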
def main():
NetworkTable.setIPAddress('10.19.37.2')
NetworkTable.setClientMode()
NetworkTable.initialize()
sd = NetworkTable.getTable('SmartDashboard')
#ms_list = []
while True:
time.sleep(0.1)
start_time = datetime.now()
        vision(sd)
        # elapsed milliseconds for this vision pass
        dt = datetime.now() - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
#ms_list.append(ms)
        print(ms)
#print np.mean(ms_list)
cv2.destroyAllWindows()
def color_quant(input,K,output):
img = cv2.imread(input)
Z = img.reshape((-1,3))
# convert to np.float32
Z = np.float32(Z)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)
    ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
# Now convert back into uint8, and make original image
center = np.uint8(center)
res = center[label.flatten()]
    res2 = res.reshape(img.shape)
cv2.imshow('res2',res2)
cv2.waitKey(0)
cv2.imwrite(output, res2)
cv2.destroyAllWindows()
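# Hypothetical invocation (filenames and K are placeholders):
# color_quant('input.jpg', 8, 'quantized_k8.jpg')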
def hdSolidBlock(fn = "redHDSolidBlock.jpg", bgr = None):
'''Generate test images as solid blocks of colour of known size, save to filename fn.'''
# Create a zero (black) image of HD size with 3 colour dimensions. Colour space assumed BGR by default.
h = 1080
w = 1920
img = np.zeros((h,w,3),dtype="uint8")
# Want to set all of the pixels to bgr tuple, default red, 8 bit colour
if not bgr:
bgr = [0,0,255]
img[:,:] = bgr
vw = ImageViewer(img)
vw.windowShow()
#cv2.imshow("zeroes", frame)
#ch = 0xff & cv2.waitKey(10000)
#cv2.destroyAllWindows()
cv2.imwrite(fn, img)
def show_cut_img(img_name):
img = cv2.imread(img_name, 0)
cut_img = cut(img)
cv2.imshow('cut image', cut_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return cut_img
# Find the logo whose id matches among the logoDirs entries
def show(im, allobj, S, w, h, cellx, celly):
for obj in allobj:
a = obj[5] % S
b = obj[5] // S
cx = a + obj[1]
cy = b + obj[2]
centerx = cx * cellx
centery = cy * celly
ww = obj[3]**2 * w
hh = obj[4]**2 * h
cv2.rectangle(im,
(int(centerx - ww/2), int(centery - hh/2)),
(int(centerx + ww/2), int(centery + hh/2)),
(0,0,255), 2)
cv2.imshow("result", im)
cv2.waitKey()
cv2.destroyAllWindows()
def save_images(self, dirname='dump'):
import os
img_no = 1
# Makes the directory
if not os.path.exists('./' + dirname):
os.mkdir(dirname)
while True:
self.grab_frame()
if self.debug:
cv2.imshow('frame', self.img)
k = cv2.waitKey(1) & 0xFF
if k == ord('s'):
cv2.imwrite(os.path.join(dirname, 'dump_' + str(img_no) + '.jpg'), self.img)
img_no += 1
elif k == ord('q'):
break
cv2.destroyAllWindows()
# Destructor
def process_video(path_to_video):
cap = cv2.VideoCapture(path_to_video) # Load video
while True:
ret, frame = cap.read()
        print(frame)
        if not ret or (cv2.waitKey(30) & 0xff) == 27:
            break  # exit when the video ends or Esc is pressed
mask = np.zeros_like(frame) # init mask
contours = find_contours(frame)
plates, plates_images, mask = find_plate_numbers(frame, contours, mask)
print "Plate Numbers: %s" % ", ".join(plates)
processed_frame = cv2.add(frame, mask) # Apply the mask to image
cv2.imshow('frame', processed_frame)
cv2.destroyAllWindows()
cap.release()
###########################################
# Run The Program #########################
###########################################
def show_img():
    global face_rect
    # Grab frames from the camera, detect faces, and display them continuously
    while True:
        img = cv.QueryFrame(cam)  # grab one frame from the camera
        # Resize the frame
        src = cv.CreateImage((img.width, img.height), 8, 3)
        cv.Resize(img, src, cv.CV_INTER_LINEAR)
        # Convert to grayscale
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)  # color frame to single-channel gray
        cv.EqualizeHist(gray, gray)  # equalize the histogram to normalize contrast
        rects = detect(gray, cascade)  # run the cascade detector on the gray image
        face_rect = rects
        # Draw rectangles around the detected faces
        draw_rects(src, rects, (0, 255, 0))
        # Show the annotated frame
        cv.ShowImage('DeepFace Wang_jun_qian', src)
        if cv2.waitKey(5) == 27:  # Esc exits the loop
            break
    cv2.destroyAllWindows()
def get_frames_every_x_sec(video, secs=1, fmt='opencv'):
    vidcap = cv2.VideoCapture(video)
    fps = get_frame_rate(vidcap)
    inc = int(fps * secs)
    length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    count = 0
    while vidcap.isOpened() and count <= length:
        # Read every frame so the stream actually advances; only frames on
        # the `inc` boundary are converted and yielded.
        success, image = vidcap.read()
        if not success:
            break
        if count % inc == 0:
            cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if fmt == 'PIL':
                im = Image.fromarray(cv2_im)
            #elif fmt == 'DISK':
            #    cv2.imwrite(os.path.join(path_output_dir, '%d.png') % count, image)
            else:
                im = cv2_im
            yield count, im
        count += 1
    cv2.destroyAllWindows()
    vidcap.release()
# image region: img = img[c1:c1+25,r1:r1+25] # roi = gray[y1:y2, x1:x2]
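# Added reminder (general NumPy/OpenCV fact): images are indexed [row, col],
# i.e. [y, x], so a rectangle given as (x, y, w, h) is cropped with:
# roi = img[y:y + h, x:x + w]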
def pick_corrs(images, n_pts_to_pick=4):
data = [ [[], 0, False, False, False, image, "Image %d" % i, n_pts_to_pick]
for i, image in enumerate(images)]
for d in data:
win_name = d[6]
cv2.namedWindow(win_name)
cv2.setMouseCallback(win_name, corr_picker_callback, d)
cv2.startWindowThread()
cv2.imshow(win_name, d[5])
key = None
while key != '\n' and key != '\r' and key != 'q':
key = cv2.waitKey(33)
key = chr(key & 255) if key >= 0 else None
cv2.destroyAllWindows()
if key == 'q':
return None
else:
return [d[0] for d in data]
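# Hedged usage note: corr_picker_callback is assumed to be defined elsewhere in
# this project and to append clicked (x, y) points into d[0] for each window.
# corrs = pick_corrs([img_left, img_right], n_pts_to_pick=4)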
def main(args):
    saveFace = None
cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
faces = face_cascade.detectMultiScale(frame, 1.3, 5)
if len(faces) > 0:
saveFace = frame
            break
# Display the resulting frame
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
cv2.imwrite('C:/Users/USER/Desktop/facenet-RealTime/src/face_data/saveFace.jpg',frame)
mypath = 'C:/Users/USER/Desktop/facenet-RealTime/src/face_data'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
myImage = []
for file in onlyfiles:
isImage = None
file = mypath + '/' + file
isImage = imghdr.what(file)
        if isImage is not None:
myImage.append(file)
#begin facenet
    cp.main(args, myImage)
def evaluate(img_col, args):
numpy.seterr(all='ignore')
assert isinstance(img_col, numpy.ndarray), 'img_col must be a numpy array'
assert img_col.ndim == 3, 'img_col must be a color image ({0} dimensions currently)'.format(img_col.ndim)
assert isinstance(args, argparse.Namespace), 'args must be of type argparse.Namespace not {0}'.format(type(args))
img_gry = cv2.cvtColor(img_col, cv2.COLOR_RGB2GRAY)
rows, cols = img_gry.shape
    crow, ccol = rows // 2, cols // 2  # integer indices for the FFT mask slice below
f = numpy.fft.fft2(img_gry)
fshift = numpy.fft.fftshift(f)
fshift[crow-75:crow+75, ccol-75:ccol+75] = 0
f_ishift = numpy.fft.ifftshift(fshift)
img_fft = numpy.fft.ifft2(f_ishift)
img_fft = 20*numpy.log(numpy.abs(img_fft))
if args.display and not args.testing:
cv2.destroyAllWindows()
scripts.display('img_fft', img_fft)
scripts.display('img_col', img_col)
cv2.waitKey(0)
result = numpy.mean(img_fft)
return img_fft, result, result < args.thresh
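# A usage sketch under stated assumptions: evaluate() only reads args.display,
# args.testing, and args.thresh, and expects an RGB image. The threshold value
# here is an arbitrary placeholder, not the project's tuned default.
import argparse
args = argparse.Namespace(display=False, testing=True, thresh=10)
img_col = cv2.cvtColor(cv2.imread('photo.jpg'), cv2.COLOR_BGR2RGB)
img_fft, score, is_blurry = evaluate(img_col, args)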
def end_game(self):
""" When everything is done, release the capture.
"""
if not self.piCam:
self.cam.release()
quit_coord = (self.screenwidth // 4, self.screenheight // 3)
try:
draw_text(quit_coord, self.photo,
"Press any key to quit_", font_scale=1)
except AttributeError:
cv2.destroyAllWindows()
# self.presentation(frame)
# self.photo = self.overlayUI(self.photo)
else:
self.piCamera.close()
cv2.imshow("PartyPi", self.photo)
cv2.waitKey(0)
cv2.destroyAllWindows()
def read():
db = shelve.open(filename)
imgs = db['imgs']
data = db['data']
for i in range(len(imgs)):
d = data[i]
print(i, d)
img = imgs[i]
        img = np.frombuffer(img, np.uint8)  # np.fromstring is deprecated
frame = cv2.imdecode(img, 1)
print('frame[{}] {}'.format(i, frame.shape))
cv2.imshow('camera', frame)
cv2.waitKey(300)
print('bye ...')
cv2.destroyAllWindows()
db.close()
def record(cam, runtime, mat):
vid = tp.PyERROR_CODE.PyERROR_CODE_FAILURE
out = False
while vid != tp.PyERROR_CODE.PySUCCESS and not out:
filepath = input("Enter filepath name: ")
vid = cam.enable_recording(filepath)
print(repr(vid))
if vid == tp.PyERROR_CODE.PySUCCESS:
print("Recording started...")
out = True
print("Hit spacebar to stop recording: ")
key = False
while key != 32: # for spacebar
err = cam.grab(runtime)
if err == tp.PyERROR_CODE.PySUCCESS:
cam.retrieve_image(mat)
cv2.imshow("ZED", mat.get_data())
key = cv2.waitKey(5)
cam.record()
else:
print("Help: you must enter the filepath + filename + SVO extension.")
print("Recording not started.")
cam.disable_recording()
print("Recording finished.")
cv2.destroyAllWindows()
def debug_face_classifier(file):
face_cascade = cv2.CascadeClassifier(xml_face_classifier)
image = cv2.imread(file)
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(image, 1.07, 3)
    print(faces)
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
#roi_gray = gray[y:y+h, x:x+w]
#roi_color = image[y:y+h, x:x+w]
cv2.imshow('Image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def image(self):
img = cv2.imread(self.image_path)
img = imutils.resize(img,width=min(800,img.shape[1]))
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray,(21,21),0)
fullbody = self.HogDescriptor(gray)
for (x,y,w,h) in fullbody:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
faces = self.haar_facedetection(gray)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = self.haar_eyedetection(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color, (ex,ey), (ex+ew,ey+eh), (0,255,0),2)
smile = self.haar_smilecascade(roi_gray)
for (sx,sy,sw,sh) in smile:
cv2.rectangle(roi_color, (sx,sy), (sx+sw,sy+sh),(0,255,0),2)
img = self.dlib_function(img)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def start_video(self, model):
camera = cv2.VideoCapture(0)
while True:
frame = camera.read()[1]
if frame is None:
continue
image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image_array = cv2.resize(image_array, (300, 300))
image_array = substract_mean(image_array)
image_array = np.expand_dims(image_array, 0)
predictions = model.predict(image_array)
detections = detect(predictions, self.prior_boxes)
plot_detections(detections, frame, 0.6,
self.arg_to_class, self.colors)
cv2.imshow('webcam', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
def test(path):
    cap = cv2.VideoCapture(path)  # use the parameter, not a stray global
    testing = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break  # stop at end of stream instead of crashing on a None frame
        res = cv2.resize(frame, (250, 250))
        gray_image = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        xarr = np.squeeze(np.array(gray_image).astype(np.float32))
        m, v = cv2.PCACompute(xarr, mean=None)  # the mean argument is required in OpenCV 3+
        arr = np.array(v)
        flat_arr = arr.ravel()
        testing.append(flat_arr)
#cv2.imshow('frame', frame)
#if cv2.waitKey(1) & 0xFF == ord("q"):
# break
#cap.release()
#cv2.destroyAllWindows()
    logos = svm.predict(testing)
    uniqlogos = list(set(logos))
for i in uniqlogos:
print(i)
def cluster(frame_matrix):
new_frame_matrix = []
i = 0
for frame in frame_matrix:
print "reader {} frame".format(i)
i += 1
Z = frame.reshape((-1, 1))
Z = np.float32(Z)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 2
        ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
center = np.uint8(center)
res = center[label.flatten()]
        res2 = res.reshape(frame.shape)
new_frame_matrix.append(res2)
cv2.imshow('res2', res2)
cv2.waitKey(1)
cv2.destroyAllWindows()
def face_train_video(train_path,subject,max_train,stream):
cap = cv2.VideoCapture(stream)
ret=True
ctr = 0
# minimum 10 frames/images per video
while(ctr < max_train):
# read till end of frames
ret, img = cap.read()
if not ret:
break
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow("Recognizing Face", img)
cv2.waitKey(10)
cv2.imwrite( join(train_path,subject)+ "." + str(ctr) +".jpg",img) # writes image to disk
ctr = ctr + 1
cap.release()
cv2.destroyAllWindows()
# predict live feed