def grab(cam, queue, width, height, fps):
    """Continuously grab frames from `cam` and push them onto `queue`."""
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    while running:
        capture.grab()
        retval, img = capture.retrieve(0)
        if not retval:
            continue  # skip frames the camera failed to deliver
        frame = {}
        frame["img"] = img
        frame["1"] = config["1"]
        frame["2"] = config["2"]
        frame["blur"] = get_blur(img, 0.05)
        # Drop frames rather than let the queue grow without bound
        if queue.qsize() < 10:
            queue.put(frame)
        else:
            print(queue.qsize())
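# A minimal sketch of driving grab() from a worker thread and reading frames
# back on the main thread. The `running`, `config`, and `get_blur` globals that
# grab() relies on are assumed to be defined in this module; every name below
# is illustrative, not part of the original code.
import queue as frame_queue_mod
import threading

frame_queue = frame_queue_mod.Queue()
running = True
grab_thread = threading.Thread(target=grab, args=(0, frame_queue, 640, 480, 30))
grab_thread.start()
latest = frame_queue.get()   # blocks until the grabber produces a frame
print(latest["blur"])        # sharpness metric computed by get_blur()
running = False              # signal the grabber loop to exit
grab_thread.join()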
def load_videos(video_file):
    """Sample every 10th frame of `video_file` into a uint8 array."""
    capture = cv2.VideoCapture(video_file)
    read_flag, frame = capture.read()
    vid_frames = []
    i = 1
    while read_flag:
        if i % 10 == 0:
            vid_frames.append(frame)
        read_flag, frame = capture.read()
        i += 1
    # Drop the last sampled frame
    vid_frames = np.asarray(vid_frames, dtype='uint8')[:-1]
    capture.release()
    print(i)
    return vid_frames
def CaptureImage():
    imageName = 'DontCare.jpg'  # Placeholder name, replaced on capture
    cap = cv2.VideoCapture(0)
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # To capture the image in monochrome
        rgbImage = frame  # Keep the frame in color (note: OpenCV delivers BGR, not RGB)
        # Display the resulting frame
        cv2.imshow('Webcam', rgbImage)
        # Press the 'q' key to capture the image
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # Name the image after the date and time it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            # Save the image
            cv2.imwrite(imageName, rgbImage)
            break
    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    # Return the captured image's name
    return imageName
def do_key_press(symbol, modifiers):
    global cur_vector
    print("SO: {}".format(symbol))
    if symbol == key.R:
        if theApp.use_camera:
            theApp.set_camera_recording(not theApp.camera_recording)
    if symbol == key.T:
        theApp.show_camera = not theApp.show_camera
    elif symbol == key.SPACE:
        print("SPACEBAR")
        snapshot(None)
    elif symbol == key.ESCAPE:
        print("ESCAPE")
        cv2.destroyAllWindows()
        if theApp.use_camera:
            # Note: this releases a brand-new capture handle, not the one the
            # app already holds; releasing the app's own VideoCapture object
            # would be the proper cleanup here.
            cv2.VideoCapture(0).release()
        sys.exit(0)
def showVideoInfo(video_path):
    try:
        vhandle = cv2.VideoCapture(video_path)  # Also handles non-ASCII (e.g. Chinese) file names
        fps = vhandle.get(cv2.CAP_PROP_FPS)
        count = vhandle.get(cv2.CAP_PROP_FRAME_COUNT)
        size = (int(vhandle.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(vhandle.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        ret, firstframe = vhandle.read()
        if ret:
            print("FPS: %.2f" % fps)
            print("COUNT: %.2f" % count)
            print("WIDTH: %d" % size[0])
            print("HEIGHT: %d" % size[1])
            return vhandle, fps, size, firstframe
        else:
            print("Video could not be read!")
    except Exception as e:
        print("Error in showVideoInfo: {}".format(e))
def videoize(func, args, src=0, win_name="Cam", delim_wait=1, delim_key=27):
    cap = cv2.VideoCapture(src)
    while True:
        ret, frame = cap.read()
        # Downscale to speed up processing; almost real-time on my PC
        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
        frame = cv2.flip(frame, 1)
        out = func(frame, args)
        if out is None:
            continue
        out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
        cv2.imshow(win_name, out)
        # Integer division: moveWindow expects int pixel coordinates
        cv2.moveWindow(win_name, (s_w - out.shape[1]) // 2, (s_h - out.shape[0]) // 2)
        k = cv2.waitKey(delim_wait)
        if k == delim_key:
            cv2.destroyAllWindows()
            cap.release()
            return
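# A hedged usage sketch: videoize() wraps any per-frame function in a
# capture/display loop. The Canny wrapper below is illustrative, and the
# screen-size globals s_w and s_h are assumptions (the original defines them
# elsewhere).
def edges(frame, args):
    low, high = args
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cv2.Canny(gray, low, high)

s_w, s_h = 1920, 1080  # assumed screen resolution
videoize(edges, (50, 150), src=0, win_name="Edges")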
def video(seconds, frameRate):
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        return "error"
    # Define the codec and create a VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    name = "media/video/" + time.strftime("%d-%m-%Y_%X") + ".avi"
    out = cv2.VideoWriter(name, fourcc, frameRate, (640, 480))
    nFrames = 0
    while nFrames < seconds * frameRate:
        ret, frame = cap.read()
        if ret:
            out.write(frame)
            nFrames += 1
        else:
            break
    out.release()
    cap.release()
    return name
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)
    if not capture.isOpened():
        raise IOError("Failed to open the capture device")
    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)
    while True:
        ret, image = capture.read()
        if not ret:
            continue
        cv2.imshow("Capture", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()
def __init__(self, ip, port, level, version):
    threading.Thread.__init__(self)
    self.setDaemon(True)
    self.ADDR = (ip, port)
    # Clamp the frame-skip interval to at most 3
    if level <= 3:
        self.interval = level
    else:
        self.interval = 3
    # Scale factor shrinks as more frames are skipped, but never below 0.3
    self.fx = 1.0 / (self.interval + 1)
    if self.fx < 0.3:
        self.fx = 0.3
    if version == 4:
        self.sock = socket(AF_INET, SOCK_STREAM)
    else:
        self.sock = socket(AF_INET6, SOCK_STREAM)
    self.cap = cv2.VideoCapture(0)
def __init__(self, ip, port, showme, level, version):
    threading.Thread.__init__(self)
    self.setDaemon(True)
    self.ADDR = (ip, port)
    self.showme = showme
    # Map levels 0-2 directly to the frame-skip interval; anything else gets 3
    self.interval = level if level in (0, 1, 2) else 3
    # Scale factor shrinks as more frames are skipped, but never below 0.3
    self.fx = 1.0 / (self.interval + 1)
    if self.fx < 0.3:
        self.fx = 0.3
    if version == 4:
        self.sock = socket(AF_INET, SOCK_STREAM)
    else:
        self.sock = socket(AF_INET6, SOCK_STREAM)
    self.cap = cv2.VideoCapture(0)
    print("VIDEO client starts...")
def MoG2(vid, min_thresh=800, max_thresh=10000):
    '''
    Args    : Video path and blob-area threshold parameters
    Returns : None
    '''
    cap = cv2.VideoCapture(vid)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    connectivity = 4
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)
        # Morphological opening removes speckle noise from the foreground mask
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        output = cv2.connectedComponentsWithStats(
            fgmask, connectivity, cv2.CV_32S)
        # Label 0 is the background; stats column 4 is the component area
        for i in range(1, output[0]):
            if min_thresh <= output[2][i][4] <= max_thresh:
                cv2.rectangle(frame, (output[2][i][0], output[2][i][1]), (
                    output[2][i][0] + output[2][i][2], output[2][i][1] + output[2][i][3]), (0, 255, 0), 2)
        cv2.imshow('detection', frame)
        # Without a waitKey call the window never refreshes; Esc exits early
        if cv2.waitKey(30) & 0xFF == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
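# A hedged usage example: the area thresholds keep blobs big enough to be
# vehicles or people while discarding speckle and whole-frame changes. The
# file name below is hypothetical.
MoG2('traffic.avi', min_thresh=800, max_thresh=10000)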
def write():
    if os.path.exists(filename):
        os.remove(filename)
    cap = cv2.VideoCapture(0)
    db = shelve.open(filename)
    imgs = []
    data = list(range(100))
    for i in range(100):
        ret, frame = cap.read()
        if ret:
            # Storing raw frames takes ~29 MB; JPEG-encoding shrinks this to ~1.9 MB
            jpg = cv2.imencode('.jpg', frame)[1].tobytes()
            imgs.append(jpg)
            print('frame[{}] {}'.format(i, frame.shape))
            time.sleep(0.03)
    db['imgs'] = imgs
    db['data'] = data
    cap.release()
    db.close()
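# A hedged sketch of the matching read side: decode the JPEG bytes that
# write() stored in the shelve back into numpy frames. `filename` is the same
# module-level path that write() uses.
import numpy as np

def read():
    db = shelve.open(filename)
    for jpg in db['imgs']:
        frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('playback', frame)
        cv2.waitKey(30)
    db.close()
    cv2.destroyAllWindows()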
def video3d(self, filename, color=False, skip=True):
    cap = cv2.VideoCapture(filename)
    nframe = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    if skip:
        # Sample self.depth frames spread evenly across the whole video
        frames = [int(x * nframe / self.depth) for x in range(self.depth)]
    else:
        # Take the first self.depth frames
        frames = [x for x in range(self.depth)]
    framearray = []
    for i in range(self.depth):
        cap.set(cv2.CAP_PROP_POS_FRAMES, frames[i])
        ret, frame = cap.read()
        # Note: cv2.resize takes (width, height); passing (height, width) only
        # behaves as intended when the two are equal
        frame = cv2.resize(frame, (self.height, self.width))
        if color:
            framearray.append(frame)
        else:
            framearray.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    cap.release()
    return np.array(framearray)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
parser.add_argument('--weight_dir', default='weights', type=str)
parser.add_argument('--data_dir', default="data", type=str)
parser.add_argument('--gpu', default='', type=str)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
yolo = YOLONet(False)
weight_file = os.path.join(args.data_dir, args.weight_dir, args.weights)
detector = Detector(yolo, weight_file)
# detect from camera
# cap = cv2.VideoCapture(-1)
# detector.camera_detector(cap)
# detect from image file
imname = 'test/person.jpg'
detector.image_detector(imname)
def get_fps(source, Videolength):
    cap = cv2.VideoCapture(source)
    frame_counter = 0
    print("Calculating frames per second . . . ")
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break
        frame_counter += 1
    cap.release()
    cv2.destroyAllWindows()
    # True division: total frames over the clip length in seconds
    fps = frame_counter / float(Videolength)
    print("\nFPS is " + str(fps) + "\n")
    return fps
# Algorithm to check intersection of line segments:
# it iteratively tests intersection between the last pair of points on the
# vehicle's path and each consecutive pair of points on the pedestrian path.
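# A hedged sketch of the segment-intersection test described above, using the
# standard orientation (cross-product) method; these helper names are
# illustrative, not from the original project.
def ccw(A, B, C):
    # True when A, B, C are arranged counter-clockwise
    return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])

def segments_intersect(p1, p2, q1, q2):
    # Proper intersection: the endpoints of each segment straddle the other
    return ccw(p1, q1, q2) != ccw(p2, q1, q2) and ccw(p1, p2, q1) != ccw(p1, p2, q2)

def path_crosses(vehicle_path, pedestrian_path):
    # Compare the vehicle's last step against every pedestrian step
    p1, p2 = vehicle_path[-2], vehicle_path[-1]
    return any(segments_intersect(p1, p2, q1, q2)
               for q1, q2 in zip(pedestrian_path, pedestrian_path[1:]))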
def dump_frames(vid_path):
    import cv2
    video = cv2.VideoCapture(vid_path)
    vid_name = vid_path.split('/')[-1].split('.')[0]
    out_full_path = os.path.join(out_path, vid_name)
    fcount = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    try:
        os.mkdir(out_full_path)
    except OSError:
        pass  # output directory already exists
    file_list = []
    for i in range(fcount):
        ret, frame = video.read()
        assert ret
        cv2.imwrite('{}/{:06d}.jpg'.format(out_full_path, i), frame)
        access_path = '{}/{:06d}.jpg'.format(vid_name, i)
        file_list.append(access_path)
    print('{} done'.format(vid_name))
    sys.stdout.flush()
    return file_list
def __init__(self, labels, video_file, box_saver, border=30):
    """
    The GUI labeler.
    :param labels: the label name string list
    :param video_file: the video file path
    :param box_saver: the saver used to write out labeled boxes
    :param border: the border of the center clip field (white line around the video)
    """
    self.cam = cv2.VideoCapture(video_file)
    self.video_stat = VideoStat(border)
    self.label_stat = LabelStat(labels)
    self.labels = labels
    self.box_saver = box_saver
    # The windows must exist before mouse callbacks can be attached to them
    cv2.namedWindow("video")
    cv2.namedWindow("label")
    cv2.setMouseCallback("video", self.video_click)
    cv2.setMouseCallback("label", self.label_click)
    self.run()
def process_video(path_to_video):
    cap = cv2.VideoCapture(path_to_video)  # Load video
    while True:
        ret, frame = cap.read()
        if ret is False or (cv2.waitKey(30) & 0xff) == 27:
            break  # Exit when the video ends or Esc is pressed
        mask = np.zeros_like(frame)  # Init mask
        contours = find_contours(frame)
        plates, plates_images, mask = find_plate_numbers(frame, contours, mask)
        print("Plate Numbers: %s" % ", ".join(plates))
        processed_frame = cv2.add(frame, mask)  # Overlay the mask on the frame
        cv2.imshow('frame', processed_frame)
    cv2.destroyAllWindows()
    cap.release()
###########################################
# Run The Program #########################
###########################################
def get_frames_every_x_sec(video, secs=1, fmt='opencv'):
    vidcap = cv2.VideoCapture(video)
    fps = get_frame_rate(vidcap)
    inc = int(fps * secs)
    length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    count = 0
    while vidcap.isOpened() and count <= length:
        # Read every frame so the stream advances, but only yield every inc-th
        success, image = vidcap.read()
        if not success:
            break
        if count % inc == 0:
            cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if fmt == 'PIL':
                im = Image.fromarray(cv2_im)
            # elif fmt == 'DISK':
            #     cv2.imwrite(os.path.join(path_output_dir, '%d.png') % count, image)
            else:
                im = cv2_im
            yield count, im
        count += 1
    cv2.destroyAllWindows()
    vidcap.release()
# image region: img = img[c1:c1+25,r1:r1+25] # roi = gray[y1:y2, x1:x2]
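# A hedged illustration of the ROI slicing noted above: numpy indexes rows (y)
# first, then columns (x), and the slice is a view into the original frame.
# The coordinates are arbitrary examples.
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in image
y1, y2, x1, x2 = 100, 200, 150, 300
roi = frame[y1:y2, x1:x2]  # 100x150 sub-image sharing memory with frame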
def main():
    # Load the model architecture and weights:
    model_file = open('Data/Model/model.json', 'r')
    model = model_file.read()
    model_file.close()
    model = model_from_json(model)
    model.load_weights("Data/Model/weights.h5")
    # Get camera:
    cap = cv2.VideoCapture(0)
    # Open game in browser:
    open_game(browser='chrome', url='http://apps.thecodepost.org/trex/trex.html')
    while True:
        # Get image from camera and map the prediction to a key action:
        ret, img = cap.read()
        Y = predict(model, img)
        if Y == 0:
            release()
        elif Y == 1:
            press()
    cap.release()
def main(args):
    saveFace = None
    cap = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        faces = face_cascade.detectMultiScale(frame, 1.3, 5)
        if len(faces) > 0:
            saveFace = frame
            break
        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    # Save the frame that contained a face
    cv2.imwrite('C:/Users/USER/Desktop/facenet-RealTime/src/face_data/saveFace.jpg', saveFace)
    mypath = 'C:/Users/USER/Desktop/facenet-RealTime/src/face_data'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    myImage = []
    for file in onlyfiles:
        file = mypath + '/' + file
        if imghdr.what(file) is not None:
            myImage.append(file)
    # Begin facenet
    cp.main(args, myImage)
def start(self):
    """
    Create the stream object and process it.
    """
    if self.protocol == "image":  # '==', not 'is': identity checks on strings are unreliable
        image = cv2.imread(self.ip_address, 1)
        plate = self.analize_plate.proccess(
            cv2.imencode('.jpg', image)[1].tobytes())
        if plate:
            print(plate['results'])
    else:
        stream = cv2.VideoCapture(self.url)
        self.proccess(stream)
        # return stream
def initialize_webcam(self):
    """ Initialize camera and screenwidth and screenheight.
    """
    device = 'raspberry' if 'raspberrypi' in os.uname() else None
    self.raspberry = device == 'raspberry'
    if self.piCam:
        camera = self.setup_picamera()
        self.piCamera = camera
        return
    cam = cv2.VideoCapture(0)
    frame = None
    # Keep polling until the camera delivers a first frame
    while frame is None:
        try:
            _, frame = cam.read()
            # Update class variables.
            self.screenheight, self.screenwidth = frame.shape[:2]
            cam.set(cv2.CAP_PROP_FRAME_WIDTH, self.screenwidth)
            cam.set(cv2.CAP_PROP_FRAME_HEIGHT, self.screenheight)
        except AttributeError:
            pass  # frame was None, so the shape lookup failed; try again
    self.cam = cam
    return
def main():
    # cap = cv2.VideoCapture(0)
    # Note: each os.system() call runs in its own subshell, so these 'cd'
    # commands do not affect the commands that follow them
    os.system("cd /dev")
    os.system("v4l2-ctl --set-fmt-video=width=1920,height=1080,pixelformat=1")
    os.system("cd ~/CanLauncher")
    os.system("config-pin -a P9_14 pwm")
    os.system("config-pin -a P9_21 pwm")
    os.system("config-pin -a P9_22 pwm")
    GPIO.setup(startButton, GPIO.IN)
    GPIO.setup(confirmButton, GPIO.IN)
    # GPIO.setup(launchButton, GPIO.IN)
    time.sleep(0.5)
    boom()
def startCapture(self):
    global new_user_added
    if new_user_added:
        self.initDir()
        self.capturing = True
        self.capture = cv2.VideoCapture(camera_port)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.video_size.width())
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.video_size.height())
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.display_video_stream)
        self.timer.start(30)  # refresh roughly every 30 ms (~33 fps)
    else:
        self.messageLbl.setText('Warning: First create a new user')
def spaced_frames(parser, start=None, end=None, interval=None, num_samples=None, fuzz=4):
    if (interval is None and num_samples is None) or None not in (interval, num_samples):
        raise ValueError('exactly one of (interval, num_samples) must be set')
    vc = cv2.VideoCapture(parser.stream)
    fps = vc.get(cv2.CAP_PROP_FPS)
    video_length = vc.get(cv2.CAP_PROP_FRAME_COUNT) / fps
    if not start or start < 0:
        start = 0
    if not end or end > video_length:
        end = video_length
    total_time = end - start
    if not num_samples:
        num_samples = int(total_time // interval)
    for time in np.linspace(start, end, num=num_samples):
        # Jitter each sample point by up to `fuzz` frames in either direction
        time += randint(-1 * fuzz, fuzz) / fps
        time = min([max([0, time]), total_time])
        vc.set(cv2.CAP_PROP_POS_MSEC, int(time * 1000))
        success, frame = vc.read()
        if success:
            yield (time, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    return
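# A hedged usage sketch: pull roughly 20 evenly spaced grayscale frames from a
# parsed stream. `parser` is assumed to expose a `.stream` path, as above.
for t, gray in spaced_frames(parser, num_samples=20):
    print('sampled frame at {:.2f}s, shape {}'.format(t, gray.shape))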
def create_tensor(file1, mean_array):
    video_1 = cv2.VideoCapture(file1)
    # Note: the frame count OpenCV reports is not always accurate, so the
    # tensor is trimmed afterwards to the number of frames actually read
    len_1 = int(video_1.get(cv2.CAP_PROP_FRAME_COUNT))
    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            # Crop, resize to 112x112, move channels first, subtract the mean
            tensor_1[:, count, :, :] = np.swapaxes(cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
            count = count + 1
            print(count)
        else:
            break
    tensor = tensor_1[:, :count, :, :]
    return tensor