# Python cv2.VideoCapture() example source code
import time
from os.path import join
from queue import Queue

import cv2
import numpy as np

# These snippets come from different projects; helpers such as cropImg, svm,
# producer, and FACE_CASCADE are defined in each snippet's own codebase.

def create_tensor(file1, mean_array):
    # Load a clip into a channels-first tensor: (channels, frames, h, w).
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.CAP_PROP_FRAME_COUNT))
    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    # Guard on count: CAP_PROP_FRAME_COUNT is an estimate and can
    # undercount, which would overflow the pre-allocated tensor.
    while count < len_1:
        ret, frame_1 = video_1.read()
        if frame_1 is None:
            break
        tensor_1[:, count, :, :] = np.swapaxes(
            cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
        count += 1
        print(count)
    video_1.release()
    return tensor_1
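# `cropImg` is not defined in these snippets. A minimal sketch, assuming it
# takes a square center crop before the 112x112 resize (the original
# project's crop may differ):
def cropImg(frame):
    # Square center crop so the resize does not distort the frame.
    h, w = frame.shape[:2]
    side = min(h, w)
    y0 = (h - side) // 2
    x0 = (w - side) // 2
    return frame[y0:y0 + side, x0:x0 + side]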
def create_tensor(file1, mean_array):
    # Same loader as above, but trimmed to the number of frames actually
    # decoded before returning.
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.CAP_PROP_FRAME_COUNT))
    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while count < len_1:
        ret, frame_1 = video_1.read()
        if frame_1 is None:
            break
        tensor_1[:, count, :, :] = np.swapaxes(
            cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
        count += 1
        print(count)
    video_1.release()
    # Keep only the frames that were filled in.
    tensor = tensor_1[:, :count, :, :]
    return tensor
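# A hedged usage sketch for the trimming loader above; 'example.avi' is a
# hypothetical path and the zero mean stands in for a real dataset mean:
def _demo_create_tensor():
    zero_mean = np.zeros((3, 112, 112))
    clip = create_tensor('example.avi', zero_mean)
    print(clip.shape)  # (3, frames_actually_read, 112, 112)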
def start_video(self, model):
    camera = cv2.VideoCapture(0)
    while True:
        frame = camera.read()[1]
        if frame is None:
            continue
        image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image_array = cv2.resize(image_array, (300, 300))
        image_array = substract_mean(image_array)
        image_array = np.expand_dims(image_array, 0)
        predictions = model.predict(image_array)
        detections = detect(predictions, self.prior_boxes)
        plot_detections(detections, frame, 0.6,
                        self.arg_to_class, self.colors)
        cv2.imshow('webcam', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    camera.release()
    cv2.destroyAllWindows()
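# `substract_mean` is not shown above. A plausible sketch, assuming the
# usual VGG-style per-channel mean subtraction on RGB input; the exact mean
# values are an assumption:
def substract_mean(image_array):
    image_array = image_array.astype('float32')
    image_array[:, :, 0] -= 123.68   # R
    image_array[:, :, 1] -= 116.779  # G
    image_array[:, :, 2] -= 103.939  # B
    return image_array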
def initialize(self):
    # Initialize video capture
    self.cap = cv2.VideoCapture(self.ID)
    frameRate = 20.0
    frameWidth = 640
    frameHeight = 480
    if cv2.__version__[0] == "2":
        # OpenCV 2.x property names
        self.cap.set(cv2.cv.CV_CAP_PROP_FPS, frameRate)
        self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, frameWidth)
        self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, frameHeight)
    else:
        # OpenCV 3.x and later property names
        self.cap.set(cv2.CAP_PROP_FPS, frameRate)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)
    self.thresh = 0.4
    self.thresh_img = np.zeros((frameHeight, frameWidth, 3), dtype=np.uint8)
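# The version dispatch above can be collapsed into a lookup; a sketch that
# resolves a property name against whichever module layout is installed:
def _cap_prop(name):
    if cv2.__version__[0] == "2":
        return getattr(cv2.cv, "CV_" + name)  # OpenCV 2.x: cv2.cv.CV_CAP_PROP_*
    return getattr(cv2, name)                 # OpenCV 3.x+: cv2.CAP_PROP_*

# usage: self.cap.set(_cap_prop("CAP_PROP_FPS"), frameRate)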
def capture(self):
    capture = cv2.VideoCapture(self.device)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
    if not capture.isOpened():
        raise Exception('Failed to open camera capture.')
    for _ in range(0, 10):
        ret, img = capture.read()
        if not ret or self._blur_index(img) < self.blur_thres:
            time.sleep(0.5)
            continue
        capture.release()
        return img
    capture.release()
    raise Exception('Failed to capture image.')
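# `self._blur_index` is not shown above. A common sharpness score is the
# variance of the Laplacian (higher means sharper); a standalone sketch of
# that metric:
def blur_index(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv2.Laplacian(gray, cv2.CV_64F).var()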
def start():
    cap = cv2.VideoCapture(0)
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        FaceArray = getFaceArray(frame)
        img2 = frame
        for r in FaceArray:
            img2 = cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
            img3 = frame[r[1]:r[3], r[0]:r[2]]  # crop the detected face region
            feature = Tools.get_feature(img3)
            name = readFace(feature)
            font = cv2.FONT_HERSHEY_SIMPLEX
            # Label the face at the top-left corner of its box, in (x, y) order.
            img2 = cv2.putText(img2, name, (r[0], r[1]), font, 1, (255, 255, 255), 2)
        cv2.imshow('frame', img2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
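# `getFaceArray` is not shown above. A sketch using the stock Haar cascade
# bundled with opencv-python, assuming boxes are returned as
# [x1, y1, x2, y2] to match the rectangle call above:
_face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def getFaceArray(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = _face_cascade.detectMultiScale(gray, 1.3, 5)
    return [[x, y, x + w, y + h] for (x, y, w, h) in faces]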
def get_frames(file_str):
    '''
    string => None
    Takes the path of a video, samples frames from it at a fixed
    rate, and writes those samples to a folder.
    '''
    vid = cv2.VideoCapture(file_str)
    if vid.isOpened():
        frame_count = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
        step_size = int(1 / float(pct_frames))
        for count in range(0, frame_count, step_size):
            w_path = write_path(file_str, count)
            vid.set(cv2.CAP_PROP_POS_FRAMES, count)
            ret, frame = vid.read()
            cv2.imwrite(w_path, frame)
        vid.release()
    else:
        print('unable to open file: {}'.format(file_str))
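# `write_path` and `pct_frames` are module-level names the snippet assumes.
# A sketch: sample 10% of frames and name outputs after the source video;
# the 'frames' folder is a hypothetical layout:
import os

pct_frames = 0.1

def write_path(file_str, count):
    base = os.path.splitext(os.path.basename(file_str))[0]
    return os.path.join('frames', '{}_{}.jpg'.format(base, count))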
def get_frames(file_str):
    '''
    string => ndarray
    Opens a video, seeks to the first sample point, and returns
    that frame.
    '''
    vid = cv2.VideoCapture(file_str)
    if vid.isOpened():
        frame_count = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
        step_size = int(1 / float(pct_frames))
        for count in range(0, frame_count, step_size):
            vid.set(cv2.CAP_PROP_POS_FRAMES, count)
            ret, frame = vid.read()
            vid.release()
            return frame
    else:
        print('unable to open file: {}'.format(file_str))
def read_video(self):
    vid = cv2.VideoCapture(self.video_path)
    if vid.isOpened():
        frame_count = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
        self.predictions = np.zeros((frame_count, 100, 100, 3))  # need to know frame size
        for count in range(frame_count):
            ret, frame = vid.read()  # probably don't want to get every frame
            processed_frame = self.process_frame(frame)
            self.predictions[count] = processed_frame
        vid.release()
    else:
        print('unable to open file: {}'.format(self.video_path))
# maybe should separate this algo, or somehow automatically detect what the model accepts
# should probably convert to float32, divide by 255.
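# A sketch of the `process_frame` method referenced above, following the
# trailing comments: resize to the 100x100 buffer, cast to float32, and
# scale into [0, 1]. The interpolation mode is an assumption:
def process_frame(self, frame):
    frame = cv2.resize(frame, (100, 100), interpolation=cv2.INTER_AREA)
    return frame.astype('float32') / 255.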
def __init__(self, name, ui=myo_emg.Ui_MainWindow(), cap=capture.capture()):
    super(VideoThread, self).__init__()
    self.flag = True
    self.start_flag = False
    self.support_flag = True
    self.name = name
    self.cap = cap
    self.ui = ui
    self.out = None
    self.stop_signal.connect(self.stop_play)
    self.image_siganl.connect(self.saving_video)
    self.start_signal.connect(self.start_capture)
    self.cap.path_signal.connect(self.save_video)
    if self.name == "Video":
        self.videoLabel = ui.Video
        self.camera = cv2.VideoCapture("instruction.mp4")
        self.fps = self.camera.get(cv2.CAP_PROP_FPS)
    elif self.name == "Camera":
        self.videoLabel = ui.Camera
        self.camera = cv2.VideoCapture(camera_port)
def test(path):
    cap = cv2.VideoCapture(path)
    testing = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break  # stop at end of video so the prediction below is reachable
        res = cv2.resize(frame, (250, 250))
        gray_image = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        xarr = np.squeeze(np.array(gray_image).astype(np.float32))
        m, v = cv2.PCACompute(xarr, mean=None)
        arr = np.array(v)
        flat_arr = arr.ravel()
        testing.append(flat_arr)
    cap.release()
    logos = svm.predict(testing)
    uniqlogos = list(set(logos))
    for i in uniqlogos:
        print(i)
def face_train_video(train_path, subject, max_train, stream):
    cap = cv2.VideoCapture(stream)
    ctr = 0
    # minimum 10 frames/images per video
    while ctr < max_train:
        # read till end of frames
        ret, img = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        cv2.imshow("Recognizing Face", img)
        cv2.waitKey(10)
        cv2.imwrite(join(train_path, subject) + "." + str(ctr) + ".jpg", img)  # write image to disk
        ctr = ctr + 1
    cap.release()
    cv2.destroyAllWindows()
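# A hedged usage sketch: grab ten training images of one subject from the
# default webcam; the folder and subject name are hypothetical:
def _demo_face_train():
    face_train_video('train_data', 'alice', max_train=10, stream=0)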
# predict live feed
def receive():
    '''
    1. Locate screen
    2. Follow the variations of intensity in the screen
    '''
    sampling_period = 1.0 / SAMPLING_FREQUENCY  # float division, even on Python 2
    f = open(EXCHANGE_FILE_PATH, 'w')
    f.write('')
    x, y, w, h = screen_position()
    if (x, y, w, h) == (-1, -1, -1, -1):
        print("Unable to detect screen")
        return
    cap = cv2.VideoCapture(0)
    values = []
    try:
        while True:
            ret, frame = cap.read()
            sub_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)[y:y + h, x:x + w]
            values.append(str(np.mean(sub_frame)))
            time.sleep(sampling_period)  # pace reads at SAMPLING_FREQUENCY
    except KeyboardInterrupt:
        pass
    cap.release()
    f.write('\n'.join(values))
    f.close()
    decode()
def recognize_video(face_recognizer):
    cap = cv2.VideoCapture(0)
    while True:
        if cap.grab():
            ref, image = cap.retrieve()
            image_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = FACE_CASCADE.detectMultiScale(image_grey, scaleFactor=1.16,
                                                  minNeighbors=5, minSize=(25, 25), flags=0)
            for x, y, w, h in faces:
                sub_img = image_grey[y:y + h, x:x + w]
                img = image[y:y + h, x:x + w]
                nbr, conf = face_recognizer.predict(sub_img)
                cv2.rectangle(image, (x - 5, y - 5), (x + w + 5, y + h + 5), (255, 255, 0), 2)
                cv2.putText(image, Data_list[nbr], (x, y - 10), FONT, 0.5, (255, 255, 0), 1)
            cv2.imshow("Faces Found", image)
        # Poll the keyboard once per frame; calling waitKey twice can miss keys.
        key = cv2.waitKey(1) & 0xFF
        if key in (ord('q'), ord('Q')):
            break
    Datafile["Data"] = Data_list
    Datafile.close()
    cap.release()
    cv2.destroyAllWindows()
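# A hedged setup sketch for the recognizer passed in above, using the LBPH
# recognizer from opencv-contrib-python; the model file is hypothetical and
# assumed to have been trained elsewhere:
def load_recognizer(model_path='face_model.yml'):
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read(model_path)
    return recognizer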
def video_emitter(video):
    # Open the video
    video = cv2.VideoCapture(video)
    print(' emitting.....')
    # read the file
    while video.isOpened():
        # read the image in each frame
        success, image = video.read()
        # check if the file has reached the end
        if not success:
            break
        # encode the frame as PNG
        ret, png = cv2.imencode('.png', image)
        # convert the image to bytes and send to Kafka
        producer.send_messages(topic, png.tobytes())
        # to reduce CPU usage, sleep 0.2 s between frames
        time.sleep(0.2)
    # clear the capture
    video.release()
    print('done emitting')
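# The snippet assumes module-level `producer` and `topic` objects. A sketch
# using the legacy kafka-python SimpleProducer API (removed in 2.0) that
# matches the send_messages call above; broker address and topic are
# assumptions:
from kafka import KafkaClient, SimpleProducer

kafka = KafkaClient('localhost:9092')
producer = SimpleProducer(kafka)
topic = 'video-frames'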
def __init__(self, path, queue_size=128):
    self.stream = cv2.VideoCapture(path)
    self.exit = False
    self.queue = Queue(maxsize=queue_size)
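# A sketch of the companion reader methods for the queued stream above:
# decode frames on a daemon thread and block when the queue fills. The
# method names are assumptions beyond what the constructor shows:
from threading import Thread

def start(self):
    t = Thread(target=self._update, daemon=True)
    t.start()
    return self

def _update(self):
    while not self.exit:
        ret, frame = self.stream.read()
        if not ret:
            self.exit = True
            break
        self.queue.put(frame)  # blocks when the queue is full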
def __init__(self, cubo, settings):
    """TODO: to be defined."""
    self.camera_id = DEFAULT_CAMERA
    self.cap = cv2.VideoCapture(self.camera_id)
    self.settings = settings
    self.cubo = cubo
    self.lastmov = 0
    self.lastmovtam = 1
    self.status = ""
    self.reset()