def main():
    """Run the retro-tape vision pipeline on camera 0 and publish results.

    Publishes to the 'Vision' NetworkTables table:
      - 'FPS' and 'Average Area' every 200 processed frames
      - 'est_distance' on every frame with a successful capture
    Loops until the capture closes; never returns a value.
    """
    logging.basicConfig(level=logging.DEBUG)
    print('Initializing NetworkTables')
    # NetworkTables.setTeam(2729)
    # NetworkTables.setClientMode()
    # NetworkTables.setIPAddress('10.27.29.202')
    NetworkTables.initialize(server='roboRIO-2729-frc.local')
    print('Creating pipeline')
    pipeline = Retrotape()
    print('Creating video capture')
    cap = cv2.VideoCapture(0)
    print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    cap.set(cv2.CAP_PROP_EXPOSURE, 0)
    cap.set(cv2.CAP_PROP_BRIGHTNESS, 30)
    print('Running pipeline')
    # Hoisted out of the loop: the table handle and the empirical scaling
    # factor are invariant, so fetching/binding them per frame was waste.
    table = NetworkTables.getTable('Vision')
    scaling = 6.8  # area units -> distance-equation input; tuned empirically
    iteration = 0
    total = 0
    curr_time = datetime.now()
    while cap.isOpened():
        have_frame, frame = cap.read()
        if have_frame:
            pipeline.process(frame)
            currArea = extra_processing(pipeline)
            total += currArea
            iteration += 1
            # ***EQUATION DISTANCE VS AREA*** 53111e^(-1.702x)
            # ***Inverse*** ln(A/53111)/-1.702 = d
            # ***Inverse Test2 -1.0142ln(.0000578938x)
            if iteration % 200 == 0:
                # Report throughput and mean area over the last 200 frames.
                table.putNumber('FPS', 200 / (datetime.now() - curr_time).total_seconds())
                curr_time = datetime.now()
                table.putNumber('Average Area', total / 200)
                print(total / 200)
                iteration = 0
                total = 0
            estDistance = distanceEstimate(currArea * scaling)
            table.putNumber('est_distance', estDistance)
    print('Capture closed')
# Python example snippets using cv2.CAP_PROP_FRAME_HEIGHT (collected sources)
def _cam_setup(self, source, w, h, fps, exposure):
    """Open and configure a capture device, returning it or None on failure.

    Sets frame size through OpenCV, then uses v4l2-ctl (via self._v4l2_call)
    for frame rate, manual exposure, and white balance, since OpenCV cannot
    control those on this hardware. Registers release() with atexit.
    """
    capture = cv2.VideoCapture(source)
    if not capture.isOpened():
        return None
    atexit.register(capture.release)
    # Request frame size (note: this fails in OpenCV 2.4.x but works with 3.2)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, w)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    # Grab one frame up front, just in case it's needed before setting params
    ok, _ = capture.read()
    if not ok:
        capture.release()
        return None
    # Hacks using v4l2-ctl to set capture parameters we can't control through
    # OpenCV. Change the AWB setting once to make sure the new settings are
    # actually used — not sure why this is required, but it appears to work;
    # without it, white balance stays automatic even after the manual setting
    # below.
    self._v4l2_call("--set-ctrl=white_balance_auto_preset=1")
    settings = [
        # Frame rate. exposure_time_absolute below is given in multiples of
        # 0.1 ms; if fps is set too high the exposure time gets adjusted
        # automatically to allow the higher frame rate.
        "-p %d" % fps,
        "--set-ctrl=auto_exposure=1",  # 0=auto, 1=manual
        "--set-ctrl=exposure_time_absolute=%d" % exposure,
        "--set-ctrl=white_balance_auto_preset=0",
        "--set-ctrl=red_balance=1000",
        "--set-ctrl=blue_balance=1000",
    ]
    self._v4l2_call(" ".join(settings))
    logging.info("Set exposure via v4l2-ctl. Capturing/dumping frames so settings take effect before tracking starts.")
    # Dump a few frames so the v4l2 settings take effect before tracking.
    for _ in range(5):
        capture.read()
    return capture
def run():
    """Stereo-vision obstacle-avoidance loop.

    Reads left/right captures, computes a disparity map via util.getDisparity,
    and prints movement commands ('forward'/'backward'/'turn left'/'turn right')
    based on util.getOriention codes: 0=backward, 1=turn (turnTo: 3=left,
    4=right), 2=forward. Runs until 'q' is pressed in the preview window.

    Fixes vs. original: Python 2 `print 'x'` statements converted to print()
    calls (the file otherwise uses the print function), and the final
    release() calls referenced undefined names cap0/cap1 (NameError) instead
    of capL/capR.
    """
    capL = cv2.VideoCapture(1)
    capL.set(cv2.CAP_PROP_FRAME_WIDTH, BINCAP_W)
    capL.set(cv2.CAP_PROP_FRAME_HEIGHT, BINCAP_H)
    capR = cv2.VideoCapture(0)
    capR.set(cv2.CAP_PROP_FRAME_WIDTH, BINCAP_W)
    capR.set(cv2.CAP_PROP_FRAME_HEIGHT, BINCAP_H)
    while True:
        disparity = util.getDisparity(capL.read(), capR.read())
        cv2.imshow('disparity', disparity)
        orient = util.getOriention(disparity)
        if orient == 2:
            print('forward')
            continue
        while orient == 0:
            print('backward')
            disparity = util.getDisparity(capL.read(), capR.read())
            cv2.imshow('disparity', disparity)
            orient = util.getOriention(disparity)
        while orient == 1:
            turn = turnTo(disparity)
            # turn left
            if turn == 3:
                while orient != 2:
                    print('turn left')
                    disparity = util.getDisparity(capL.read(), capR.read())
                    cv2.imshow('disparity', disparity)
                    orient = util.getOriention(disparity)
            # turn right
            elif turn == 4:
                while orient != 2:
                    print('turn right')
                    disparity = util.getDisparity(capL.read(), capR.read())
                    cv2.imshow('disparity', disparity)
                    orient = util.getOriention(disparity)
        # end
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capL.release()  # was cap0.release() — undefined name
    capR.release()  # was cap1.release() — undefined name
    cv2.destroyAllWindows()
def main():
    """Apply a color mask to a frame range of a video and write an X264 mp4.

    Reads CLI options from the module-level `parser` (in_video, mask_file,
    out_video, start_from, end_with, frame_count). frame_count == -1 means
    every frame in [start_from, end_with); otherwise roughly frame_count
    evenly-spaced frames are processed. Returns 0.
    """
    args = parser.parse_args()
    mask = cv2.imread(args.mask_file, cv2.IMREAD_COLOR)
    cap = cv2.VideoCapture(args.in_video)
    # CAP_PROP_FRAME_COUNT returns a float; cast so the {:d} format below
    # doesn't raise ValueError.
    last_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
    if args.end_with == -1:
        args.end_with = last_frame
    else:
        if args.end_with > last_frame:
            print(
                "Warning: specified end frame ({:d}) is beyond the last video frame ({:d}). Stopping after last frame.".format(
                    args.end_with, last_frame))
            args.end_with = last_frame
    if args.out_video == "":
        # Default output name: replace the 3-letter extension with _masked.mp4
        args.out_video = args.in_video[:-4] + "_masked.mp4"
    writer = cv2.VideoWriter(args.out_video, cv2.VideoWriter_fourcc('X', '2', '6', '4'),
                             cap.get(cv2.CAP_PROP_FPS),
                             (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))), True)
    writer.set(cv2.VIDEOWRITER_PROP_NSTRIPES, cpu_count())
    if args.start_from > 0:
        cap.set(cv2.CAP_PROP_POS_FRAMES, args.start_from)
    total_frame_span = args.end_with - args.start_from
    frame_counter = 0
    if args.frame_count == -1:
        # Process every frame in the span sequentially.
        cur_frame_number = args.start_from
        while cur_frame_number < args.end_with:
            process_frame(cap, writer, mask)
            frame_counter += 1
            amount_done = frame_counter / total_frame_span
            update_progress(amount_done)
            cur_frame_number += 1
    else:
        # Sample ~frame_count evenly-spaced frames; clamp the stride to >= 1
        # so range() never gets a zero step when frame_count > span.
        frame_interval = max(1, total_frame_span // args.frame_count)
        for i_frame in range(args.start_from, args.end_with, frame_interval):
            cap.set(cv2.CAP_PROP_POS_FRAMES, i_frame)
            process_frame(cap, writer, mask)
            frame_counter += 1
            amount_done = frame_counter / args.frame_count
            update_progress(amount_done)
    cap.release()
    writer.release()
    return 0
def __init__(self, args, out_postfix="_out", with_video_output=True):
    """Set up video processing state from parsed CLI args and settings.yaml.

    All attributes of `args` are copied onto self (so self.in_video,
    self.start_from, self.end_with, self.out_video come from the CLI).
    If settings.yaml exists, it supplies the data path and optional
    per-video 'raw_options' (global_offset / flip). Opens the input video
    via self.reload_video() and, when with_video_output is set, creates an
    X264 mp4 writer matching the input's fps and resolution.
    """
    self.global_video_offset = 0
    self.flip_video = False
    self.datapath = "./"
    # Copy every parsed CLI option onto this instance in one shot.
    self.__dict__.update(vars(args))
    self.writer = None
    if os.path.exists("settings.yaml"):
        stream = open("settings.yaml", mode='r')
        self.settings = load(stream, Loader=Loader)
        stream.close()
        # Expand the <current_user> placeholder in the configured data path.
        self.datapath = self.settings['datapath'].replace("<current_user>", getuser())
        print("Processing path: ", self.datapath)
        if 'raw_options' in self.settings:
            raw_options = self.settings['raw_options']
            # Per-video overrides keyed by input filename
            # (self.in_video == args.in_video after the __dict__.update above).
            if self.in_video in raw_options:
                self.global_video_offset = raw_options[args.in_video]['global_offset']
                self.flip_video = raw_options[args.in_video]['flip']
    self.cap = None
    self.reload_video()
    print("Processing video file {:s}.".format(self.in_video))
    # Last valid 0-based frame index; clamp the requested end frame to it.
    last_frame = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1)
    if self.end_with == -1:
        self.end_with = last_frame
    else:
        if self.end_with > last_frame:
            print(("Warning: specified end frame ({:d}) is beyond the last video frame" +
                   " ({:d}). Stopping after last frame.")
                  .format(self.end_with, last_frame))
            self.end_with = last_frame
    print("Frame range: {:d}--{:d}".format(self.start_from, self.end_with))
    if with_video_output:
        if self.out_video == "":
            # Default output name: input name with extension swapped for postfix.mp4
            self.out_video = args.in_video[:-4] + "_" + out_postfix + ".mp4"
        self.writer = cv2.VideoWriter(os.path.join(self.datapath, self.out_video),
                                      cv2.VideoWriter_fourcc('X', '2', '6', '4'),
                                      self.cap.get(cv2.CAP_PROP_FPS),
                                      (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                       int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))),
                                      True)
        # Parallelize encoding across all cores.
        self.writer.set(cv2.VIDEOWRITER_PROP_NSTRIPES, cpu_count())
    else:
        self.writer = None
    self.frame = None
    self.cur_frame_number = None
def my_detect_scenes_file(path, scene_list, detector_list, stats_writer = None,
                          downscale_factor = 0, frame_skip = 0, quiet_mode = False,
                          perf_update_rate = -1, save_images = False,
                          timecode_list = None):
    """Open a video file and run PySceneDetect scene detection on it.

    timecode_list holds up to three entries (start, end, duration), each an
    int frame number, a float in seconds, or an [hh, mm, ss] list.
    Returns (video_fps, frames_read); both are -1 if the file cannot be opened.
    """
    frames_read = -1
    video_fps = -1
    if not timecode_list:
        timecode_list = [0, 0, 0]
    cap = cv2.VideoCapture()
    cap.open(path)
    # file_name = os.path.split(path)[1]
    file_name = path
    # Guard clause: bail out with the (-1, -1) sentinel if the open failed.
    if not cap.isOpened():
        if not quiet_mode:
            print('[PySceneDetect] FATAL ERROR - could not open video %s.' %
                  path)
        return (video_fps, frames_read)
    if not quiet_mode:
        print('[PySceneDetect] Parsing video %s...' % file_name)
    video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    video_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    if not quiet_mode:
        print('[PySceneDetect] Video Resolution / Framerate: %d x %d / %2.3f FPS' % (
            video_width, video_height, video_fps ))
        if downscale_factor >= 2:
            print('[PySceneDetect] Subsampling Enabled (%dx, Resolution = %d x %d)' % (
                downscale_factor, video_width / downscale_factor, video_height / downscale_factor ))
        print('Verify that the above parameters are correct'
              ' (especially framerate, use --force-fps to correct if required).')
    # Normalize each timecode entry to an absolute frame number.
    frames_list = []
    for code in timecode_list:
        if isinstance(code, int):
            frames_list.append(code)
        elif isinstance(code, float):
            frames_list.append(int(code * video_fps))
        elif isinstance(code, list) and len(code) == 3:
            seconds = float(code[0] * 60 * 60) + float(code[1] * 60) + float(code[2])
            frames_list.append(int(seconds * video_fps))
        else:
            frames_list.append(0)
    start_frame, end_frame, duration_frames = 0, 0, 0
    if len(frames_list) == 3:
        start_frame, end_frame, duration_frames = frames_list
    frames_read = scenedetect.detect_scenes(cap, scene_list, detector_list, stats_writer,
                                            downscale_factor, frame_skip, quiet_mode,
                                            perf_update_rate, save_images, file_name,
                                            start_frame, end_frame, duration_frames)
    cap.release()
    return (video_fps, frames_read)
def open_capture(name, frame):
    """Open video *name*, seek to frame index *frame*, and return the capture.

    Prints the source's resolution and frame rate for diagnostics.
    """
    capture = cv2.VideoCapture(name)
    frame_w = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_h = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_rate = capture.get(cv2.CAP_PROP_FPS)
    capture.set(cv2.CAP_PROP_POS_FRAMES, frame)
    print("Opened ", name, ", resolution ", frame_w, "x", frame_h, ", fps ", frame_rate, flush = True)
    return capture
def _load_input_videos(self):
    """ Opens and checks that all input video files are valid, can
    be processed, and have the same resolution and framerate. """
    self.video_resolution = None
    self.video_fps = None
    self.frames_total = 0
    # No inputs at all counts as failure.
    if not len(self.video_paths) > 0:
        return False
    for video_path in self.video_paths:
        capture = cv2.VideoCapture()
        capture.open(video_path)
        video_name = os.path.basename(video_path)
        if not capture.isOpened():
            if not self.suppress_output:
                print("[DVR-Scan] Error: Couldn't load video %s." % video_name)
                print("[DVR-Scan] Check that the given file is a valid video"
                      " clip, and ensure all required software dependencies"
                      " are installed and configured properly.")
            capture.release()
            return False
        # Probe the clip's properties, then release it immediately — the
        # actual scanning reopens the files later.
        resolution = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                      int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        framerate = capture.get(cv2.CAP_PROP_FPS)
        self.frames_total += capture.get(cv2.CAP_PROP_FRAME_COUNT)
        capture.release()
        if self.video_resolution is None and self.video_fps is None:
            # First clip establishes the reference resolution/framerate.
            self.video_resolution = resolution
            self.video_fps = framerate
            if not self.suppress_output:
                print("[DVR-Scan] Opened video %s (%d x %d at %2.3f FPS)." % (
                    video_name, self.video_resolution[0],
                    self.video_resolution[1], self.video_fps))
        # Check that all other videos specified have the same resolution
        # (we'll assume the framerate is the same if the resolution matches,
        # since the VideoCapture FPS information is not always accurate).
        elif resolution != self.video_resolution:
            if not self.suppress_output:
                print("[DVR-Scan] Error: Can't append clip %s, video resolution"
                      " does not match the first input file." % video_name)
            return False
        else:
            if not self.suppress_output:
                print("[DVR-Scan] Appended video %s." % video_name)
    # If we get to this point, all videos have the same parameters.
    return True
def setup_camera(device_number):
    """Open capture device *device_number*, request 1 FPS, and return it.

    The frame-size configuration was already disabled (commented out) in the
    original; the unused `result3` binding and dead code have been removed.
    Note cam.set() may silently fail on unsupported backends.
    """
    cam = cv2.VideoCapture(device_number)
    cam.set(cv2.CAP_PROP_FPS, 1)  # one frame per second is enough here
    return cam
def videoSlice(video_path, save_path, progressbarsetter=None, save_type="png", img_comp=0, start_idx=1):
    """Slice a video into sequentially numbered image files.

    :param video_path: source video path (opened via cv2, so Chinese-named
                       files are read directly)
    :param save_path: output directory for the frame images
    :param progressbarsetter: optional callable receiving percent complete
    :param save_type: output format, "png" or "jpg"/"jpeg"
    :param img_comp: compression level — png: 0-9, jpg: 0-100
                     (higher number = more compression)
    :param start_idx: number of the first output file (default 000001.xxx)
    :return: frame count reported by the container (float, from cv2)
    :raises ValueError: if save_type is not a supported format

    Fixes vs. original: the Python 2 `print params` statement (a syntax
    error under Python 3) became a print() call, and an unsupported
    save_type now fails fast instead of falling through with suffix=None
    and crashing later with an opaque TypeError. Unused locals (fps, size,
    prefix) were removed.
    """
    vid_handle = cv2.VideoCapture(video_path)
    count = vid_handle.get(cv2.CAP_PROP_FRAME_COUNT)
    idx = start_idx      # output file number
    cnt_idx = 1          # frames processed so far (drives the progress bar)
    fmt = save_type.upper()
    if fmt in ("JPEG", "JPG"):
        suffix = ".jpg"
        params = [int(cv2.IMWRITE_JPEG_OPTIMIZE), img_comp]
    elif fmt == "PNG":
        suffix = ".png"
        params = [int(cv2.IMWRITE_PNG_COMPRESSION), img_comp]
    else:
        raise ValueError("Do not support %s format!" % save_type)
    while True:
        ret, frame = vid_handle.read()
        if not ret:
            break
        cur_progress = cnt_idx / (count / 100.0)
        if progressbarsetter is not None:
            progressbarsetter(cur_progress)
        print("Progress %.2f%%" % cur_progress)
        img_name = save_path + "/" + ("%06d" % idx) + suffix
        print(params)
        cv2.imwrite(img_name, frame, params)
        idx += 1
        cnt_idx += 1
    print("Slicing Done!")
    return count
def setup_camera():
    """Open the default camera, request 720x512 at 1 FPS, and return it.

    The set() return values are discarded; the driver may silently ignore
    unsupported properties.
    """
    camera = cv2.VideoCapture(0)
    camera.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
    camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 512)
    camera.set(cv2.CAP_PROP_FPS, 1)
    return camera
def setup_camera(device_number):
    """Open capture *device_number* at the module-level cam_width x cam_height, 1 FPS.

    The set() return values are discarded; the driver may silently ignore
    unsupported properties.
    """
    camera = cv2.VideoCapture(device_number)
    camera.set(cv2.CAP_PROP_FRAME_WIDTH, cam_width)
    camera.set(cv2.CAP_PROP_FRAME_HEIGHT, cam_height)
    camera.set(cv2.CAP_PROP_FPS, 1)
    return camera