import cv2
import imutils
import numpy as np

def rotating_example():
    # VideoWriterRGB is defined below
    img = cv2.imread('./data/hi.jpg')
    vwriter = VideoWriterRGB('hi-video.avi')
    frames = 0
    # spin the image up to 355 degrees, then back down again,
    # reusing a single preview window instead of opening one per angle
    for angle in range(0, 360, 5):
        rot = imutils.rotate(img, angle=angle)
        cv2.imshow("Rotated", rot)
        cv2.waitKey(1)  # give the window a chance to repaint
        vwriter.addFrame(rot)
        frames += 1
    for angle in range(360, 0, -5):
        rot = imutils.rotate(img, angle=angle)
        cv2.imshow("Rotated", rot)
        cv2.waitKey(1)
        vwriter.addFrame(rot)
        frames += 1
    vwriter.finalise()
    print("Created movie with %d frames" % frames)
Example source code for the Python video() class
class VideoWriterRGB:
    def __init__(self, output='video.avi', fps=20, codec=["M", "J", "P", "G"]):
        self.output = output
        self.fps = fps
        self.codec = codec
        self.fourcc = cv2.VideoWriter_fourcc(*self.codec)
        self.writer = None
        (self.h, self.w) = (None, None)
        self.zeros = None

    def addFrame(self, frame, width=300):
        frame = imutils.resize(frame, width)
        # check if the writer is None
        if self.writer is None:
            # store the image dimensions, initialize the video writer,
            # and construct the zeros array
            (self.h, self.w) = frame.shape[:2]
            self.writer = cv2.VideoWriter(self.output, self.fourcc, self.fps,
                                          (self.w * 2, self.h * 2), True)
            self.zeros = np.zeros((self.h, self.w), dtype="uint8")
        # break the image into its B, G and R components, then construct a
        # single-channel colour image for each one (OpenCV stores frames in
        # BGR order, so red sits at index 2)
        (B, G, R) = cv2.split(frame)
        R = cv2.merge([self.zeros, self.zeros, R])
        G = cv2.merge([self.zeros, G, self.zeros])
        B = cv2.merge([B, self.zeros, self.zeros])
        # construct the final output frame, storing the original frame
        # at the top-left, the red channel in the top-right, the green
        # channel in the bottom-right, and the blue channel in the
        # bottom-left
        output = np.zeros((self.h * 2, self.w * 2, 3), dtype="uint8")
        output[0:self.h, 0:self.w] = frame
        output[0:self.h, self.w:self.w * 2] = R
        output[self.h:self.h * 2, self.w:self.w * 2] = G
        output[self.h:self.h * 2, 0:self.w] = B
        # write the output frame to file
        self.writer.write(output)
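rotating_example() above calls vwriter.finalise(), which is not included in the snippet; a minimal sketch, assuming it only needs to release the underlying cv2.VideoWriter:

    # Assumed completion (not in the original snippet): release the writer
    # so the AVI container is finalised on disk.
    def finalise(self):
        if self.writer is not None:
            self.writer.release()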
# A second, simpler writer that saves frames unmodified (the class
# declaration is missing from the snippet; the name below is assumed).
class VideoWriterSimple:
    def __init__(self, output='video.avi', fps=20, codec=["M", "J", "P", "G"]):
        self.output = output
        self.fps = fps
        self.codec = codec
        self.fourcc = cv2.VideoWriter_fourcc(*self.codec)
        self.writer = None
        (self.h, self.w) = (None, None)

    def addFrame(self, frame, width=600):
        frame = imutils.resize(frame, width)
        # initialize the writer lazily, once the first frame's size is known
        if self.writer is None:
            (self.h, self.w) = frame.shape[:2]
            self.writer = cv2.VideoWriter(self.output, self.fourcc, self.fps,
                                          (self.w, self.h), True)
        # write the output frame to file
        self.writer.write(frame)
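A short usage sketch for the simple writer, assuming a local webcam at index 0 (not part of the original snippets):

# Usage sketch (assumed): record roughly 100 frames from the default
# webcam into out.avi, then release both the camera and the writer.
cap = cv2.VideoCapture(0)
writer = VideoWriterSimple(output='out.avi', fps=20)
for _ in range(100):
    grabbed, frame = cap.read()
    if not grabbed:
        break
    writer.addFrame(frame)
cap.release()
writer.writer.release()  # the snippet provides no close method of its own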
from threading import Thread

# Class declaration not preserved in the snippet; the name is inferred from
# init_vision() below, which instantiates it as eye(src=0).start().
class eye:
    def __init__(self, src=0):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        Thread(target=self.update, args=()).start()
        return self
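start() hands self.update to a background thread, but update, read and stop are not shown; a minimal sketch following the usual threaded-stream pattern (assumed, since the original methods are not in the snippet):

    # Assumed completions, following the common threaded-stream pattern.
    def update(self):
        # keep grabbing frames until the stream is stopped
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        # return the most recently grabbed frame
        return self.frame

    def stop(self):
        # signal the update thread to exit
        self.stopped = True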
saber_track.py, from project Vision_Processing-2016 (author: Sabercat-Robotics-4146-FRC)
def update(self):
    cap = self.capture.read()  # capture the frame from the webcam
    show_cap = cap.copy()
    # render the requested video outputs
    if self.settings["hsv"]:
        hsv = cv2.cvtColor(cap, cv2.COLOR_BGR2HSV)
        self.show('hsv', hsv)
    if self.track:  # if the program should track an item
        for k, v in self.limits.items():  # cycle through each item in the limits
            if k in self.settings["filters"]:  # if the item is in the given filters,
                v[0] = self.get_upper_trackbar(k)  # set the upper limit from the trackbar
                v[1] = self.get_lower_trackbar(k)  # set the lower limit from the trackbar
            # get the bounding rect of the capture within the limits
            self.get_bounding_rect(k, cap, show_cap, k, v[0], v[1])
    if self.settings["original"]:
        self.show('original', show_cap)
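get_bounding_rect is not shown; a minimal sketch of what an HSV-threshold tracker like this typically does (the signature mirrors the call above, but the body is assumed):

# Assumed sketch of get_bounding_rect: threshold the frame in HSV space
# between the given limits and outline the largest matching region.
def get_bounding_rect(self, name, cap, show_cap, key, upper, lower):
    hsv = cv2.cvtColor(cap, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array(lower), np.array(upper))
    cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)  # normalize across OpenCV versions
    if cnts:
        x, y, w, h = cv2.boundingRect(max(cnts, key=cv2.contourArea))
        cv2.rectangle(show_cap, (x, y), (x + w, y + h), (0, 255, 0), 2)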
def init_vision(self):
    self.set_default_context()
    # read the haar cascades
    self.log.info("reading haar cascades")
    self.face_cascade = cv2.CascadeClassifier(dirname(__file__) + '/cascades/haarcascade_frontalface_alt2.xml')
    self.smile_cascade = cv2.CascadeClassifier(dirname(__file__) + '/cascades/haarcascade_smile.xml')
    self.leye_cascade = cv2.CascadeClassifier(dirname(__file__) + '/cascades/haarcascade_lefteye.xml')
    self.reye_cascade = cv2.CascadeClassifier(dirname(__file__) + '/cascades/haarcascade_righteye.xml')
    self.profileface_cascade = cv2.CascadeClassifier(dirname(__file__) + '/cascades/lbpcascade_profileface.xml')
    # video streaming and FPS
    self.log.info("initializing videostream")
    self.vs = eye(src=0).start()
    self.fps = FPS().start()
    # TODO distance from cam
    self.rect = (161, 79, 150, 150)  # random initial guess for foreground/face position for background extraction
    self.distance = 0
    # initialize the known distance from the camera to the reference eye pic,
    # which in this case is 50 cm from the computer screen
    self.KNOWN_DISTANCE = 50.0
    # initialize the known object width; the human eye is about 2.4 cm wide on average
    self.KNOWN_WIDTH = 2.4
    # initialize the reference eye image that we'll be using
    self.REF_IMAGE_PATH = dirname(__file__) + "/ref/eye.jpg"
    # load the image that contains an eye that is KNOWN TO BE 50 cm
    # from our camera, then find the eye marker in the image and
    # initialize the focal length
    image = cv2.imread(self.REF_IMAGE_PATH)
    self.radius = 24  # initialize the radius for the distance counter
    marker = self.find_eye_radius(image)
    # calculate the camera focal length
    self.focalLength = (marker * self.KNOWN_DISTANCE) / self.KNOWN_WIDTH
    # TODO read from config file
    # bools
    self.showfeed = True
    self.showboundingboxes = True
    self.showdetected = True
    # detected objects
    self.detectedfaces = []
    self.detectedmoods = []
    # process the first frame
    self.feed = self.process_frame()
    self.awake = True
    if self.awake:
        pass
    # TODO start vision thread
    self.filter = "None"
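The focal-length line above is the standard triangle-similarity calibration: F = (P * D) / W, where P is the perceived width in pixels, D the known distance and W the known width. Once F is fixed, the distance to a new detection follows by inverting the formula; a minimal sketch (the helper name is ours, not from the source):

# Assumed helper (not in the original source): invert the triangle-similarity
# relation F = (P * D) / W to recover the distance D' = (W * F) / P'.
def distance_to_camera(knownWidth, focalLength, perWidth):
    return (knownWidth * focalLength) / perWidth

# e.g. an eye detected 40 px wide, with focalLength from init_vision:
# distance_cm = distance_to_camera(2.4, focalLength, 40.0)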
def tracking(vstate):
    print(vstate)
    # The vehicle processes images and keeps all data ready to fly in the
    # "following" state. However, it does not send attitude messages while
    # the vehicle is still under manual control.
    red1Good = red2Good = False  # set True when the returned target offset is reliable
    bearing = offset = 0
    target = None  # initialise the tuple returned from the video stream
    # initialise the FPS counter
    #fps = FPS().start()
    while vstate == "tracking":
        # grab the frame from the threaded video stream and return the left
        # line offset; we do this to know if we have a 'lock' (goodTarget)
        # as we come off of manual control
        target = vs.readfollow()
        bearing = target[0]
        red1Good = target[1]
        offset = target[2]
        red2Good = target[3]
        # print location information for `vehicle` in all frames (default printer)
        #print("Global Location: %s" % vehicle.location.global_frame)
        #print("Global Location (relative altitude): %s" % vehicle.location.global_relative_frame)
        #print("Local Location: %s" % vehicle.location.local_frame)  # NED
        #print(tof_sensor.get_distance())
        # update the FPS counter
        #fps.update()
        # check whether the operator has transferred to the autopilot using the TX switch
        if vehicle.mode == "GUIDED_NOGPS":
            #print("In Guided mode...")
            if red1Good and red2Good:
                #print("In guided mode, setting following...")
                vstate = "following"
            else:
                #print("In guided mode, setting lost...")
                vstate = "lost"
    # stop the timer and display FPS information
    #fps.stop()
    #print('Elapsed time in tracking state: {:.2f}'.format(fps.elapsed()))
    #print('Approx. FPS: {:.2f}'.format(fps.fps()))
    #time.sleep(1)
    return vstate
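tracking() is one handler in a simple string-keyed state machine: each handler loops until it returns the name of the next state. A dispatch loop in that style might look like this (assumed; the source shows only the tracking handler and announces a following handler in the banner below):

# Assumed dispatch loop (not from the source): run each state handler until
# it hands back the next state's name. `following` and `lost` are the other
# handlers these state names refer to; only `tracking` is shown in this excerpt.
handlers = {"tracking": tracking, "following": following, "lost": lost}
vstate = "tracking"
while True:
    vstate = handlers[vstate](vstate)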
#-------------- FUNCTION DEFINITION TO FLY IN VEHICLE STATE FOLLOWING---------------------