def getFrameFromCamera():
    """Show a live camera preview and return a user-captured frame.

    Press 's' to capture the current frame, Esc to quit without capturing.

    Returns:
        The captured (flipped) frame, or an empty list if the preview was
        closed / Esc was pressed before a frame was captured.
    """
    name_window = 'camera window'
    cv2.namedWindow(name_window)
    cap = cv2.VideoCapture(0)
    # Warm-up read so the sensor can settle before the preview loop.
    ret, frame_from = cap.read()
    output = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # Camera stopped delivering frames; give up.
            break
        # BUG FIX: flip only after a successful read -- the original flipped
        # before checking ret, so a failed read passed None to cv2.flip.
        frame = cv2.flip(frame, -1)  # -1: flip around both axes
        cv2.imshow(name_window, frame)
        cur_key = cv2.waitKey(1)
        if cur_key == 27:  # Esc: abort without capturing
            break
        if cur_key == ord('s'):  # 's': keep this frame
            output = frame
            break
    # Release everything if job is finished
    cap.release()
    cv2.destroyAllWindows()
    return output
# Python usage examples for cv2.namedWindow()
def run_main():
    """Capture an image from the camera and save one every second, forever.

    Relies on module-level camera_port / ramp_frames and on the helpers
    get_image() / take_save_image() defined elsewhere in this file.
    """
    # First initialize the camera capture object with the cv2.VideoCapture class.
    camera = cv2.VideoCapture(camera_port)
    # To create a new window for display
    # Disabled it for now as we can't see the image on the cap
    # cv2.namedWindow(display, cv2.WINDOW_NORMAL)
    # Discard the first few frames to let the camera adjust its light levels.
    for i in range(ramp_frames):
        get_image(camera)
    # Now take the image and save it after every 1 second
    while True:
        try:
            time.sleep(1)
            take_save_image(camera)
        except Exception as e:
            # A single failed capture should not stop the acquisition loop;
            # report it and keep going.  (py2-only `except Exception, e` and
            # print statements replaced with py3-compatible forms.)
            print("Exception occurred\n")
            print(e)
def load_images(queue: PriorityQueue,
                source: int,
                file_path: str,
                target_width: int,
                target_height: int,
                display_progress: bool = False):
    """Recursively load every JPEG under file_path, resize it and enqueue it.

    Each image is put on the queue as (random_priority, (image, source)) so
    that consumers draw frames from multiple sources in shuffled order.

    Args:
        queue: priority queue receiving (priority, (image, source)) tuples.
        source: integer tag identifying this image source.
        file_path: root directory searched recursively for '*.jpg'.
        target_width: width each image is resized to.
        target_height: height each image is resized to.
        display_progress: when True, show each image; Esc aborts loading.
    """
    window = 'image'
    if display_progress:
        cv2.namedWindow(window)
    for file in iglob(path.join(file_path, '**', '*.jpg'), recursive=True):
        buffer = cv2.imread(file)
        if buffer is None:
            # BUG FIX: cv2.imread returns None for unreadable/corrupt files;
            # skip them instead of crashing in cv2.resize below.
            continue
        # INTER_AREA gives the best quality when shrinking images.
        buffer = cv2.resize(buffer, (target_width, target_height),
                            interpolation=cv2.INTER_AREA)
        random_priority = random()
        queue.put((random_priority, (buffer, source)))
        if display_progress:
            cv2.imshow(window, buffer)
            if (cv2.waitKey(33) & 0xff) == 27:
                break
    if display_progress:
        cv2.destroyWindow(window)
def setupWindow():
    """Load a user-selected maze image, solve it and display the result.

    Shows the thresholded maze unchanged when no solution exists; otherwise
    draws the solution path plus start/end markers in a resizable window and
    waits for a key press before closing.
    """
    filename = getUserSelectedImage()
    imageProcessor = ImageProcessor(cv2.imread(filename, 0))
    colourImage = cv2.imread(filename, 1)
    image = imageProcessor.getThresholdedImage(False)
    granularity = imageProcessor.get_granularity(image, 100)
    print("Granularity: {0}".format(granularity))
    start_x, start_y, end_x, end_y = get_start_points(image)
    image = imageProcessor.encloseMaze(image)
    mazerunner = MazeSolver.MazeSolver(image, granularity)
    solution = mazerunner.solveMaze(start_x, start_y, end_x, end_y)
    if not solution:
        cv2.imshow(MAZE_NAME, image)
    else:
        solvedImage = draw_solution(solution, colourImage)
        solvedImage = imageProcessor.mark_point((end_x, end_y), 3, (255, 0, 0), solvedImage)
        solvedImage = imageProcessor.mark_point((start_x, start_y), 3, (255, 0, 0), solvedImage)
        # cv2.namedWindow returns None; no need to keep its result.
        cv2.namedWindow("Solved Image", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Solved Image", 900, 900)
        cv2.moveWindow("Solved Image", 100, 100)
        cv2.imshow("Solved Image", solvedImage)
    # py2 print statement replaced with the py3-compatible call used above.
    print("Press any key to exit")
    cv2.waitKey(0)
    # BUG FIX: the original referenced cv2.destroyAllWindows without calling
    # it (missing parentheses), so the windows were never closed.
    cv2.destroyAllWindows()
def __init__(self):
    """Allocate 800x600 working buffers and zero the BGR threshold values."""
    # Normalised RGB output frame plus two single-channel working images.
    self.norm_rgb = np.zeros((600, 800, 3), np.uint8)
    self.dst = np.zeros((600, 800), np.uint8)
    self.m = np.zeros((600, 800), np.uint8)
    # Upper BGR thresholds.
    self.b, self.g, self.r = 0, 0, 0
    # Lower BGR thresholds.
    self.lb, self.lg, self.lr = 0, 0, 0
    # The trackbar / window creation below was already disabled in the
    # original and is kept only as a record of the intended calibration UI:
    # self.win=cv2.namedWindow("detect")
    # self.dst=cv.CreateImage((800,600),8,1)
    # cv2.createTrackbar("blue", "detect",0,255,self.change_b)
    # cv2.createTrackbar("green","detect",0,255,self.change_g)
    # cv2.createTrackbar("red","detect",0,255,self.change_r)
    # cv2.createTrackbar("low_blue", "detect",0,255,self.change_lb)
    # cv2.createTrackbar("low_green","detect",0,255,self.change_lg)
    # cv2.createTrackbar("low_red","detect",0,255,self.change_lr)
def __init__(self):
    """Allocate working buffers and seed the hue value from the module-level `color`."""
    global color
    # Full-colour frame, binary mask and a colour scratch buffer (800x600).
    self.rgb = np.zeros((600, 800, 3), np.uint8)
    self.mask = np.zeros((600, 800), np.uint8)
    self.scratch = np.zeros((600, 800, 3), np.uint8)
    self.hue_val = color
    # Hue-calibration UI, disabled in the original:
    # cv2.namedWindow("hue")
    # cv2.createTrackbar("hue", "hue",self.hue_val,255,self.change)
def __init__(self, name):
    """Remember the window title and create the OpenCV window up front."""
    self.frame_name = name
    cv2.namedWindow(self.frame_name)
def trackbar_create(label, win_name, v, maxv, scale=1.0):
    """Register and create a named trackbar on window win_name.

    Records the trackbar's metadata in the module-level `trackbars` registry
    and wires it to the shared trackbar_update callback.

    Raises:
        RuntimeError: if a trackbar with this label was already created.
    """
    global trackbars
    if label in trackbars:
        raise RuntimeError('Duplicate key. %s already created' % label)
    # Keep the creation parameters so trackbar_update can interpret positions.
    trackbars[label] = {
        'label': label,
        'win_name': win_name,
        'value': v,
        'scale': scale,
    }
    cv2.namedWindow(win_name)
    cv2.createTrackbar(label, win_name, v, maxv, trackbar_update)
def mouse_event_create(win_name, cb):
    """Create window win_name (if needed) and attach mouse callback cb to it."""
    cv2.namedWindow(win_name)
    cv2.setMouseCallback(win_name, cb)
def __init__(self, image, filter_size=1, threshold1=0, threshold2=0):
    """Interactive edge-detection tuner.

    Opens an 'edges' window with trackbars for the two thresholds and the
    smoothing filter size, re-rendering (via self._render) on every change,
    and blocks until a key is pressed.

    Args:
        image: source image to run edge detection on.
        filter_size: initial smoothing kernel size (forced odd by the UI).
        threshold1: initial lower threshold (0-255).
        threshold2: initial upper threshold (0-255).
    """
    self.image = image
    self._filter_size = filter_size
    self._threshold1 = threshold1
    self._threshold2 = threshold2

    def onchangeThreshold1(pos):
        self._threshold1 = pos
        self._render()

    def onchangeThreshold2(pos):
        self._threshold2 = pos
        self._render()

    def onchangeFilterSize(pos):
        self._filter_size = pos
        # Smoothing kernels must be odd; bump even sizes up to the next odd.
        self._filter_size += (self._filter_size + 1) % 2
        self._render()

    cv2.namedWindow('edges')
    cv2.createTrackbar('threshold1', 'edges', self._threshold1, 255, onchangeThreshold1)
    cv2.createTrackbar('threshold2', 'edges', self._threshold2, 255, onchangeThreshold2)
    cv2.createTrackbar('filter_size', 'edges', self._filter_size, 20, onchangeFilterSize)

    self._render()
    # BUG FIX: py2-only print statement replaced with the py3-compatible
    # call used elsewhere in this file.
    print("Adjust the parameters as desired. Hit any key to close.")
    cv2.waitKey(0)
    cv2.destroyWindow('edges')
    # 'smoothed' is presumably created by self._render() -- close it too.
    cv2.destroyWindow('smoothed')
def __init__(self, video_src):
    # Set up camera capture, skin-detection ranges, background subtraction
    # and gesture-tracking state for the hand-gesture HCI loop.
    self.cam = cv2.VideoCapture(video_src)
    ret, self.frame = self.cam.read()
    cv2.namedWindow('gesture_hci')
    # set channel range of skin detection
    # NOTE(review): values look like YCrCb channel bounds -- confirm the
    # colour space used by the detection code before editing them.
    self.mask_lower_yrb = np.array([44, 131, 80])    # [54, 131, 110]
    self.mask_upper_yrb = np.array([163, 157, 155])  # [163, 157, 135]
    # create trackbar for skin calibration
    self.calib_switch = False
    # create background subtractor
    # NOTE(review): cv2.BackgroundSubtractorMOG2 is the OpenCV 2.x API;
    # OpenCV 3+ renamed it cv2.createBackgroundSubtractorMOG2.
    self.fgbg = cv2.BackgroundSubtractorMOG2(history=120, varThreshold=50, bShadowDetection=True)
    # define dynamic ROI area
    self.ROIx, self.ROIy = 200, 200
    self.track_switch = False
    # record previous positions of the centroid of ROI
    self.preCX = None
    self.preCY = None
    # A queue to record last couple gesture command
    self.last_cmds = FixedQueue()
    # prepare some data for detecting single-finger gesture
    # (reference images; imread returns None if the files are missing)
    self.fin1 = cv2.imread('./test_data/index1.jpg')
    self.fin2 = cv2.imread('./test_data/index2.jpg')
    self.fin3 = cv2.imread('./test_data/index3.jpg')
    # switch to turn on mouse input control
    self.cmd_switch = False
    # count loop (frame), for debugging
    self.n_frame = 0
    # On-line Calibration for skin detection (bug, not stable)
# From tarfile_calibration.py in project camera_calibration_frontend (author: groundmelon)
def display(win_name, img):
    """Show img in a resizable window and block until a key is pressed.

    Pressing Esc or 'q' additionally shuts down the ROS node via rospy.
    """
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.imshow(win_name, numpy.asarray(img[:, :]))
    # waitkey() returns -1 while no key has been pressed; poll until one is.
    key = -1
    while key == -1:
        key = waitkey()
    cv2.destroyWindow(win_name)
    if key in (27, ord('q')):
        rospy.signal_shutdown('Quit')
# From kalibr_camera_focus.py in project camera_calibration_frontend (author: groundmelon)
def __init__(self, topic):
    """Subscribe to a ROS image topic and open a window for displaying it."""
    self.topic = topic
    self.windowNameOrig = "Camera: {0}".format(self.topic)
    # NOTE(review): flag value 2 is not a named cv2 window constant --
    # confirm the intended window mode before changing it.
    cv2.namedWindow(self.windowNameOrig, 2)
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber(self.topic, Image, self.callback)
def cvShowImageColor(image_file):
    """Load image_file in colour and display it until any key is pressed."""
    bgr = cv2.imread(image_file)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', bgr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Display an image in grayscale with OpenCV (reconstructed from a garbled comment)
def cvShowImageGray(image_file):
    """Load image_file as grayscale and display it until any key is pressed."""
    gray = cv2.imread(image_file, 0)  # flag 0: force single-channel load
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Display an image with Matplotlib (reconstructed from a garbled comment)
def drawingDemo():
    """Exercise each drawing helper on a blank canvas and show the result.

    Original inline comments were mojibake; they are restored in English.
    """
    canvas = emptyImage()
    # Straight line, thickness 2.
    drawLine(canvas, (10, 10), (200, 200), (0, 0, 255), 2)
    # Thickness -1 fills the shape.
    drawCircle(canvas, (300, 100), 80, (0, 255, 0), -1)
    # Filled rectangle with an outlined border drawn on top.
    drawRectangle(canvas, (10, 210), (210, 350), (100, 100, 0), -1)
    drawRectangle(canvas, (10, 210), (210, 350), (255, 0, 0), 3)
    # Filled ellipse.
    drawElipse(canvas, (450, 100), (30, 80), 0, 0, 360, (0, 100, 100), -1)
    # Closed polyline.
    pts = np.array([[(250, 240), (270, 280), (350, 320), (500, 300), (450, 230), (350, 210)]], dtype=np.int32)
    drawPolylines(canvas, pts, True, (255, 100, 100), 5)
    # Text label.
    drawText(canvas, 'OpenCV', (20, 450), font_types[0], 4, (200, 200, 200), 2)
    cv2.namedWindow('DrawingDemo', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('DrawingDemo', canvas)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def run(self):
    # Receive, decompress and display video frames streamed by the peer.
    # Wire protocol (must match the sender blocks in this file): 1-byte
    # handshake ("S" = accepted), then per frame a struct-"L" size header
    # followed by a zlib-compressed pickle of the frame.
    print ("VEDIO server starts...")
    # Retry the connection every 3 s until the peer is reachable.
    while True:
        try:
            self.sock.connect(self.ADDR)
            break
        except:
            time.sleep(3)
            continue
    print ("video server <-> remote server success connected...")
    # Handshake: any byte other than "S" means we were refused.
    check = "F"
    check = self.sock.recv(1)
    if check.decode("utf-8") != "S":
        return
    data = "".encode("utf-8")
    # NOTE(review): "L" is native size/endianness, so sender and receiver
    # must run on compatible platforms; "!L" would be portable, but changing
    # it here alone would break the existing peers.
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
    while True:
        # Accumulate bytes until the fixed-size length header is complete.
        while len(data) < payload_size:
            data += self.sock.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        # Accumulate until the whole compressed frame has arrived.
        while len(data) < msg_size:
            data += self.sock.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        # SECURITY: pickle.loads on network data is unsafe if the peer is
        # untrusted -- flagged, left unchanged to preserve the protocol.
        frame = pickle.loads(frame_data)
        try:
            cv2.imshow('Remote', frame)
        except:
            # Best-effort display: a malformed frame is dropped silently.
            pass
        if cv2.waitKey(1) & 0xFF == 27:  # Esc closes the viewer
            break
def run(self):
    # Capture frames from a local video source, compress them and stream
    # them to the remote server using the size-header + zlib + pickle
    # protocol expected by the receiver block in this file.
    print ("VEDIO client starts...")
    # Retry the connection every 3 s until the server is reachable.
    while True:
        try:
            self.sock.connect(self.ADDR)
            break
        except:
            time.sleep(3)
            continue
    print ("video client <-> remote server success connected...")
    # Handshake: any byte other than "S" means the server refused us.
    check = "F"
    check = self.sock.recv(1)
    if check.decode("utf-8") != "S":
        return
    print ("receive authend")
    #self.cap = cv2.VideoCapture(0)
    self.cap = cv2.VideoCapture("test.mp4")
    if self.showme:
        cv2.namedWindow('You', cv2.WINDOW_NORMAL)
    print ("remote VEDIO client connected...")
    while self.cap.isOpened():
        ret, frame = self.cap.read()
        # NOTE(review): ret is never checked -- a failed read leaves frame
        # as None and the calls below would raise; confirm before relying
        # on end-of-file behaviour.
        if self.showme:
            cv2.imshow('You', frame)
            # Esc turns off the local preview but keeps streaming.
            if cv2.waitKey(1) & 0xFF == 27:
                self.showme = False
                cv2.destroyWindow('You')
        if self.level > 0:
            # Downscale before sending; fx is used for both axes (uniform).
            frame = cv2.resize(frame, (0,0), fx=self.fx, fy=self.fx)
        data = pickle.dumps(frame)
        zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
        try:
            # Native-endian "L" header -- must match the receiver's calcsize("L").
            self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
            print("video send ", len(zdata))
        except:
            # Connection lost: stop streaming.
            break
        # Skip self.interval frames to throttle the effective frame rate.
        for i in range(self.interval):
            self.cap.read()
def run(self):
    # Stream frames from self.cap (opened elsewhere) to the remote server;
    # same size-header + zlib + pickle protocol as the other run() blocks.
    # Retry the connection every 3 s until the server is reachable.
    while True:
        try:
            self.sock.connect(self.ADDR)
            break
        except:
            time.sleep(3)
            continue
    if self.showme:
        cv2.namedWindow('You', cv2.WINDOW_NORMAL)
    print("VEDIO client connected...")
    while self.cap.isOpened():
        ret, frame = self.cap.read()
        # NOTE(review): ret is never checked -- a failed read leaves frame
        # as None; the resize below would then raise. Confirm intended.
        if self.showme:
            cv2.imshow('You', frame)
            # Esc turns off the local preview but keeps streaming.
            if cv2.waitKey(1) & 0xFF == 27:
                self.showme = False
                cv2.destroyWindow('You')
        # Downscale before sending; fx is used for both axes (uniform).
        sframe = cv2.resize(frame, (0,0), fx=self.fx, fy=self.fx)
        data = pickle.dumps(sframe)
        zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
        try:
            # Native-endian "L" header -- must match the receiver's calcsize("L").
            self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
        except:
            # Connection lost: stop streaming.
            break
        # Skip self.interval frames to throttle the effective frame rate.
        for i in range(self.interval):
            self.cap.read()
def __init__(self, video_src = 0, interactive = True, video = 'vss.avi', fallback = 'synth:bg=./data/hi.jpg:noise=0.05', save_frames = False, frame_dir = './data', frame_prefix='frame-'):
    # Capture frames, detect squares in each one, and either display them
    # interactively or write them to a video file; optionally save frames
    # to disk as JPEGs.
    cam = create_capture(video_src, fallback=fallback)
    vwriter = None
    if interactive:
        print("q to quit")
        window = 'video square search'
        cv2.namedWindow(window, cv2.WINDOW_NORMAL)
    else:
        # Non-interactive mode records annotated frames instead of showing them.
        vwriter = VideoWriter(video)
    run = True
    t = clock()
    frameCounter = 0
    # NOTE(review): in non-interactive mode nothing ever sets run = False,
    # so this loop only ends via an exception -- confirm this is intended.
    while run:
        ret, img = cam.read()
        if interactive:
            print("read next image")
        squares = find_squares(img, 0.2)
        nOfSquares = len(squares)
        cv2.drawContours( img, squares, -1, (0, 255, 0), 3 )
        # Overlay elapsed time since start and the number of squares found.
        dt = clock() - t
        draw_str(img, (80, 80), 'time: %.1f ms found = %d' % (dt*1000, nOfSquares), 2.0)
        if interactive:
            cv2.imshow(window, img)
            print('q to quit, n for next')
            ch = 0xff & cv2.waitKey(100)
            if ch == ord('q'):
                run = False
            elif ch == ord('n'):
                # 'n' advances immediately and skips the frame-saving step below.
                continue
        else:
            vwriter.addFrame(img)
        if save_frames:
            fn = os.path.join(frame_dir, '%s-%04d.%s' % (frame_prefix, frameCounter, 'jpg'))
            print("Saving %d frame to %s" % (frameCounter, fn))
            cv2.imwrite(fn, img)
            frameCounter+=1
    if vwriter:
        vwriter.finalise()