def rgb_callback(self, data):
    try:
        self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
        return
    gray = cv2.cvtColor(self.rgb_img, cv2.COLOR_BGR2GRAY)
    rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num, y_num), None)
    cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
    cv2.imshow('rgb_img', self.rgb_img)
    cv2.waitKey(5)
    if rgb_ret:
        rgb_tempimg = self.rgb_img.copy()
        cv2.cornerSubPix(gray, rgb_corners, (5, 5), (-1, -1), criteria)
        cv2.drawChessboardCorners(rgb_tempimg, (x_num, y_num), rgb_corners, rgb_ret)
        # OpenCV 2.4 signature; OpenCV >= 3.0 returns (retval, rvec, tvec, inliers)
        rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
        self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
        print("The world coordinate system's origin in the camera's coordinate system:")
        print("===rgb_camera rvec:")
        print(rgb_rvec)
        print("===rgb_camera rmat:")
        print(self.rgb_rmat)
        print("===rgb_camera tvec:")
        print(self.rgb_tvec)
        print("rgb_camera_shape: ")
        print(self.rgb_img.shape)
        print("The camera origin in the world coordinate system:")
        print("===camera rmat:")
        print(self.rgb_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))
        data = {'rmat': self.rgb_rmat.tolist(), 'tvec': self.rgb_tvec.tolist()}
        with open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w") as rgb_stream:
            yaml.dump(data, rgb_stream)
        cv2.imshow('rgb_img', rgb_tempimg)
        cv2.waitKey(5)
Python cv2.namedWindow() usage examples (source code)

Source file: get_extrinsics.py
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials
Author: taochenshh
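As a sanity check, the pose that `rgb_callback` writes out can be reloaded and applied by hand. A minimal sketch, assuming the YAML layout saved above (keys `rmat` and `tvec`) and a local copy of the file:

import numpy as np
import yaml

# Reload the saved pose and map a world-frame point into the camera frame,
# p_cam = R * p_world + t. The (shortened) file path is an assumption.
with open("rgb_camera_pose.yaml") as f:
    pose = yaml.safe_load(f)
R = np.array(pose['rmat'])                  # 3x3 rotation, world -> camera
t = np.array(pose['tvec']).reshape(3, 1)

p_world = np.array([[0.0], [0.0], [0.0]])   # e.g. the chessboard origin
p_cam = R.dot(p_world) + t
print("board origin in camera frame:", p_cam.ravel())

# Inverse transform, matching what the callback prints:
cam_origin_world = -R.T.dot(t)
print("camera origin in world frame:", cam_origin_world.ravel())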
Source file: cameracalibrator.py
Project: camera_calibration_frontend
Author: groundmelon
def run(self):
    cv2.namedWindow("display", cv2.WINDOW_NORMAL)
    cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
    cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)
    if self.extra_queue:
        cv2.namedWindow("extra", cv2.WINDOW_NORMAL)
    while True:
        # wait for an image (could happen at the very beginning when the queue is still empty)
        while len(self.queue) == 0:
            time.sleep(0.1)
        im = self.queue[0]
        cv2.imshow("display", im)
        k = cv2.waitKey(6) & 0xFF
        if k in [27, ord('q')]:
            rospy.signal_shutdown('Quit')
        elif k == ord('s'):
            self.opencv_calibration_node.screendump(im)
        if self.extra_queue:
            if len(self.extra_queue):
                extra_img = self.extra_queue[0]
                cv2.imshow("extra", extra_img)
Source file: get_extrinsics.py
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials
Author: taochenshh
def ir_callback(self, data):
    try:
        self.ir_img = self.mkgray(data)
    except CvBridgeError as e:
        print(e)
        return
    ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num, y_num))
    cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
    cv2.imshow('ir_img', self.ir_img)
    cv2.waitKey(5)
    if ir_ret:
        ir_tempimg = self.ir_img.copy()
        cv2.cornerSubPix(ir_tempimg, ir_corners, (11, 11), (-1, -1), criteria)
        cv2.drawChessboardCorners(ir_tempimg, (x_num, y_num), ir_corners, ir_ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags=cv2.CV_EPNP)
        # OpenCV 2.4 signature; OpenCV >= 3.0 returns (retval, rvec, tvec, inliers)
        ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
        self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)
        print("The world coordinate system's origin in the camera's coordinate system:")
        print("===ir_camera rvec:")
        print(ir_rvec)
        print("===ir_camera rmat:")
        print(self.ir_rmat)
        print("===ir_camera tvec:")
        print(self.ir_tvec)
        print("ir_camera_shape: ")
        print(self.ir_img.shape)
        print("The camera origin in the world coordinate system:")
        print("===camera rmat:")
        print(self.ir_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.ir_rmat.T, self.ir_tvec))
        data = {'rmat': self.ir_rmat.tolist(), 'tvec': self.ir_tvec.tolist()}
        with open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w") as depth_stream:
            yaml.dump(data, depth_stream)
        cv2.imshow('ir_img', ir_tempimg)
        cv2.waitKey(5)
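Once both callbacks have saved their poses against the same static chessboard, the IR-to-RGB extrinsic follows by composing the two transforms. A minimal sketch, assuming the file layouts written above and shortened local paths:

import numpy as np
import yaml

# If p_cam = R * p_world + t for each camera, then
#   R_ir2rgb = R_rgb * R_ir^T,  t_ir2rgb = t_rgb - R_ir2rgb * t_ir.
with open("rgb_camera_pose.yaml") as f:
    rgb = yaml.safe_load(f)
with open("ir_camera_pose.yaml") as f:
    ir = yaml.safe_load(f)

R_rgb, t_rgb = np.array(rgb['rmat']), np.array(rgb['tvec']).reshape(3, 1)
R_ir, t_ir = np.array(ir['rmat']), np.array(ir['tvec']).reshape(3, 1)

R_ir2rgb = R_rgb.dot(R_ir.T)
t_ir2rgb = t_rgb - R_ir2rgb.dot(t_ir)
print("IR -> RGB rotation:\n", R_ir2rgb)
print("IR -> RGB translation:\n", t_ir2rgb.ravel())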
def run(im):
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)
    print(" Drag across the screen to set lines.\n Do it twice")
    print(" After drawing the lines press 'r' to resume\n")
    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)
    lines = [l1, l2]  # two user-drawn line segments (avoid shadowing builtin `list`)
    mouse_down = False

    def callback(event, x, y, flags, param):
        nonlocal mouse_down
        global trigger  # module-level count of lines drawn so far
        if trigger < 2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                lines[trigger][0] = (x, y)
            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                lines[trigger][1] = (x, y)
                cv2.line(im_disp, (lines[trigger][0][0], lines[trigger][0][1]),
                         (lines[trigger][1][0], lines[trigger][1][1]), (255, 0, 0), 2)
                trigger += 1

    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name, im_disp)
        key = cv2.waitKey(10) & 0xFF
        if key == ord('r'):
            # 'r' resumes and returns the two drawn lines
            return lines
def get_start_points(image):
    cv2.namedWindow(MAZE_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(MAZE_NAME, 900, 900)
    cv2.imshow(MAZE_NAME, image)
    cv2.moveWindow(MAZE_NAME, 100, 100)
    print("Press 'A' to use the default start and end points, or press 'S' to choose your own")
    while True:
        key = cv2.waitKey(0)
        if key == ord('a'):
            print("Using Default Start and End Points")
            imageProcessor = ImageProcessor(image)
            start_x, start_y = imageProcessor.getDefaultStart(image)
            end_x, end_y = imageProcessor.getDefaultEnd(image)
            print("Start Point: {0}, End Point: {1}".format((start_x, start_y), (end_x, end_y)))
            break
        elif key == ord('s'):
            print("Please select a start point")
            start_x, start_y = get_user_selected_point(image)
            print("Start Point: {0}, please select an end point".format((start_x, start_y)))
            end_x, end_y = get_user_selected_point(image)
            print("End Point: {0}".format((end_x, end_y)))
            break
        else:
            print("Invalid key")
            continue
    cv2.destroyAllWindows()
    return start_x, start_y, end_x, end_y
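`get_user_selected_point` is referenced but not defined on this page; a minimal hypothetical version consistent with how it is called (block until one click in the maze window, return the pixel coordinates) might be:

import cv2

# Hypothetical helper: wait for a single left-click in the MAZE_NAME window
# and return its (x, y) pixel coordinates.
def get_user_selected_point(image):
    point = []

    def on_mouse(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            point.append((x, y))

    cv2.setMouseCallback(MAZE_NAME, on_mouse)
    while not point:
        cv2.imshow(MAZE_NAME, image)
        cv2.waitKey(10)
    return point[0]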
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)
    if not capture.isOpened():
        raise IOError("Cannot open camera")
    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)
    while True:
        ret, image = capture.read()
        if not ret:
            continue
        cv2.imshow("Capture", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()
# Display webcam frames with Matplotlib
def run(self):
    print("VIDEO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VIDEO client successfully connected...")
    data = b""
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
    while True:
        # read the fixed-size length header, then the zlib-compressed pickled frame
        while len(data) < payload_size:
            data += conn.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        while len(data) < msg_size:
            data += conn.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        frame = pickle.loads(frame_data)
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
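The wire format here is a length-prefixed stream: each frame is pickled, zlib-compressed, and preceded by a `struct`-packed size. A matching client sketch, with host, port, and camera index as placeholders:

import pickle
import socket
import struct
import zlib

import cv2

# Capture frames, compress, and send them with the same struct("L")
# length prefix the server unpacks above.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 8080))   # placeholder address
cap = cv2.VideoCapture(0)           # placeholder camera index
while True:
    ret, frame = cap.read()
    if not ret:
        break
    zframe = zlib.compress(pickle.dumps(frame))
    # struct "L" is platform-sized; both ends must agree (use "=L" to be safe)
    sock.sendall(struct.pack("L", len(zframe)) + zframe)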
def test_minicap():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()
    while True:
        try:
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w // 2, h // 2))  # integer size for cv2.resize
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == ord('d'):  # d for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
def dump_2dcoor():
    camera = libcpm.Camera()
    camera.setup()
    runner = get_parallel_runner('../data/cpm.npy')
    cv2.namedWindow('color')
    cv2.startWindowThread()
    cnt = 0
    while True:
        cnt += 1
        m1 = camera.get_for_py(0)
        m1 = np.array(m1, copy=False)
        m2 = camera.get_for_py(1)
        m2 = np.array(m2, copy=False)
        o1, o2 = runner(m1, m2)
        pts = []
        for k in range(14):
            pts.append((argmax_2d(o1[:, :, k]),
                        argmax_2d(o2[:, :, k])))
        pts = np.asarray(pts)
        np.save('pts{}.npy'.format(cnt), pts)
        cv2.imwrite("frame{}.png".format(cnt), m1)
        if cnt == 10:
            break
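`argmax_2d` is not defined on this page; a minimal hypothetical version consistent with its use on 2-D heatmaps:

import numpy as np

# Return the (row, col) of the largest response in a 2-D heatmap.
def argmax_2d(heatmap):
    return np.unravel_index(np.argmax(heatmap), heatmap.shape)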
def pick_corrs(images, n_pts_to_pick=4):
    data = [[[], 0, False, False, False, image, "Image %d" % i, n_pts_to_pick]
            for i, image in enumerate(images)]
    for d in data:
        win_name = d[6]
        cv2.namedWindow(win_name)
        cv2.setMouseCallback(win_name, corr_picker_callback, d)
        cv2.startWindowThread()
        cv2.imshow(win_name, d[5])
    key = None
    while key != '\n' and key != '\r' and key != 'q':
        key = cv2.waitKey(33)
        key = chr(key & 255) if key >= 0 else None
    cv2.destroyAllWindows()
    if key == 'q':
        return None
    else:
        return [d[0] for d in data]
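`corr_picker_callback` is not shown here; a minimal hypothetical version consistent with the `data` layout above (picked points accumulate in `d[0]`, up to the `n_pts_to_pick` limit stored in `d[7]`) could be:

import cv2

# Append each left-click to d[0] until the point limit d[7] is reached.
def corr_picker_callback(event, x, y, flags, d):
    if event == cv2.EVENT_LBUTTONDOWN and len(d[0]) < d[7]:
        d[0].append((x, y))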
def __init__(self):
    """ Initialize the parking spot recognizer """
    # Subscribe to topics of interest
    rospy.Subscriber("/camera/image_raw", Image, self.process_image)
    rospy.Subscriber('/cmd_vel', Twist, self.process_twist)
    # Initialize video images
    cv2.namedWindow('video_window')
    self.cv_image = None  # the latest image from the camera
    self.dst = np.zeros(IMG_SHAPE, np.uint8)
    self.arc_image = np.zeros(IMG_SHAPE, np.uint8)
    self.transformed = np.zeros(IMG_SHAPE, np.uint8)
    # Declare instance variables
    self.bridge = CvBridge()  # used to convert ROS messages to OpenCV
    self.hsv_lb = np.array([0, 70, 60])  # hsv lower bound to filter for parking lines
    self.hsv_ub = np.array([30, 255, 140])  # hsv upper bound
    self.vel = None
    self.omega = None
    self.color = (0, 0, 255)
def img_callback(self, image):
    try:
        inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
    except CvBridgeError as e:
        print(e)
        return
    inImgarr = np.array(inImg)
    self.outImg = self.process_image(inImgarr)
    # self.train_img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))
    # cv2.namedWindow("Capture Face")
    cv2.imshow('Capture Face', self.outImg)
    cv2.waitKey(3)
    if self.count == 100 * self.cp_rate:
        rospy.loginfo("Data Captured!")
        rospy.loginfo("Training Data...")
        self.fisher_train_data()
        rospy.signal_shutdown('done')
def __init__(self):
    self.node_name = "hand_gestures"
    rospy.init_node(self.node_name)
    rospy.on_shutdown(self.cleanup)
    # self.cv_window_name = self.node_name
    # cv2.namedWindow("Depth Image", 1)
    # cv2.moveWindow("Depth Image", 20, 350)
    self.bridge = CvBridge()
    self.numFingers = RecognizeNumFingers()
    self.depth_sub = rospy.Subscriber("/asus/depth/image_raw", Image, self.depth_callback)
    self.num_pub = rospy.Publisher('num_fingers', Int32, queue_size=10, latch=True)
    # self.img_pub = rospy.Publisher('hand_img', Image, queue_size=10)
    rospy.loginfo("Waiting for image topics...")
def __init__(self, rom_name, vis, frameskip=1, windowname='preview'):
    self.ale = ALEInterface()
    self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode")
    self.ale.setInt("random_seed", 123)
    self.ale.setInt("frame_skip", frameskip)
    romfile = str(ROM_PATH) + str(rom_name)
    if not os.path.exists(romfile):
        print('No ROM file found at "' + romfile + '".\nAdjust ROM_PATH or double-check the file exists.')
    self.ale.loadROM(romfile)
    self.legal_actions = self.ale.getMinimalActionSet()
    self.action_map = dict()
    self.windowname = windowname
    for i in range(len(self.legal_actions)):
        self.action_map[self.legal_actions[i]] = i
    # print(self.legal_actions)
    self.screen_width, self.screen_height = self.ale.getScreenDims()
    print("width/height: " + str(self.screen_width) + "/" + str(self.screen_height))
    self.vis = vis
    if vis:
        cv2.startWindowThread()
        cv2.namedWindow(self.windowname, flags=cv2.WINDOW_AUTOSIZE)  # window sized to the image (WINDOW_NORMAL would permit manual resizing)
def __init__(self, files):
    if len(files) < 2:
        raise Exception('Need at least two files to compare.')
    self.image_window = 'Image'
    self.threshold_window = 'Threshold'
    self.difference_window = 'Difference'
    self.files = files
    self.tb_threshold = 'Threshold'
    self.tb_image = 'Image'
    self.current_image = 0
    self.image1 = None
    self.image2 = None
    self.difference = None
    self.threshold = 25
    self.gray = None
    cv2.namedWindow(self.image_window, cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow(self.difference_window, cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow(self.threshold_window, cv2.WINDOW_AUTOSIZE)
    cv2.createTrackbar(self.tb_image, self.difference_window, 0, len(self.files) - 2, self.selectImage)
    cv2.createTrackbar(self.tb_threshold, self.threshold_window, self.threshold, 255, self.renderThreshold)
    self.render()
def __init__(self):
    self.bridge = cv_bridge.CvBridge()
    cv2.namedWindow("input", 1)
    cv2.createTrackbar('param1', 'input', 10, 300, nothing)
    cv2.createTrackbar('param2', 'input', 15, 300, nothing)
    cv2.namedWindow("processed", 1)
    self.image_sb = rospy.Subscriber('/usb_cam/image_raw', Image, self.image_callback)
    self.motion = Twist()
    rate = rospy.Rate(20)
    # publish to cmd_vel of the jackal
    pub = rospy.Publisher("/jackal_velocity_controller/cmd_vel", Twist, queue_size=10)
    while not rospy.is_shutdown():
        # publish Twist
        pub.publish(self.motion)
        rate.sleep()
def HistDemo(self):
    img = cv2.imread("lena.jpg")
    # invertImg = self.InvertImage(img)
    # logImg = self.LogImage(img, 10)
    # powerImg = self.PowerImage(img, 2.0)
    eqimg = self.HistEQ(img)
    # gray = baseTransfer.RGB2Gray(img)
    # eqgray = baseTransfer.HistEQ(gray)
    reghist = self.ImhistColor(img, 255)
    dImg, regMap = self.RegulateHist(eqimg, reghist)
    cv2.namedWindow("lena")
    cv2.imshow("lena", img)
    cv2.namedWindow("eq")
    cv2.imshow("eq", eqimg)
    cv2.namedWindow("reg")
    cv2.imshow("reg", dImg)
    cv2.waitKey(0)
def WienerFilterDemo(self, img):
    img = np.int16(img)
    noise = self.noiseGenerator.GuassNoise(img, 0, 10, np.int32(img.size))
    nImg = img + noise
    fn = np.fft.fftshift(np.fft.fft2(noise))
    Sn = np.abs(fn) ** 2.0                     # noise power spectrum
    ffImg = np.fft.fftshift(np.fft.fft2(img))
    Sf = np.abs(ffImg) ** 2.0                  # image power spectrum
    H = self.GenerateHDemo(img.shape)
    fImg = np.fft.fftshift(np.fft.fft2(img))
    gImg = np.fft.ifft2(np.fft.ifftshift(fImg * H))  # degrade the image with H
    gImg += noise
    fgImg = np.fft.fftshift(np.fft.fft2(gImg))
    wH = (H * (np.abs(H) ** 2.0 + Sn / Sf)) / np.abs(H) ** 2.0
    H1 = self.IdeaLowHInverse(wH, 500)
    ggImg = np.fft.ifft2(np.fft.ifftshift(fgImg * H1))
    cv2.namedWindow("orig")
    cv2.imshow("orig", np.uint8(img))
    cv2.namedWindow("g")
    cv2.imshow("g", np.uint8(gImg))
    cv2.namedWindow("Wiener filter restore")
    cv2.imshow("Wiener filter restore", np.uint8(ggImg))
    cv2.waitKey(0)
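For reference, the textbook Wiener deconvolution filter is W = conj(H) / (|H|^2 + Sn/Sf); the demo appears to build `wH` as the reciprocal of this filter and then invert it with band-limiting in `IdeaLowHInverse`. A direct sketch of the standard form, reusing `H`, `Sn`, `Sf`, and `fgImg` as defined above:

import numpy as np

# Standard Wiener deconvolution: W = conj(H) / (|H|^2 + Sn/Sf).
# H, Sn, Sf, fgImg are assumed to be the arrays computed in the demo above.
W = np.conj(H) / (np.abs(H) ** 2.0 + Sn / Sf)
restored = np.real(np.fft.ifft2(np.fft.ifftshift(fgImg * W)))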
def FFTDemo(self):
    img = cv2.imread("lena.jpg")
    baseTransfer = BasePixelTransfer.BasePixelTransfer()
    img = baseTransfer.RGB2Gray(img)
    fimg = img.copy()
    fimg = np.float32(fimg)
    # for i in range(0, img.shape[0]):
    #     for j in range(0, img.shape[1]):
    #         fimg[i, j] *= (-1) ** (i + j)
    fftimg = np.fft.fftshift(np.fft.fft2(fimg))
    ifft = np.real(np.fft.ifft2(fftimg))
    for i in range(0, ifft.shape[0]):
        for j in range(0, ifft.shape[1]):
            ifft[i, j] *= (-1) ** (i + j)
    cv2.namedWindow("lena")
    cv2.imshow("lena", img)
    cv2.namedWindow("fft")
    cv2.imshow("fft", np.real(fftimg))
    cv2.imshow("ifft", np.uint8(ifft))
    cv2.waitKey(0)
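The commented-out and trailing loops implement spectrum centering in the spatial domain: multiplying a 2-D signal by (-1)^(x+y) before the FFT moves the DC component to the center, the same effect `np.fft.fftshift` achieves on the spectrum. A vectorized equivalent of the loop, where `img` stands for any 2-D float array:

import numpy as np

# Multiply by (-1)^(x+y) without looping over pixels.
y, x = np.indices(img.shape[:2])
centered = img * ((-1.0) ** (x + y))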
def LaplaceFFTDemo(self):
    origfftimg = self.PrepareFFT()
    fftimg = origfftimg.copy()
    sz = fftimg.shape
    center = np.mat(fftimg.shape) / 2.0
    for i in range(0, 512):
        for j in range(0, 512):
            # print(-(np.float64(i - center[0, 0]) ** 2.0 + np.float64(j - center[0, 1]) ** 2.0))
            fftimg[i, j] *= -0.00001 * (np.float64(i - 256) ** 2.0 + np.float64(j - 256) ** 2.0)
    ifft = self.GetIFFT(fftimg)
    # plt.imshow(np.real(fftimg))
    # plt.show()
    # cv2.namedWindow("fft1")
    # cv2.imshow("fft1", np.real(origfftimg))
    cv2.namedWindow("fft")
    cv2.imshow("fft", np.real(fftimg))
    # cv2.imshow("ifft", np.uint8(ifft))
    cv2.namedWindow("ifft")
    cv2.imshow("ifft", ifft)
    cv2.waitKey(0)
def GuassNoiseDemo(self, img, miu, theta, noiseNum):
    sz = img.shape
    noiseImg = self.GuassNoise(img, miu, theta, noiseNum)
    gImg = img + noiseImg
    # pdf = self.GuassianPDF(miu, theta)
    # for i in range(0, noiseNum):
    #     x = np.random.random() * (sz[0] - 1)
    #     y = np.random.random() * (sz[1] - 1)
    #     noise = self.PDFMap(pdf, np.random.random())
    #     print(noise)
    #     noiseImg[x, y] = noiseImg[x, y] + noise
    cv2.namedWindow("lena")
    cv2.namedWindow("gauss noise")
    cv2.namedWindow("dst")
    cv2.imshow("lena", img)
    cv2.imshow("gauss noise", np.uint8(noiseImg))
    cv2.imshow("dst", np.uint8(gImg))
    cv2.waitKey(0)
def evaluate_images():
    ssd = SSD()
    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    loader = coco.Loader(False)
    test_batches = loader.create_batches(1, shuffle=True)
    global i2name
    i2name = loader.i2name
    while True:
        batch = next(test_batches)
        imgs, anns = loader.preprocess_batch(batch)
        pred_labels_f, pred_locs_f, step = ssd.sess.run([ssd.pred_labels, ssd.pred_locs, ssd.global_step],
                                                        feed_dict={ssd.imgs_ph: imgs, ssd.bn: False})
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0])
        draw_outputs(imgs[0], boxes_, confidences_, wait=0)
def show_webcam(address):
    cam = webcam.WebcamStream(address)
    cam.start_stream_threads()
    ssd = SSD()
    global i2name
    i2name = pickle.load(open("i2name.p", "rb"))
    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    boxes_ = None
    confidences_ = None
    while True:
        sample = cam.image
        resized_img = skimage.transform.resize(sample, (image_size, image_size))
        pred_labels_f, pred_locs_f = ssd.sess.run([ssd.pred_labels, ssd.pred_locs],
                                                  feed_dict={ssd.imgs_ph: [resized_img], ssd.bn: False})
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0], boxes_, confidences_)
        resize_boxes(resized_img, sample, boxes_)
        draw_outputs(np.asarray(sample) / 255.0, boxes_, confidences_, wait=10)
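`resize_boxes` is referenced but not shown; a hypothetical in-place version, assuming boxes are [x, y, w, h] arrays in resized-image pixels:

# Rescale box coordinates from the square network input back to the
# original frame size, mutating the boxes in place.
def resize_boxes(resized_img, original_img, boxes):
    if boxes is None:
        return
    sy = original_img.shape[0] / float(resized_img.shape[0])
    sx = original_img.shape[1] / float(resized_img.shape[1])
    for box in boxes:
        box[0] *= sx
        box[1] *= sy
        box[2] *= sx
        box[3] *= sy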
def gui():
    size = 100
    img = np.zeros((1000, 700, 3), np.uint8)
    cv2.namedWindow('GUI')
    xmar = ymar = 50
    for i in range(6):
        for j in range(4):
            img1 = cv2.imread("faces/cara" + str(i + j + 1) + ".JPEG")
            img1 = resize(img1, width=size, height=size)
            if img1.shape[0] == 100 and img1.shape[1] == 100:
                img[ymar:ymar + size, xmar + (j * (size + xmar)):xmar + (j * (size + xmar) + size)] = img1
            else:
                img[ymar:ymar + img1.shape[0], xmar + (j * (size + xmar)):xmar + (j * (size + xmar) + img1.shape[1])] = img1
        ymar += 150
    cv2.putText(img, "Presiona Q para salir", (5, 25), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))  # "Press Q to quit"
    cv2.putText(img, "TFG Lucas Gago", (500, 925), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
    cv2.putText(img, "Version 3", (500, 950), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
    cv2.imshow('GUI', img)
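`gui()` only composes and shows the panel once; an event loop is still needed so the window repaints and the advertised Q key works. A minimal usage sketch:

import cv2

gui()
while True:
    if cv2.waitKey(50) & 0xFF == ord('q'):  # "Presiona Q para salir" / press Q to quit
        break
cv2.destroyAllWindows()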
def __init__(self, rom_name, vis, windowname='preview'):
    self.ale = ALEInterface()
    self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode")
    self.ale.setInt("random_seed", 123)
    self.ale.setInt("frame_skip", 4)
    self.ale.loadROM('roms/' + rom_name)
    self.legal_actions = self.ale.getMinimalActionSet()
    self.action_map = dict()
    self.windowname = windowname
    for i in range(len(self.legal_actions)):
        self.action_map[self.legal_actions[i]] = i
    self.init_frame_number = 0
    # print(self.legal_actions)
    self.screen_width, self.screen_height = self.ale.getScreenDims()
    print("width/height: " + str(self.screen_width) + "/" + str(self.screen_height))
    self.vis = vis
    if vis:
        cv2.startWindowThread()
        cv2.namedWindow(self.windowname)
def setFingerTemplate(big_image, name_template_file):
    global add_frame
    name_window = 'big image'
    cv2.namedWindow(name_window)
    cv2.setMouseCallback(name_window, save_corners)
    add_frame = np.zeros(big_image.shape, np.uint8)
    while True:
        frame_with_rect = cv2.add(add_frame, big_image)
        cv2.imshow(name_window, frame_with_rect)
        cur_key = cv2.waitKey(1)
        if cur_key == 27:  # Esc cancels
            break
        if cur_key == ord('s') and len(corners_x) == 2:
            template_img = big_image[corners_y[0]:corners_y[1], corners_x[0]:corners_x[1]]
            cv2.imwrite(name_template_file, template_img)
            break
    cv2.destroyAllWindows()
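`save_corners` and the `corners_x`/`corners_y` lists are not shown; a minimal hypothetical callback consistent with the usage above (two clicks select the template rectangle, drawn into the shared `add_frame` overlay):

import cv2

corners_x, corners_y = [], []

# Record up to two clicked corners and draw the selection rectangle
# into the global overlay frame used by setFingerTemplate.
def save_corners(event, x, y, flags, param):
    global add_frame
    if event == cv2.EVENT_LBUTTONDOWN and len(corners_x) < 2:
        corners_x.append(x)
        corners_y.append(y)
        if len(corners_x) == 2:
            cv2.rectangle(add_frame, (corners_x[0], corners_y[0]),
                          (corners_x[1], corners_y[1]), (0, 255, 0), 2)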
def setCameraProperties():
    name_window = 'Press esc after finish'
    cv2.namedWindow(name_window)
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_SETTINGS, 0)
    ret, frame_from = cap.read()
    while cap.isOpened():
        ret, frame_from = cap.read()
        frame = cv2.flip(frame_from, -1)
        if ret:
            cv2.imshow(name_window, frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break
        else:
            break
    # Release everything if job is finished
    cap.release()
    cv2.destroyAllWindows()