def __init__(self, boards, flags=0, pattern=Patterns.Chessboard, name='', checkerboard_flags=cv2.CALIB_CB_FAST_CHECK):
    """Set up a calibrator for the given target boards and detection pattern.

    :param boards: list of ChessboardInfo describing candidate calibration targets
    :param flags: cv2 calibration flags stored for the calibration step
    :param pattern: one of Patterns.Chessboard / Patterns.ACircles / Patterns.Circles
    :param name: human-readable name for this calibrator
    :param checkerboard_flags: flags forwarded to the OpenCV checkerboard detector
    :raises ValueError: if *pattern* is not one of the supported values
    """
    # Ordering the dimensions for the different detectors is actually a minefield...
    if pattern == Patterns.Chessboard:
        # Make sure n_cols > n_rows to agree with OpenCV CB detector output
        self._boards = [ChessboardInfo(max(i.n_cols, i.n_rows), min(i.n_cols, i.n_rows), i.dim) for i in boards]
    elif pattern == Patterns.ACircles:
        # 7x4 and 4x7 are actually different patterns. Assume square-ish pattern, so n_rows > n_cols.
        self._boards = [ChessboardInfo(min(i.n_cols, i.n_rows), max(i.n_cols, i.n_rows), i.dim) for i in boards]
    elif pattern == Patterns.Circles:
        # We end up having to check both ways anyway
        self._boards = boards
    else:
        # Fail fast: previously an unknown pattern silently left self._boards
        # unset, producing an AttributeError far from the real cause.
        raise ValueError("Unsupported calibration pattern: %r" % (pattern,))
    # Set to true after we perform calibration
    self.calibrated = False
    self.calib_flags = flags
    self.checkerboard_flags = checkerboard_flags
    self.pattern = pattern
    self.br = cv_bridge.CvBridge()
    # self.db is list of (parameters, image) samples for use in calibration. parameters has form
    # (X, Y, size, skew) all normalized to [0,1], to keep track of what sort of samples we've taken
    # and ensure enough variety.
    self.db = []
    # For each db sample, we also record the detected corners.
    self.good_corners = []
    # Set to true when we have sufficiently varied samples to calibrate
    self.goodenough = False
    # Acceptance thresholds for the (X, Y, size, skew) progress metrics above.
    self.param_ranges = [0.7, 0.7, 0.4, 0.5]
    self.name = name
    self.accu_image = None
# Example snippets below: ROS nodes built around cv_bridge's CvBridge class.
def __init__(self):
    """Data-collection node: stores image patches for a labelled category.

    Configuration comes from the ``/store_data/*`` parameter namespace;
    camera frames, candidate regions and capture triggers arrive on topics.
    """
    self.data_dir = rospy.get_param('/store_data/data_dir')
    self.category = rospy.get_param('/store_data/category')
    self.pic_count = rospy.get_param('/store_data/init_idx')
    # Parameter is a frequency; store the period between captures (seconds).
    self.rate = 1 / float(rospy.get_param('/store_data/rate'))
    self.bridge = CvBridge()
    # you can read this value off of your sensor from the '/camera/depth_registered/camera_info' topic
    self.P = np.array([[525.0, 0.0, 319.5, 0.0], [0.0, 525.0, 239.5, 0.0], [0.0, 0.0, 1.0, 0.0]])
    self.capture = False
    # Register subscribers LAST: callbacks can fire as soon as a subscriber
    # exists, so every attribute they read (bridge, P, capture, ...) must
    # already be initialized. The original registered them first.
    self.image_sub = rospy.Subscriber('/camera/rgb/image_color', Image, self.img_cb)
    self.patches_sub = rospy.Subscriber('/candidate_regions_depth', PolygonStamped, self.patches_cb)
    self.capture_sub = rospy.Subscriber('/capture/keyboard', Bool, self.capture_cb)
def ImgCcallback(self, ros_img):
    # Callback for a compressed-image topic: decodes the frame twice and
    # shares the results with the rest of the module through globals.
    global imgi
    # MXNet decode of the raw compressed bytes.
    imgi = mx.img.imdecode(ros_img.data)
    global img_rect
    global img_raw
    global Frame
    # OpenCV/numpy decode of the same message via cv_bridge.
    img_rect = CvBridge().compressed_imgmsg_to_cv2(ros_img)
    # img_raw = img_rect.copy()
    # Wrap a copy in a pyvision Image; presumably consumed by downstream
    # tracking/display code — confirm against the rest of the module.
    Frame = pv.Image(img_rect.copy())
    # cv2.imshow("tester", Frame.asOpenCV())
# def Imgcallback(self, ros_img):
# global img_rect
# img_rect = CvBridge().imgmsg_to_cv2(ros_img)
def DepthImgcallback(self, ros_img):
    # Depth-image callback: convert the ROS message to an OpenCV array and
    # expose it through the module-level `img_depth` global.
    global img_depth
    bridge = CvBridge()
    img_depth = bridge.imgmsg_to_cv2(ros_img)
    # cv2.imshow("tester", img_depth)
# def trackCallback(event):
# print "1"
def __init__(self, context):
    """
    Folder image publisher plugin: lets the user step through the images of
    a chosen directory and publishes the selected one.

    :param context: QT context, aka parent
    """
    super(FolderImagePublisherPlugin, self).__init__(context)
    # Widget setup
    self.setObjectName('FolderImagePublisherPlugin')
    self._widget = QWidget()
    context.add_widget(self._widget)
    # Layout and attach to widget
    layout = QHBoxLayout()
    self._widget.setLayout(layout)
    # Read-only status line telling the user what to do next.
    self._info = QLineEdit()
    self._info.setDisabled(True)
    self._info.setText("Please choose a directory (top-right corner)")
    layout.addWidget(self._info)
    # '<' / '>' buttons rotate through the file collection backwards /
    # forwards and publish the newly selected entry.
    self._left_button = QPushButton('<')
    self._left_button.clicked.connect(partial(self._rotate_and_publish, -1))
    layout.addWidget(self._left_button)
    self._right_button = QPushButton('>')
    self._right_button.clicked.connect(partial(self._rotate_and_publish, 1))
    layout.addWidget(self._right_button)
    # Publisher for the selected image. (Original comment claimed
    # "Set subscriber and service to None", which did not match the code.)
    self._pub = rospy.Publisher("folder_image", Image, queue_size=1)
    self._bridge = CvBridge()
    # Deque rotated by the buttons; presumably holds the directory's image
    # files — confirm in _rotate_and_publish.
    self._files = collections.deque([])
def __init__(self, context):
    """
    TestPlugin class to evaluate the image_recognition_msgs interfaces
    :param context: QT context, aka parent
    """
    super(TestPlugin, self).__init__(context)
    # Widget setup
    self.setObjectName('Test Plugin')
    self._widget = QWidget()
    context.add_widget(self._widget)
    # Layout and attach to widget
    layout = QVBoxLayout()
    self._widget.setLayout(layout)
    # Image display area; a drawn rectangle is delivered to
    # image_roi_callback (clear_on_click resets the selection).
    self._image_widget = ImageWidget(self._widget, self.image_roi_callback, clear_on_click=True)
    layout.addWidget(self._image_widget)
    # Input field
    grid_layout = QGridLayout()
    layout.addLayout(grid_layout)
    # Read-only instruction line for the user.
    self._info = QLineEdit()
    self._info.setDisabled(True)
    self._info.setText("Draw a rectangle on the screen to perform recognition of that ROI")
    layout.addWidget(self._info)
    # Bridge for opencv conversion
    self.bridge = CvBridge()
    # Set subscriber and service to None; they are created elsewhere on demand.
    self._sub = None
    self._srv = None
def __init__(self, node_name, receive_topic, send_topic, virtual_off=None):
    """Function-unit node bridging a receive topic to a send topic.

    :param node_name: ROS node name passed to the FunctionUnit base
    :param receive_topic: topic this unit consumes
    :param send_topic: topic this unit produces
    :param virtual_off: optional topic for the Bool "virtual off" signal
    """
    # Initialize the base node first so topic helpers are available.
    FunctionUnit.__init__(self, node_name)
    self.br = CvBridge()
    self._receive_topic = receive_topic
    self._send_topic = send_topic
    self._virtual = virtual_off
    # Sender for the virtual-off Bool signal (None topic if not configured).
    self._virtual_send = FunctionUnit.create_send(self, virtual_off, Bool)
def __init__(self, image_topic, target_x, target_y, target_w, target_h):
    """Collect frames from *image_topic* and remember a target region.

    :param image_topic: topic publishing sensor_msgs/Image frames
    :param target_x, target_y, target_w, target_h: names suggest a target
        bounding box (x, y, width, height) — confirm against callback_image
    """
    self.bridge = CvBridge()
    self.frames = []
    self.target_x, self.target_y, self.target_w, self.target_h = target_x, target_y, target_w, target_h
    # Subscribe LAST: callback_image may fire as soon as the subscriber
    # exists and must not observe a partially-initialized object. The
    # original registered the subscriber before bridge/frames were set.
    self.image_sub = rospy.Subscriber(image_topic, Image, self.callback_image, queue_size=1)
def __init__(self, prefix=None, color_range=None):
    """Initialize the Zarj eyes component.

    :param prefix: optional prefix kept for later use
    :param color_range: optional (low, high) pair; each element is
        converted to a numpy array
    """
    rospy.loginfo("Zarj eyes initialization begin")
    self.bridge = CvBridge()
    self.prefix = prefix
    # Normalize the optional range into a pair of numpy arrays, or None.
    if color_range:
        low, high = color_range[0], color_range[1]
        self.color_range = (np.array(low), np.array(high))
    else:
        self.color_range = None
    # Per-eye camera subscribers are attached later.
    self.image_sub_left = None
    self.image_sub_right = None
    rospy.loginfo("Zarj eyes initialization finished")
def __init__(self, align_path, net_path, storage_folder):
    """Face learn/detect/clear ROS service node backed by openface + dlib.

    :param align_path: path to the dlib alignment model for openface
    :param net_path: path to the openface Torch embedding network
    :param storage_folder: directory for stored face images (created if missing)
    """
    self._bridge = CvBridge()
    # Init align and net
    self._align = openface.AlignDlib(align_path)
    self._net = openface.TorchNeuralNet(net_path, imgDim=96, cuda=False)
    self._face_detector = dlib.get_frontal_face_detector()
    self._face_dict = {}  # Mapping from string to list of reps
    # Optionally restore a previously pickled face dictionary.
    self._face_dict_filename = rospy.get_param('~face_dict_filename', '')
    if self._face_dict_filename != '':
        if not os.path.isfile(self._face_dict_filename):
            # print() with a single argument behaves identically on
            # Python 2 and 3; the original used py2-only print statements.
            print('_face_dict does not exist; will save to %s' % self._face_dict_filename)
        else:
            # NOTE(review): pickle.load is unsafe on untrusted data;
            # tolerable here only because the path comes from a local param.
            with open(self._face_dict_filename, 'rb') as f:
                self._face_dict = pickle.load(f)
            print('read _face_dict: %s' % self._face_dict_filename)
    if not os.path.exists(storage_folder):
        os.makedirs(storage_folder)
    self._storage_folder = storage_folder
    # Advertise services LAST: a request could arrive as soon as a service
    # exists, and the handlers need _align/_net/_face_dict/_storage_folder.
    self._learn_srv = rospy.Service('learn', LearnFace, self._learn_face_srv)
    self._detect_srv = rospy.Service('detect', DetectFace, self._detect_face_srv)
    self._clear_srv = rospy.Service('clear', Empty, self._clear_faces_srv)
def __init__(self):
    """Track the SLAM pose and the latest map image."""
    # ROS image <-> OpenCV conversion helper.
    self.bridge = CvBridge()
    # Most recent 2-D pose estimate: position (x, y) and heading angle.
    self.x, self.y, self.ang = 0.0, 0.0, 0.0
    # Pose and map-image inputs.
    self.pos_sub = rospy.Subscriber("/slam_out_pose", PoseStamped, self.pos_callback)
    self.image_sub = rospy.Subscriber("/map_image/full", Image, self.img_callback)
# Source: erle_rover_followline.py — project gazebo_python_examples, author erlerobot.
def __init__(self):
    """Wire the front camera to the line-following callback and prepare
    the MAVROS RC-override publisher."""
    # Converts incoming sensor_msgs/Image frames to OpenCV arrays.
    self.bridge = CvBridge()
    # Steering output: RC override commands sent to MAVROS.
    self.pub = rospy.Publisher('/mavros/rc/override', OverrideRCIn, queue_size=10)
    # Every front-camera frame is processed by self.callback.
    self.image_sub = rospy.Subscriber("/rover/front/image_front_raw", Image, self.callback)
def __init__(self):
    """Telegram <-> ROS bridge: reads the bot token and starts polling."""
    token = rospy.get_param('/telegram/token', None)
    if token is None:
        rospy.logerr("No token found in /telegram/token param server.")
        # A missing token is a fatal configuration error: exit with a
        # non-zero status. (The original called exit(0), reporting success.)
        exit(1)
    else:
        rospy.loginfo("Got telegram bot token from param server.")
    # Set CvBridge
    self.bridge = CvBridge()
    # Create the EventHandler and pass it your bot's token.
    updater = Updater(token)
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # on message...
    dp.add_handler(MessageHandler(Filters.text, self.pub_received))
    dp.add_handler(CallbackQueryHandler(self.button))
    # log all errors
    dp.add_error_handler(self.error)
    # Start the Bot
    updater.start_polling()
def __init__(self):
    """move_tbot node: fuses QR markers, odometry, finger counts and face
    names, then runs the main robot routine until ROS shuts down."""
    self.node_name = "move_tbot"
    rospy.init_node(self.node_name)
    rospy.on_shutdown(self._shutdown)
    self.bridge = CvBridge()
    # Reusable Twist message for turn commands.
    self.turn = Twist()
    # Presumably a navigation-goal helper — confirm in GoToPose.
    self.move = GoToPose()
    # self.face_recog_file = FaceRecognition()
    # self.get_person_data = GetPersonData()
    # State accumulated by the callbacks below.
    self.qr_data = []
    self.all_face_names = []
    self.face_names = []
    self.counter = 0
    # Input streams feeding run_tbot_routine.
    self.qr_sub = rospy.Subscriber('ar_pose_marker', AlvarMarkers, self.qr_callback)
    self.odom_sub = rospy.Subscriber('odom', Odometry, self.odom_callback)
    self.num_fingers_sub = rospy.Subscriber('num_fingers', Int32, self.num_fingers_callback)
    # self.hand_img_sub = rospy.Subscriber('hand_img', Image, self.hand_img_callback)
    # self.face_img_sub = rospy.Subscriber('face_img', Image, self.face_img_callback)
    self.face_name_sub = rospy.Subscriber('face_names', StringArray, self.face_names_callback)
    self.all_face_names_sub = rospy.Subscriber('all_face_names', StringArray, self.all_face_names_callback)
    self.turn_pub = rospy.Publisher('cmd_vel_mux/input/navi', Twist, queue_size=10)
    self.rate = rospy.Rate(10)
    # NOTE: __init__ never returns — it blocks here, running the robot's
    # main loop until rospy is shut down.
    while not rospy.is_shutdown():
        self.run_tbot_routine()
def __init__(self):
    """Display node: shows each camera frame in an OpenCV window."""
    # ROS image <-> OpenCV conversion helper.
    self.bridge = cv_bridge.CvBridge()
    # Pre-create the window used by image_callback.
    cv2.namedWindow("window", 1)
    self.image_sub = rospy.Subscriber('camera/rgb/image_raw', Image, self.image_callback)
def __init__(self):
    """Follower node: views camera frames and drives via teleop cmd_vel."""
    # ROS image <-> OpenCV conversion helper.
    self.bridge = cv_bridge.CvBridge()
    # Pre-create the window used by image_callback.
    cv2.namedWindow("window", 1)
    self.image_sub = rospy.Subscriber('camera/rgb/image_raw', Image, self.image_callback)
    self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
    # Reused velocity message populated by the callback.
    self.twist = Twist()
def test_object_image(self):
    """Tests posting telemetry data through client."""
    # Test fixtures.
    url = "http://interop"
    client_args = (url, "testuser", "testpass", 1.0)
    object_id = 1
    width, height = 40, 30
    # Random RGB image, converted to a ROS message and a serialized payload.
    pixels = np.random.randint(0, 256, (width, height, 3)).astype(np.uint8)
    ros_img = CvBridge().cv2_to_imgmsg(pixels)
    img = ObjectImageSerializer.from_msg(ros_img)
    with InteroperabilityMockServer(url) as server:
        # Configure the canned responses the client will hit.
        server.set_root_response()
        server.set_login_response()
        server.set_post_object_image_response(object_id)
        server.set_get_object_image_response(object_id, img, "image/png")
        server.set_delete_object_image_response(object_id)
        # Exercise the full image lifecycle through the client.
        client = InteroperabilityClient(*client_args)
        client.wait_for_server()
        client.login()
        client.post_object_image(object_id, ros_img)
        client.get_object_image(object_id)
        client.delete_object_image(object_id)
def from_msg(cls, msg):
    """Serializes a ROS Image message into a compressed PNG image.

    Args:
        msg: ROS Image or CompressedImage message.

    Returns:
        Compressed PNG image as a byte string.

    Raises:
        CvBridgeError: On image conversion error.
    """
    if isinstance(msg, CompressedImage):
        # Decompress message.
        msg = cls.from_raw(msg.data)
    # Convert ROS Image to OpenCV image.
    bridge = CvBridge()
    img = bridge.imgmsg_to_cv2(msg)
    # Convert to PNG with highest level of compression to limit bandwidth
    # usage. PNG is used since it is a lossless format, so this can later
    # be retrieved as a ROS image without issue.
    compression = [cv2.IMWRITE_PNG_COMPRESSION, 9]
    # tobytes() replaces the deprecated ndarray.tostring() (same output).
    png = cv2.imencode(".png", img, compression)[1].tobytes()
    return png
def from_raw(cls, raw, compress=False):
    """Deserializes binary-encoded image data into a ROS Image message.

    Args:
        raw: Binary encoded image data.
        compress: Whether to return a compressed image or not.

    Returns:
        ROS Image or CompressedImage message.

    Raises:
        CvBridgeError: On image conversion error.
    """
    # Convert to OpenCV image. np.frombuffer replaces the deprecated
    # np.fromstring; imdecode only reads the buffer, so the read-only
    # view it returns is sufficient here.
    nparr = np.frombuffer(raw, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    # Convert to ROS message.
    bridge = CvBridge()
    msg = bridge.cv2_to_imgmsg(img)
    if compress:
        data = cls.from_msg(msg)
        msg = CompressedImage()
        msg.format = "png"
        msg.data = data
    return msg
def __init__(self):
    """Lane-following node: detects lane lines in camera frames and
    publishes velocity commands."""
    """ROS Subscriptions """
    self.image_pub = rospy.Publisher("/image_converter/output_video",Image, queue_size=10)
    self.image_sub = rospy.Subscriber("/raspicam_node/image/image_raw",Image,self.cvt_image)
    self.cmdVelocityPub = rospy.Publisher('/platform_control/cmd_vel', Twist, queue_size=10)
    self.cmdVelocityStampedPub = rospy.Publisher('/platform_control/cmd_vel_stamped', TwistStamped, queue_size=10)
    """ Variables """
    self.bridge = CvBridge()
    # Latest frame plus the intermediate images of the detection pipeline
    # (blur -> edges -> mask -> line overlay), filled in by the callbacks.
    self.latestImage = None
    self.outputImage = None
    self.blurImage = None
    self.edgeImage = None
    self.maskedImage = None
    self.lineMarkedImage = None
    # Set True once the first frame has been received.
    self.imgRcvd = False
    # Blur kernel size — presumably for a Gaussian blur; confirm in the pipeline.
    self.kernel_size = 11
    # Edge-detection thresholds — presumably Canny low/high; confirm usage.
    self.low_threshold = 40
    self.high_threshold = 50
    # Hough-transform parameters: distance resolution (pixels), angular
    # resolution (radians), vote threshold, and line length/gap limits.
    self.rho = 1
    self.theta = np.pi/180
    self.threshold = 100
    self.min_line_len = 60
    self.max_line_gap = 80
    # Last detected line segment (x1, y1, x2, y2) and intersection point.
    self.lines = (0, 0, 0, 0)
    self.intersectionPoint = (0, 0)
    # Forward speed command — units presumably m/s; confirm against cmd_vel use.
    self.speed = 0.2
    self.flag = 0