Python TransformBroadcaster() class: example source code
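
All of the examples below come from open-source ROS projects and share one core pattern: create a tf.TransformBroadcaster() and call sendTransform() with a translation, an (x, y, z, w) quaternion, a timestamp, a child frame id, and a parent frame id. For orientation, here is a minimal self-contained sketch of that pattern (the frame names and the 20 Hz rate are placeholders, not taken from any project below):

import rospy
import tf

def broadcast_demo():
    rospy.init_node('tf_broadcaster_demo')
    br = tf.TransformBroadcaster()  # create once, reuse every cycle
    rate = rospy.Rate(20.0)         # broadcast at 20 Hz
    while not rospy.is_shutdown():
        br.sendTransform((1.0, 0.0, 0.0),      # translation (x, y, z) in metres
                         (0.0, 0.0, 0.0, 1.0), # quaternion (x, y, z, w): identity
                         rospy.Time.now(),     # timestamp
                         'child_frame',        # child frame id
                         'world')              # parent frame id
        rate.sleep()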

application.py (project: atf, author: ipa-fmw)
def __init__(self):
        # ATF code
        self.atf = ATF()

        # native app code
        self.pub_freq = 20.0  # Hz
        self.br = tf.TransformBroadcaster()
        rospy.sleep(1)  # wait for the tf broadcaster to become active (rospy bug?)
detect_crazyflie.py (project: ROS-Robotics-by-Example, author: FairchildC)
def __init__(self):

      # initialize ROS node and transform publisher
      rospy.init_node('crazyflie_detector', anonymous=True)
      self.pub_tf = tf.TransformBroadcaster()

      self.rate = rospy.Rate(50.0)                      # publish transform at 50 Hz

      # initialize values for crazyflie location on Kinect v2 image
      self.cf_u = 0                        # u is pixels left(0) to right(+)
      self.cf_v = 0                        # v is pixels top(0) to bottom(+)
      self.cf_d = 0                        # d is distance camera(0) to crazyflie(+) from depth image
      self.last_d = 0                      # last non-zero depth measurement

      # crazyflie orientation relative to the Kinect v2 image frame (Euler angles)
      self.r = -1.5708                     # roll: -pi/2
      self.p = 0                           # pitch: 0
      self.y = -3.1415                     # yaw: approximately -pi

      # CvBridge converts between ROS Image messages and OpenCV images
      self.bridge = CvBridge()

      cv2.namedWindow("KinectV2", 1)

      # Wait for the camera_info topic to become available
      rospy.wait_for_message('/kinect2/qhd/camera_info', CameraInfo)

      # Subscribe to Kinect v2 qhd camera_info to get image frame height and width
      rospy.Subscriber('/kinect2/qhd/camera_info', CameraInfo, self.camera_data, queue_size=1)

      # Subscribe to registered color and depth images
      rospy.Subscriber('/kinect2/qhd/image_color_rect', Image, self.image_callback, queue_size=1)
      rospy.Subscriber('/kinect2/qhd/image_depth_rect', Image, self.depth_callback, queue_size=1)

      self.rate.sleep()                        # suspend until next cycle

   # This callback function sets parameters regarding the camera.
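
The image and depth callbacks are not shown in this excerpt; presumably they locate the crazyflie at pixel (u, v) with depth d and back-project that into a camera-frame point before broadcasting its transform. A sketch of the standard pinhole back-projection, assuming fx, fy, cx, cy come from the CameraInfo K matrix (the helper name is hypothetical, not from the original file):

def pixel_to_camera_frame(u, v, d, fx, fy, cx, cy):
    # Standard pinhole back-projection: d is the metric depth along the optical axis.
    x = (u - cx) * d / fx
    y = (v - cy) * d / fy
    return (x, y, d)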
roboclaw_sim.py (project: roboclaw_ros, author: doisyg)
def publish_odom(self, cur_x, cur_y, cur_theta, vx, vth):
        quat = tf.transformations.quaternion_from_euler(0, 0, cur_theta)
        current_time = rospy.Time.now()

        # NOTE: constructed on every call; see the reuse sketch after this example
        br = tf.TransformBroadcaster()
        br.sendTransform((cur_x, cur_y, 0),
                         quat,  # reuse the quaternion computed above
                         current_time,
                         "base_link",
                         "odom")

        odom = Odometry()
        odom.header.stamp = current_time
        odom.header.frame_id = 'odom'

        odom.pose.pose.position.x = cur_x
        odom.pose.pose.position.y = cur_y
        odom.pose.pose.position.z = 0.0
        odom.pose.pose.orientation = Quaternion(*quat)

        # Diagonal of the 6x6 (row-major) pose covariance: x, y, z, roll, pitch, yaw.
        # Unobserved dimensions (z, roll, pitch) get a very large variance.
        odom.pose.covariance[0] = 0.01
        odom.pose.covariance[7] = 0.01
        odom.pose.covariance[14] = 99999
        odom.pose.covariance[21] = 99999
        odom.pose.covariance[28] = 99999
        odom.pose.covariance[35] = 0.01

        odom.child_frame_id = 'base_link'
        odom.twist.twist.linear.x = vx
        odom.twist.twist.linear.y = 0
        odom.twist.twist.angular.z = vth
        odom.twist.covariance = odom.pose.covariance

        self.odom_pub.publish(odom)
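
One note on this example: a new tf.TransformBroadcaster() is constructed on every call to publish_odom, which creates a fresh /tf publisher under the hood each time. The usual pattern is to create the broadcaster once and reuse it. A minimal sketch under that assumption (the class name is hypothetical):

import rospy
import tf

class OdomBroadcaster(object):
    def __init__(self):
        # Create the broadcaster once; sendTransform() can then be
        # called from the odometry update at any rate.
        self.br = tf.TransformBroadcaster()

    def publish_odom_tf(self, cur_x, cur_y, cur_theta):
        quat = tf.transformations.quaternion_from_euler(0, 0, cur_theta)
        self.br.sendTransform((cur_x, cur_y, 0), quat,
                              rospy.Time.now(), "base_link", "odom")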
cap_to_obs_tf.py (project: tea, author: antorsae)
def handle_msg_car(msg, who):
    assert isinstance(msg, Odometry)

    global last_cap_r, last_cap_f, last_cap_yaw

    if who == 'cap_r':
        last_cap_r = rtk_position_to_numpy(msg)
    elif who == 'cap_f' and last_cap_r is not None:
        cap_f = rtk_position_to_numpy(msg)
        cap_r = last_cap_r

        last_cap_f = cap_f
        last_cap_yaw = get_yaw(cap_f, cap_r)
    elif who == 'obs_r' and last_cap_f is not None and last_cap_yaw is not None:
        md = None
        for obs in metadata:
            if obs['obstacle_name'] == 'obs1':
                md = obs
        assert md, 'obs1 metadata not found'

        # find obstacle rear RTK to centroid vector
        lrg_to_gps = [md['rear_gps_l'], -md['rear_gps_w'], md['rear_gps_h']]
        lrg_to_centroid = [md['l'] / 2., -md['w'] / 2., md['h'] / 2.]
        obs_r_to_centroid = np.subtract(lrg_to_centroid, lrg_to_gps)

        # in the fixed GPS frame 
        cap_f = last_cap_f
        obs_r = rtk_position_to_numpy(msg)

        # in the capture vehicle front RTK frame
        cap_to_obs = np.dot(rotMatZ(-last_cap_yaw), obs_r - cap_f)
        cap_to_obs_centroid = cap_to_obs + obs_r_to_centroid

        br = tf.TransformBroadcaster()
        br.sendTransform(tuple(cap_to_obs_centroid), (0,0,0,1), rospy.Time.now(), 'obs_centroid', 'gps_antenna_front')

        # publish obstacle bounding box
        marker = Marker()
        marker.header.frame_id = "obs_centroid"
        marker.header.stamp = rospy.Time.now()

        marker.type = Marker.CUBE
        marker.action = Marker.ADD

        marker.scale.x = md['l']
        marker.scale.y = md['w']
        marker.scale.z = md['h']

        marker.color.r = 0.2
        marker.color.g = 0.5
        marker.color.b = 0.2
        marker.color.a = 0.5

        marker.lifetime = rospy.Duration()

        # NOTE: creating a Publisher inside the callback is costly and may drop
        # early messages; it is normally constructed once at startup.
        pub = rospy.Publisher("obs_bbox", Marker, queue_size=10)
        pub.publish(marker)
ros_node.py (project: tea, author: antorsae)
def handle_radar_msg(msg, dont_fuse):
    assert msg._md5sum == '6a2de2f790cb8bb0e149d45d297462f8'

    publish_rviz_topics = True

    with g_fusion_lock:
        # do we have any estimation?
        if g_fusion.last_state_mean is not None:
            pose = g_fusion.lidar_observation_function(g_fusion.last_state_mean)

            observations = RadarObservation.from_msg(msg, RADAR_TO_LIDAR, 0.9115)

            # find nearest observation to current object position estimation
            distance_threshold = 4.4
            nearest = None
            nearest_dist = 1e9
            for o in observations:
                dist = [o.x - pose[0], o.y - pose[1], o.z - pose[2]]
                dist = np.sqrt(np.array(dist).dot(dist))

                if dist < nearest_dist and dist < distance_threshold:
                    nearest_dist = dist
                    nearest = o

            if nearest is not None:
                # FUSION
                if not dont_fuse:
                    g_fusion.filter(nearest)

                if publish_rviz_topics:
                    header = Header()
                    header.frame_id = '/velodyne'
                    header.stamp = rospy.Time.now()
                    point = np.array([[nearest.x, nearest.y, nearest.z]], dtype=np.float32)
                    rospy.Publisher(name='obj_radar_nearest',
                      data_class=PointCloud2,
                      queue_size=1).publish(pc2.create_cloud_xyz32(header, point))

                    #pose = g_fusion.lidar_observation_function(g_fusion.last_state_mean)

                    #br = ros_tf.TransformBroadcaster()
                    #br.sendTransform(tuple(pose), (0,0,0,1), rospy.Time.now(), 'car_fuse_centroid', 'velodyne')

#                     if last_known_box_size is not None:
#                         # bounding box
#                         marker = Marker()
#                         marker.header.frame_id = "car_fuse_centroid"
#                         marker.header.stamp = rospy.Time.now()
#                         marker.type = Marker.CUBE
#                         marker.action = Marker.ADD
#                         marker.scale.x = last_known_box_size[2]
#                         marker.scale.y = last_known_box_size[1]
#                         marker.scale.z = last_known_box_size[0]
#                         marker.color = ColorRGBA(r=1., g=1., b=0., a=0.5)
#                         marker.lifetime = rospy.Duration()
#                         pub = rospy.Publisher("car_fuse_bbox", Marker, queue_size=10)
#                         pub.publish(marker)
baxter_continuous_scan.py (project: cu-perception-manipulation-stack, author: correlllab)
def move_cup(self):
        self.gripper.close()

        rotate = random.randrange(20, 180) * 3.14159 / 180.0  # random angle in radians
        print("rotate: " + str(rotate))
        t = self.tl.getLatestCommonTime("/root", "/right_gripper")
        cur_position, quaternion = self.tl.lookupTransform("/root",
                                                           "/right_gripper",
                                                           t)
        quaternion = [1, 0, 0, 0]  # override the looked-up orientation with a fixed quaternion
        requrd_rot = (0, 0, rotate)  # rotation about z, in radians
        requrd_trans = (0, 0, 0)
        rotated_pose = self.getOffsetPoses(cur_position, quaternion, requrd_rot, requrd_trans)
        trans_5 = tuple(rotated_pose[:3])
        quat_5 = tuple(rotated_pose[3:])
        br = tf.TransformBroadcaster()
        br.sendTransform(trans_5,
                         quat_5,
                         rospy.Time.now(),
                         "pick_pose",
                         "/root")
        pick_pose = PoseStamped(Header(0, rospy.Time(0), n.robot.base),
                                Pose(Point(*trans_5),
                                Quaternion(*quat_5)))
        self.move_ik(pick_pose)
        self.gripper.open()

        t = self.tl.getLatestCommonTime("/root", "/right_gripper")
        cur_position, quaternion = self.tl.lookupTransform("/root",
                                                           "/right_gripper",
                                                           t)
        cur_position = list(cur_position)
        cur_position[2] = cur_position[2] + 0.08
        pose = Pose(Point(*cur_position),
                        Quaternion(*quaternion))
        while True:
            try:
                stamped_pose = PoseStamped( Header(0, rospy.Time(0), n.robot.base), pose)
                self.move_ik(stamped_pose)
                break
            except AttributeError:
                print("can't find valid pose for gripper cause I'm dumb")
                return False

        rospy.sleep(2)

        home = PoseStamped(
                    Header(0, rospy.Time(0), n.robot.base),
                    Pose(Point(0.60558, -0.24686, 0.093535),
                    Quaternion(0.99897, -0.034828, 0.027217, 0.010557)))
        self.move_ik(home) 
        rospy.sleep(2)
safety_stop.py (project: cu-perception-manipulation-stack, author: correlllab)
def control_from_sensor_values(self):
        log_values = np.log(self.bx.inside)
        # Match which side is which: the sign of the diff should match
        # whether the gripper needs to move towards the positive or
        # negative part of the y axis in left_gripper. That is, we need
        # left side - right side (using left/right as in the
        # l_gripper_{l, r}_finger_tip tfs). We take log(values) for a
        # better behaved controller.
        inside_diff = log_values[7:] - log_values[:7]
        scalar_diff = sum(inside_diff) / len(inside_diff)

        # Take negative for one of the sides, so that angles should
        # match for a parallel object in the gripper
        l_angle, _ = np.polyfit(np.arange(7), log_values[:7], 1)
        r_angle, _ = np.polyfit(np.arange(7), -log_values[7:], 1)
        rospy.loginfo('Angle computed from l: {}'.format(np.rad2deg(l_angle)))
        rospy.loginfo('Angle computed from r: {}'.format(np.rad2deg(r_angle)))
        avg_angle = np.arctan((l_angle + r_angle) / 2.0)

        # Let's get a frame by the middle of the end effector
        # p = Point(0, 0, -0.05)
        p = (0, 0, -0.05)
        # Of course, tf uses (w, x, y, z), Quaternion uses x, y, z,
        # w. However, tf.TransformBroadcaster().sendTransform uses the
        # tf order.
        # q = Quaternion(q[1], q[2], q[3], q[0])
        q = tf.transformations.quaternion_about_axis(avg_angle, (1, 0, 0))
        # We had a lot of trouble sending a transform (p, q) from
        # left_gripper, and then asking for a point in the last frame
        # in the tf coordinate. Timing issues that I couldn't
        # solve. Instead, do it manually here:
        m1 = tf.transformations.quaternion_matrix(q)
        m1[:3, 3] = p
        p = (0, scalar_diff / 100, 0.05)
        m2 = np.eye(4)
        m2[:3, 3] = p
        m = m2.dot(m1)
        # Extract pose now
        p = Point(*m[:3, 3])
        q = Quaternion(*tf.transformations.quaternion_from_matrix(m))
        time = rospy.Time(0)
        h = Header()
        h.frame_id = 'left_gripper'
        h.stamp = time
        pose = Pose(p, q)
        new_endpose = self.tl.transformPose('base', PoseStamped(h, pose))
        self.bx.move_ik(new_endpose)
manager.py (project: cu-perception-manipulation-stack, author: correlllab)
def __init__(self, robot, *robotargs):
        super(PickAndPlaceNode, self).__init__('pp_node')
        rospy.loginfo("PickAndPlaceNode")
        _post_perceive_trans = defaultdict(lambda: self._pick)
        _post_perceive_trans.update({'c': self._calibrate})
        _preplace = defaultdict(lambda: self._preplace)
        self.transition_table = {
            # If calibration has already happened, allow skipping it
            'initial': {'c': self._calibrate, 'q': self._perceive,
                        's': self._preplace},
            'calibrate': {'q': self._perceive, 'c': self._calibrate},
            'perceive': {'p': self._post_perceive, 'q': self._perceive, 's': self._stop_perceive, 'c': self._calibrate},
            'post_perceive': _post_perceive_trans,
            'postpick': {'1': self._level, '2': self._level, '9': self._level},
            'level': _preplace,
            'preplace': {'s': self._place},
            'place': {'q': self._perceive, 'c': self._calibrate}
            }
        rospy.loginfo("PickAndPlaceNode1")
        if callable(robot):
            self.robot = robot(*robotargs)
        else:
            self.robot = robot
        self.robot.level = 1
        rospy.loginfo("PickAndPlaceNode2")
        # Hardcoded place for now
        self.place_pose = PoseStamped(
            Header(0, rospy.Time(0), self.robot.base),
            Pose(Point(0.526025806, 0.4780144, -0.161326153),
                 Quaternion(1, 0, 0, 0)))
        self.tl = tf.TransformListener()
        self.num_objects = 0
        # Would this work too? Both tf and tf2 have (c) 2008...
        # self.tf2 = tf2_ros.TransformListener()
        self.n_objects_sub = rospy.Subscriber("/num_objects", Int64,
                                              self.update_num_objects,
                                              queue_size=1)
        self.perception_pub = rospy.Publisher("/perception/enabled",
                                              Bool,
                                              queue_size=1)
        self.alignment_pub = rospy.Publisher("/alignment/doit",
                                             Bool,
                                             queue_size=1)
        self.br = tf.TransformBroadcaster()
        rospy.loginfo("PickAndPlaceNode3")
        self.int_marker_server = InteractiveMarkerServer('int_markers')
        # Dict to map imarker names and their updated poses
        self.int_markers = {}

        # Ideally this call would be in a Factory/Metaclass/Parent
        self.show_options()
        self.perceive = False
        # self.robot.home()
        # self.move_calib_position()

