def GetWayPoints(self,Data):
filename = Data.data
    # Clear all existing markers in RViz
for marker in self.makerArray.markers:
marker.action = Marker.DELETE
self.makerArray_pub.publish(self.makerArray)
# clear former lists
self.waypoint_list.clear()
self.marker_list[:] = []
self.makerArray.markers[:] = []
    with open(filename, 'r') as f:
        for line in f:
            fields = line.split(",")
            current_wp_name = str(fields[0])
            current_point = Point(float(fields[1]), float(fields[2]), float(fields[3]))
            current_quaternion = Quaternion(float(fields[4]), float(fields[5]),
                                            float(fields[6]), float(fields[7]))
            self.waypoint_list[current_wp_name] = Pose(current_point, current_quaternion)
self.create_markers()
self.makerArray_pub.publish(self.makerArray)
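# A sketch of the waypoint file this parser expects: one comma-separated line per
# waypoint, ordered name, position x/y/z, quaternion x/y/z/w (format inferred
# from the parsing code above; the values here are illustrative only):
#
#   dock,1.50,0.25,0.00,0.0,0.0,0.0,1.0
#   charger,-2.10,3.40,0.00,0.0,0.0,0.7071,0.7071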
Python: example usages of the Point() class
baxter_cube_calibration.py (project: cu-perception-manipulation-stack, author: correlllab)
def update_translation(self):
t = self.tl.getLatestCommonTime("/root", "/camera_link")
position, quaternion = self.tl.lookupTransform("/root",
"/camera_link",
t)
print("Cam Pos:")
print(position)
translation = self.touch_centroids[0] - self.camera_centroids[0]
print("Translation: ")
print(translation)
    position = np.asarray(position) + translation  # lookupTransform returns a tuple; convert before adding
print("New Cam Pos:")
print(position)
#print(self.touch_centroids)
#print(self.camera_centroids)
    self.update_transform.publish(Pose(Point(*position), Quaternion(*quaternion)))
def _place(self):
self.state = "place"
rospy.loginfo("Placing...")
place_pose = self.int_markers['place_pose']
# It seems positions and orientations are randomly required to
# be actual Point and Quaternion objects or lists/tuples. The
# least coherent API ever seen.
self.br.sendTransform(Point2list(place_pose.pose.position),
Quaternion2list(place_pose.pose.orientation),
rospy.Time.now(),
"place_pose",
self.robot.base)
self.robot.place(place_pose)
# For cubelets:
place_pose.pose.position.z += 0.042
# For YCB:
# place_pose.pose.position.z += 0.026
self.place_pose = place_pose
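# Point2list and Quaternion2list are helpers assumed by the snippets above and
# below; a plausible sketch of their implementations:
def Point2list(point):
    return [point.x, point.y, point.z]

def Quaternion2list(quaternion):
    return [quaternion.x, quaternion.y, quaternion.z, quaternion.w]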
def __init__(self):
    self._angle = 0
self._step = STEP * -1
    self._x = RECT_START_X
    self._y = RECT_START_Y
self._step_x = STEP
self._step_y = STEP
self._q_side = 0
    self._cf = [
        Swarmfly(sc=self, cfid=0, color=ColorRGBA(1, 0, 1, 1),
                 hoverpoint=Point(SPACE_OFFSET_X + 1.2, SPACE_OFFSET_Y + 1.2, SPACE_OFFSET_Z + 1),
                 takeoffpoint=Point(SPACE_OFFSET_X + 1.25, SPACE_OFFSET_Y + 0.15, SPACE_OFFSET_Z + 0.5),
                 landingpoint=Point(SPACE_OFFSET_X + 0.25, SPACE_OFFSET_Y + 0.15, SPACE_OFFSET_Z + 0.5)),
        Swarmfly(sc=self, cfid=1, color=ColorRGBA(1, 1, 0, 1),
                 hoverpoint=Point(SPACE_OFFSET_X + 0.7, SPACE_OFFSET_Y + 0.7, SPACE_OFFSET_Z + 1),
                 takeoffpoint=Point(SPACE_OFFSET_X + 1.75, SPACE_OFFSET_Y + 0.15, SPACE_OFFSET_Z + 0.5),
                 landingpoint=Point(SPACE_OFFSET_X + 0.75, SPACE_OFFSET_Y + 0.15, SPACE_OFFSET_Z + 0.5)),
    ]
for cf in self._cf:
cf.hoverpoint = self._calc_circle_position(cf.id)
self._rand_p1 = None
self._rand_p2 = None
self.timer = rospy.Timer(rospy.Duration(1.0/UPDATE_RATE),
self._run)
def get_mean_point(array_points):
x_array = []
y_array = []
z_array = []
for point in array_points:
x_array.append(point.x)
y_array.append(point.y)
z_array.append(point.z)
point_mean = Point()
point_mean.x = np.mean(x_array)
point_mean.y = np.mean(y_array)
point_mean.z = np.mean(z_array)
point_std = Point()
    point_std.x = np.std(x_array)
    point_std.y = np.std(y_array)
    point_std.z = np.std(z_array)
return (point_mean, point_std)
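# A vectorized equivalent, for reference (a sketch; assumes array_points is a
# non-empty iterable of geometry_msgs Point objects):
def get_mean_point_vectorized(array_points):
    xyz = np.array([[p.x, p.y, p.z] for p in array_points])
    return Point(*xyz.mean(axis=0)), Point(*xyz.std(axis=0))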
def __init__(self):
    rsp.init_node("vision_node")  # init the node before creating publishers/subscribers
    self.bridge = CvBridge() # allows us to convert our image to cv2
self.zed_pub = rsp.Publisher("/image_echo", Image, queue_size = 1)
self.imginfo_pub = rsp.Publisher("/exploring_challenge", img_info, queue_size = 1)
self.zed_img = rsp.Subscriber("/camera/rgb/image_rect_color", Image, self.detect_img) #subscribes to the ZED camera image
self.odom_sub = rsp.Subscriber("/vesc/odom", Odometry, self.odom_callback)
self.last_arb_position = Point()
self.gone_far_enough = True
self.header = std_msgs.msg.Header()
self.heightThresh = 75 #unit pixels MUST TWEAK
self.odomThresh = 1 #unit meters
self.blob_msg = img_info()
def __init__(self):
self.bridge = CvBridge()
# initializes subscriber for Baxter's left hand camera image topic
self.image_sub = rospy.Subscriber("/cameras/left_hand_camera/image",Image,self.find_cups)
# initializes subscriber for the location of the treasure (published by the find_treasure node)
self.treasure_location_sub = rospy.Subscriber("/treasure_location",Treasure,self.find_treasure)
# initializes publisher to publish the location of the cup containing the treasure
self.treasure_cup_pub = rospy.Publisher("treasure_cup_location",Point,queue_size=10)
# initializes publisher to publish the processed image to Baxter's display
self.image_tracking_pub = rospy.Publisher("/robot/xdisplay",Image,queue_size=10)
self.treasure_cup_location = Point()
self.cups = []
self.cupCenters = [[0,0],[0,0],[0,0]]
self.wasPreviouslyTrue = False
self.flag = False
self.minRadius = 10
for i in range(0,3):
self.cups.append(cup())
# determines the location of the 3 red cups (callback for the image topic subscriber)
def test_pose(self):
t = Pose(
position=Point(1.0, 2.0, 3.0),
orientation=Quaternion(*transformations.quaternion_from_euler(np.pi, 0, 0))
)
t_mat = ros_numpy.numpify(t)
np.testing.assert_allclose(t_mat.dot([0, 0, 1, 1]), [1.0, 2.0, 2.0, 1.0])
msg = ros_numpy.msgify(Pose, t_mat)
np.testing.assert_allclose(msg.position.x, t.position.x)
np.testing.assert_allclose(msg.position.y, t.position.y)
np.testing.assert_allclose(msg.position.z, t.position.z)
np.testing.assert_allclose(msg.orientation.x, t.orientation.x)
np.testing.assert_allclose(msg.orientation.y, t.orientation.y)
np.testing.assert_allclose(msg.orientation.z, t.orientation.z)
np.testing.assert_allclose(msg.orientation.w, t.orientation.w)
def geom_pose_from_rt(rt):
msg = geom_msg.Pose()
msg.position = geom_msg.Point(x=rt.tvec[0], y=rt.tvec[1], z=rt.tvec[2])
xyzw = rt.quat.to_xyzw()
msg.orientation = geom_msg.Quaternion(x=xyzw[0], y=xyzw[1], z=xyzw[2], w=xyzw[3])
return msg
def publish_cloud_markers(pub_ns, _arr, c='b', stamp=None, flip_rb=False, frame_id='map'):
"""
Publish point cloud on:
pub_ns: Namespace on which the cloud will be published
arr: numpy array (N x 3) for point cloud data
c: Option 1: 'c', 'b', 'r' etc colors accepted by matplotlibs color
Option 2: float ranging from 0 to 1 via matplotlib's jet colormap
Option 3: numpy array (N x 3) with r,g,b vals ranging from 0 to 1
s: supported only by matplotlib plotting
alpha: supported only by matplotlib plotting
"""
arr, carr = copy_pointcloud_data(_arr, c, flip_rb=flip_rb)
marker = vis_msg.Marker(type=vis_msg.Marker.SPHERE_LIST, ns=pub_ns, action=vis_msg.Marker.ADD)
marker.header.frame_id = frame_id
marker.header.stamp = stamp if stamp is not None else rospy.Time.now()
marker.scale.x = 0.01
marker.scale.y = 0.01
# XYZ
inds, = np.where(~np.isnan(arr).any(axis=1))
marker.points = [geom_msg.Point(arr[j,0], arr[j,1], arr[j,2]) for j in inds]
# RGB (optionally alpha)
    N, D = carr.shape[:2]  # branch on the color array: N x 3 (rgb) or N x 4 (rgba)
if D == 3:
marker.colors = [std_msg.ColorRGBA(carr[j,0], carr[j,1], carr[j,2], 1.0)
for j in inds]
elif D == 4:
marker.colors = [std_msg.ColorRGBA(carr[j,0], carr[j,1], carr[j,2], carr[j,3])
for j in inds]
marker.lifetime = rospy.Duration()
_publish_marker(marker)
print 'Publishing marker', N
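# A usage sketch (synthetic data; assumes a running ROS node and that
# _publish_marker forwards the marker to a visualization topic):
#
#   cloud = np.random.uniform(-1.0, 1.0, size=(1000, 3))
#   publish_cloud_markers('demo_cloud', cloud, c='r', frame_id='map')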
def publish_line_segments(pub_ns, _arr1, _arr2, c='b', stamp=None, frame_id='camera', flip_rb=False, size=0.002):
"""
Publish point cloud on:
pub_ns: Namespace on which the cloud will be published
arr: numpy array (N x 3) for point cloud data
c: Option 1: 'c', 'b', 'r' etc colors accepted by matplotlibs color
Option 2: float ranging from 0 to 1 via matplotlib's jet colormap
Option 3: numpy array (N x 3) with r,g,b vals ranging from 0 to 1
s: supported only by matplotlib plotting
alpha: supported only by matplotlib plotting
"""
arr1, carr = copy_pointcloud_data(_arr1, c, flip_rb=flip_rb)
arr2 = (deepcopy(_arr2)).reshape(-1,3)
marker = vis_msg.Marker(type=vis_msg.Marker.LINE_LIST, ns=pub_ns, action=vis_msg.Marker.ADD)
    if arr1.shape != arr2.shape:
        raise AssertionError('arr1 and arr2 must have the same shape')
marker.header.frame_id = frame_id
marker.header.stamp = stamp if stamp is not None else rospy.Time.now()
marker.scale.x = size
marker.scale.y = size
marker.color.b = 1.0
marker.color.a = 1.0
marker.pose.position = geom_msg.Point(0,0,0)
marker.pose.orientation = geom_msg.Quaternion(0,0,0,1)
# Handle 3D data: [ndarray or list of ndarrays]
arr12 = np.hstack([arr1, arr2])
inds, = np.where(~np.isnan(arr12).any(axis=1))
marker.points = []
for j in inds:
marker.points.extend([geom_msg.Point(arr1[j,0], arr1[j,1], arr1[j,2]),
geom_msg.Point(arr2[j,0], arr2[j,1], arr2[j,2])])
# RGB (optionally alpha)
marker.colors = [std_msg.ColorRGBA(carr[j,0], carr[j,1], carr[j,2], 1.0)
for j in inds]
marker.lifetime = rospy.Duration()
_publish_marker(marker)
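# A companion sketch for line segments (synthetic data, same assumptions as above):
#
#   starts = np.zeros((100, 3))
#   ends = np.random.randn(100, 3) * 0.5
#   publish_line_segments('demo_rays', starts, ends, c='b', frame_id='camera')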
def publish_pose(pose, stamp=None, frame_id='camera'):
    msg = geom_msg.PoseStamped()
msg.header.frame_id = frame_id
msg.header.stamp = stamp if stamp is not None else rospy.Time.now()
tvec = pose.tvec
x,y,z,w = pose.quat.to_xyzw()
msg.pose.position = geom_msg.Point(tvec[0],tvec[1],tvec[2])
msg.pose.orientation = geom_msg.Quaternion(x,y,z,w)
_publish_pose(msg)
def process_msg(msg, who):
    assert msg._type == 'nav_msgs/Odometry'
global last_rear, last_front, last_yaw, last_front_t
if who == 'rear':
last_rear = get_position_from_odometry(msg)
elif who == 'front' and last_rear is not None:
cur_front = get_position_from_odometry(msg)
last_yaw = get_yaw(cur_front, last_rear)
twist_lin = np.dot(rotMatZ(-last_yaw), get_speed_from_odometry(msg))
if last_front is not None:
dt = msg.header.stamp.to_sec() - last_front_t
speed = (cur_front - last_front)/dt
speed = np.dot(rotMatZ(-last_yaw), speed)
print '1', speed
print '2', twist_lin
print '3', np.sqrt((speed-twist_lin).dot(speed-twist_lin))
odo = Odometry()
odo.header.frame_id = '/base_link'
odo.header.stamp = rospy.Time.now()
            speed_yaw = get_yaw([0, 0, 0], -speed)  # alternatively: [-speed[0], -speed[1]*100, 0]
speed_yaw_q = ros_tf.transformations.quaternion_from_euler(0, 0, speed_yaw)
odo.pose.pose.orientation = Quaternion(*list(speed_yaw_q))
#odo.twist.twist.linear = Point(x=speed[0], y=speed[1], z=speed[2])
odo.twist.covariance = list(np.eye(6,6).reshape(1,-1).squeeze())
            # Creating a Publisher on every call is expensive; ideally create it once at module level.
            rospy.Publisher('odo_speed', Odometry, queue_size=1).publish(odo)
#print last_yaw
last_front = cur_front
last_front_t = msg.header.stamp.to_sec()
return twist_lin
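# A sketch of the module-level publisher suggested above (hypothetical name;
# constructed once at startup instead of inside the callback):
#
#   ODO_SPEED_PUB = rospy.Publisher('odo_speed', Odometry, queue_size=1)
#
#   # ...then inside process_msg:
#   ODO_SPEED_PUB.publish(odo)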
test_lemniscate_trajectory.py (project: trajectory_tracking, author: lmiguelvargasf)
def setUp(self):
self.trajectory = LemniscateTrajectory(5, 4)
self.expected_position = Point()
test_linear_trajectory.py (project: trajectory_tracking, author: lmiguelvargasf)
def setUp(self):
self.trajectory = LinearTrajectory(1, 0, 2, 0)
self.expected_position = Point()
test_lissajous_trajectory.py (project: trajectory_tracking, author: lmiguelvargasf)
def setUp(self):
self.trajectory = LissajousTrajectory(1, 1, 3, 2, 4)
self.expected_position = Point()
test_circular_trajectory.py (project: trajectory_tracking, author: lmiguelvargasf)
def setUp(self):
self.trajectory = CircularTrajectory(5, 4)
self.expected_position = Point()
test_squared_trajectory.py (project: trajectory_tracking, author: lmiguelvargasf)
def setUp(self):
self.trajectory = SquaredTrajectory(3, 4)
self.expected_position = Point()
def GetWayPoints(self,Data):
# dir = os.path.dirname(__file__)
# filename = dir+'/locationPoint.txt'
filename = Data.data
rospy.loginfo(str(filename))
rospy.loginfo(self.locations)
self.locations.clear()
rospy.loginfo(self.locations)
    with open(filename, 'r') as f:
        for line in f:
            fields = line.split(",")
            current_wp_name = str(fields[0])
            rospy.loginfo(current_wp_name)
            current_point = Point(float(fields[1]), float(fields[2]), float(fields[3]))
            current_quaternion = Quaternion(float(fields[4]), float(fields[5]),
                                            float(fields[6]), float(fields[7]))
            self.locations[current_wp_name] = Pose(current_point, current_quaternion)
rospy.loginfo(self.locations)
#Rviz Marker
self.init_markers()
self.IsWPListOK = True
def pointFromTranslation(translation):
output = Point()
output.x = translation[0]
output.y = translation[1]
    # output.z keeps its default of 0.0; use translation[2] if a 3-D point is needed
    return output
def get_odom(self):
# Get the current transform between the odom and base frames
try:
(trans, rot) = self.tf_listener.lookupTransform(self.odom_frame, self.base_frame, rospy.Time(0))
except (tf.Exception, tf.ConnectivityException, tf.LookupException):
rospy.loginfo("TF Exception")
return
return (Point(*trans), quat_to_angle(Quaternion(*rot)))
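# quat_to_angle is not defined in these snippets; a minimal sketch of the usual
# implementation, extracting yaw with tf.transformations:
def quat_to_angle(quat):
    """Return the yaw (rotation about Z) of a geometry_msgs Quaternion."""
    from tf.transformations import euler_from_quaternion
    roll, pitch, yaw = euler_from_quaternion((quat.x, quat.y, quat.z, quat.w))
    return yaw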
def get_position(self):
# Get the current transform between the odom and base frames
try:
(trans, rot) = self.tf_listener.lookupTransform(self.odom_frame, self.base_frame, rospy.Time(0))
except (tf.Exception, tf.ConnectivityException, tf.LookupException):
rospy.loginfo("TF Exception")
return
return Point(*trans)
def __init__(self, grid_map):
self.map = grid_map
self.width = grid_map.info.width
self.height = grid_map.info.height
self.resolution = grid_map.info.resolution
self.origin = Point()
self.origin.x = grid_map.info.origin.position.x
self.origin.y = grid_map.info.origin.position.y
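# A sketch of the world-to-grid conversion these fields enable (hypothetical
# helper; assumes a row-major nav_msgs/OccupancyGrid layout):
def world_to_index(self, wx, wy):
    gx = int((wx - self.origin.x) / self.resolution)
    gy = int((wy - self.origin.y) / self.resolution)
    return gy * self.width + gx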
baxter_scan_object.py (project: cu-perception-manipulation-stack, author: correlllab)
def update_camera_transform(self):
t = self.tl.getLatestCommonTime("/root", "/camera_link")
position, quaternion = self.tl.lookupTransform("/root",
"/camera_link",
t)
position, quaternion = self.update_transformation(list(position), list(quaternion))
#get ICP to find rotation and translation
#multiply old position and orientation by rotation and translation
#publish new pose to /update_camera_alignment
    self.update_transform.publish(Pose(Point(*position), Quaternion(*quaternion)))
def move_calib_position(self):
# move arm to the calibration position
self.calib_pose = PoseStamped(
Header(0, rospy.Time(0), self.robot.base),
Pose(Point(0.338520675898,-0.175860479474,0.0356775075197),
Quaternion(-0.0183493755758,0.708424150944, 0.704712092876, 0.0343413949013)))
self.robot.move_ik(self.calib_pose)
def _pick(self):
# State not modified until picking succeeds
try:
obj_to_get = int(self.character)
except ValueError:
rospy.logerr("Please provide a number in picking mode")
return
frame_name = "object_pose_{}".format(obj_to_get)
rospy.loginfo("Picking object {}...".format(obj_to_get))
if self.tl.frameExists(self.robot.base) and self.tl.frameExists(frame_name):
t = self.tl.getLatestCommonTime(self.robot.base, frame_name)
position, quaternion = self.tl.lookupTransform(self.robot.base,
frame_name,
t)
print("position", position)
print("quaternion", quaternion)
position = list(position)
# Vertical orientation
self.br.sendTransform(position,
[1, 0, 0, 0],
rospy.Time.now(),
"pick_pose",
self.robot.base)
# Ignore orientation from perception
pose = Pose(Point(*position),
Quaternion(1, 0, 0, 0))
h = Header()
h.stamp = t
h.frame_id = self.robot.base
stamped_pose = PoseStamped(h, pose)
self.robot.pick(stamped_pose)
self.state = 'postpick'
def translate(self, pose, direction, distance):
"""Get an PoseStamped msg and apply a translation direction * distance
"""
# Let's get the pose, move it to the tf frame, and then
# translate it
base_pose = self.tl.transformPose(self.base, pose)
base_pose.pose.position = Point(*np.array(direction) * distance +
Point2list(base_pose.pose.position))
return base_pose
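# A usage sketch for translate (hypothetical grasp_pose; moves the pose 5 cm
# along the base frame's +Z axis):
#
#   shifted = self.translate(grasp_pose, direction=(0, 0, 1), distance=0.05)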