Python class CvBridgeError(): example source code
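Every snippet on this page follows the same basic pattern: convert a ROS image message with CvBridge inside a try block and catch cv_bridge.CvBridgeError when the conversion fails. Below is a minimal, self-contained sketch of that pattern; the node name and topic are placeholders, not taken from any of the projects listed here.

import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image

bridge = CvBridge()

def image_callback(msg):
    try:
        # convert sensor_msgs/Image to an OpenCV BGR image
        cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        rospy.logerr(e)
        return  # skip this frame if the conversion failed
    cv2.imshow("image", cv_image)
    cv2.waitKey(1)

if __name__ == '__main__':
    rospy.init_node('cv_bridge_example')
    rospy.Subscriber('/camera/rgb/image_raw', Image, image_callback)
    rospy.spin()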

registration_stereo.py (Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, Author: taochenshh)
def ir_calib_callback(self,data):
        try:
            self.ir_img = self.mkgray(data)
        except CvBridgeError as e:
            print(e)

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (y_num,x_num))
        cv2.imshow('ir_img',self.ir_img)
        cv2.waitKey(5)
        if ir_ret == True:
            ir_tempimg = self.ir_img.copy()
            cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(ir_tempimg, (y_num,x_num), ir_corners,ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)

            depth_stream = open("/home/chentao/kinect_calibration/ir_camera_corners.yaml", "w")
            data = {'corners':ir_corners.tolist()}
            yaml.dump(data, depth_stream)

            cv2.imshow('ir_img',ir_tempimg)
            cv2.waitKey(5)
get_extrinsics.py (Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, Author: taochenshh)
def rgb_callback(self,data):
        try:
            self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(self.rgb_img,cv2.COLOR_BGR2GRAY)
        rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
        cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
        cv2.imshow('rgb_img',self.rgb_img)
        cv2.waitKey(5)
        if rgb_ret == True:
            rgb_tempimg = self.rgb_img.copy()
            cv2.cornerSubPix(gray,rgb_corners,(5,5),(-1,-1),criteria)            
            cv2.drawChessboardCorners(rgb_tempimg, (x_num,y_num), rgb_corners,rgb_ret)
            rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
            self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
            print("The world coordinate system's origin in camera's coordinate system:")
            print("===rgb_camera rvec:")
            print(rgb_rvec)
            print("===rgb_camera rmat:")
            print(self.rgb_rmat)
            print("===rgb_camera tvec:")
            print(self.rgb_tvec)
            print("rgb_camera_shape: ")
            print(self.rgb_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.rgb_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))

            rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w")
            data = {'rmat':self.rgb_rmat.tolist(), 'tvec':self.rgb_tvec.tolist()}
            yaml.dump(data, rgb_stream)


            cv2.imshow('rgb_img',rgb_tempimg)
            cv2.waitKey(5)
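This rgb_callback and the ir_callback that follows report the camera pose in the world (chessboard) frame by inverting the board-to-camera transform: the rotation becomes rmat.T and the camera origin becomes -rmat.T dot tvec. A minimal sketch of that inversion in isolation, with made-up rvec/tvec values rather than real calibration output:

import numpy as np
import cv2

# rvec, tvec: pose of the chessboard (world) frame in the camera frame,
# as returned by cv2.solvePnP / cv2.solvePnPRansac (values here are illustrative)
rvec = np.array([[0.1], [0.2], [0.3]])
tvec = np.array([[0.05], [0.00], [0.60]])

rmat, _ = cv2.Rodrigues(rvec)        # rotation: world -> camera
cam_rmat = rmat.T                    # rotation: camera -> world
cam_origin = -np.dot(rmat.T, tvec)   # camera origin expressed in the world frame

print("camera rotation in world frame:")
print(cam_rmat)
print("camera origin in world frame:")
print(cam_origin)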
get_extrinsics.py (Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, Author: taochenshh)
def ir_callback(self,data):
        try:
            self.ir_img = self.mkgray(data)
        except CvBridgeError as e:
            print(e)

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num,y_num))
        cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
        cv2.imshow('ir_img',self.ir_img)
        cv2.waitKey(5)
        if ir_ret == True:
            ir_tempimg = self.ir_img.copy()
            cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(ir_tempimg, (x_num,y_num), ir_corners,ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
            self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)

            print("The world coordinate system's origin in camera's coordinate system:")
            print("===ir_camera rvec:")
            print(ir_rvec)
            print("===ir_camera rmat:")
            print(self.ir_rmat)
            print("===ir_camera tvec:")
            print(self.ir_tvec)
            print("ir_camera_shape: ")
            print(self.ir_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.ir_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.ir_rmat.T, self.ir_tvec))

            depth_stream = open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w")
            data = {'rmat':self.ir_rmat.tolist(), 'tvec':self.ir_tvec.tolist()}
            yaml.dump(data, depth_stream)


            cv2.imshow('ir_img',ir_tempimg)
            cv2.waitKey(5)
registration_stereo.py (Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, Author: taochenshh)
def rgb_calib_callback(self,data):
        try:
            self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(self.rgb_img,cv2.COLOR_BGR2GRAY)
        rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (y_num,x_num),None)

        cv2.imshow('rgb_img',self.rgb_img)
        cv2.waitKey(5)
        if rgb_ret == True:
            rgb_tempimg = self.rgb_img.copy()
            cv2.cornerSubPix(gray,rgb_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(rgb_tempimg, (y_num,x_num), rgb_corners,rgb_ret)

            rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_corners.yaml", "w")
            data = {'corners':rgb_corners.tolist()}
            yaml.dump(data, rgb_stream)

            cv2.imshow('rgb_img',rgb_tempimg)
            cv2.waitKey(5)
pose_estimation.py (Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, Author: taochenshh)
def rgb_callback(self,data):
        try:
            img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        # ret, corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
        ret, corners = cv2.findChessboardCorners(img, (x_num,y_num))
        cv2.imshow('img',img)
        cv2.waitKey(5)
        if ret == True:
            cv2.cornerSubPix(gray,corners,(5,5),(-1,-1),criteria)
            tempimg = img.copy()
            cv2.drawChessboardCorners(tempimg, (x_num,y_num), corners,ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            rvec, tvec, inliers = cv2.solvePnPRansac(objpoints, corners, rgb_mtx, rgb_dist)
            print("rvecs:")
            print(rvec)
            print("tvecs:")
            print(tvec)
            # project 3D points to image plane
            imgpts, jac = cv2.projectPoints(axis, rvec, tvec, rgb_mtx, rgb_dist)

            imgpts = np.int32(imgpts).reshape(-1,2)

            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[1]),[255,0,0],4)  #BGR
            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[2]),[0,255,0],4)
            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[3]),[0,0,255],4)

            cv2.imshow('img',tempimg)
            cv2.waitKey(5)
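The calibration snippets on this page rely on module-level names defined elsewhere in the original scripts (x_num, y_num, criteria, objpoints, axis, and the camera intrinsics). The sketch below shows one plausible way to set them up for a chessboard target; the board dimensions, square size, and intrinsic values are placeholders, not the values used by the original project.

import numpy as np
import cv2

x_num, y_num = 8, 6          # inner corners per chessboard row and column (assumed)
square_size = 0.025          # chessboard square edge in meters (assumed)

# termination criteria for cv2.cornerSubPix
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# 3D coordinates of the chessboard corners in the board (world) frame
objpoints = np.zeros((x_num * y_num, 3), np.float32)
objpoints[:, :2] = np.mgrid[0:x_num, 0:y_num].T.reshape(-1, 2) * square_size

# origin plus three axis endpoints, projected with cv2.projectPoints to draw the frame
axis = np.float32([[0, 0, 0],
                   [3 * square_size, 0, 0],
                   [0, 3 * square_size, 0],
                   [0, 0, -3 * square_size]])

# placeholder intrinsics; the original scripts obtain these from camera calibration
rgb_mtx = np.array([[525.0, 0.0, 319.5],
                    [0.0, 525.0, 239.5],
                    [0.0, 0.0, 1.0]])
rgb_dist = np.zeros(5)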
robot.py (Project: ab2016-ros-gazebo, Author: akademikbilisim)
def camera_callback(self, msg):
        try:
            self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
        except cv_bridge.CvBridgeError:
            return

        gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(blur, 30, 150)

        cv2.imshow("Robot Camera", canny)
        cv2.waitKey(1)
BgGui.py (Project: BeltaGo, Author: 54BayMax)
def imageCallback(self, msg):
        global qImg
        try:
            cv_image = self.bridge.imgmsg_to_cv2(msg, "rgb8")
            cv_image=cv2.resize(cv_image, (640, 480))
            image=PImage.frombytes("RGB", (640, 480), cv_image.tostring())
            Lock.acquire()
            qImg=ImageQt(image)
            Lock.release()

        except CvBridgeError as e:
            print(e)
face_recog.py (Project: tbotnav, Author: patilnabhi)
def img_callback(self, image):
        try:
            inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
        except CvBridgeError as e:
            print(e)

        inImgarr = np.array(inImg)
        try:
            self.outImg, self.face_names.data = self.process_image(inImgarr) 
            # self.img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))

            # self.face_names.data = ['a', 'b']
            # print self.face_names
            self.name_pub.publish(self.face_names)
            self.all_names.data = list(self.names.values())
            # print self.all_names
            self.all_names_pub.publish(self.all_names)

            cv2.imshow("Face Recognition", self.outImg)
            cv2.waitKey(3)

            # print self.face_names

        except Exception:
            print("Failed! Ensure data is collected & trained...")
train_faces2.py (Project: tbotnav, Author: patilnabhi)
def img_callback(self, image):
        try:
            inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
        except CvBridgeError as e:
            print(e)

        inImgarr = np.array(inImg)
        self.outImg = self.process_image(inImgarr)

        # self.train_img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))    

        # cv2.namedWindow("Capture Face")
        cv2.imshow('Capture Face', self.outImg)
        cv2.waitKey(3)

        if self.count == 100*self.cp_rate:
            rospy.loginfo("Data Captured!")
            rospy.loginfo("Training Data...")
            self.fisher_train_data()
            rospy.signal_shutdown('done')
get_hand_gestures.py (Project: tbotnav, Author: patilnabhi)
def depth_callback(self, ros_image):
        try:
            inImg = self.bridge.imgmsg_to_cv2(ros_image)
        except CvBridgeError as e:
            print(e)

        inImgarr = np.array(inImg, dtype=np.uint16)
        # inImgarr = cv2.GaussianBlur(inImgarr, (3, 3), 0)
        # cv2.normalize(inImgarr, inImgarr, 0, 1, cv2.NORM_MINMAX) 

        self.outImg, self.num_fingers = self.process_depth_image(inImgarr) 
        # outImg = self.process_depth_image(inImgarr) 
        # rate = rospy.Rate(10)        
        self.num_pub.publish(self.num_fingers)
        # self.img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))
        # rate.sleep()

        cv2.imshow("Hand Gesture Recognition", self.outImg)
        cv2.waitKey(3)
face_recog2.py (Project: tbotnav, Author: patilnabhi)
def img_callback(self, image):
        try:
            inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
        except CvBridgeError as e:
            print(e)

        inImgarr = np.array(inImg)
        try:
            self.outImg, self.face_names.data = self.process_image(inImgarr) 
            # self.img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))

            # self.face_names.data = ['a', 'b']
            self.name_pub.publish(self.face_names)

            cv2.imshow("Face Recognition", self.outImg)
            cv2.waitKey(3)

            # print self.face_names

        except Exception:
            print("Failed! Ensure data is collected & trained...")
lidar_to_img_test_v2.py (Project: self-driving-car-obstacle-detector, Author: ajimenezh)
def callbackCamera(msg):
    print('callbackCamera: msg : seq=%d, timestamp=%010d:%09d'%(
        msg.header.seq, msg.header.stamp.secs, msg.header.stamp.nsecs
    ))

    try:
        cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        print(e)

    s = ('%010d%09d'%(
        msg.header.stamp.secs, msg.header.stamp.nsecs
    ))

    filepath = "./camera/" + s + ".jpg"

    #print filepath
    height, width, channels = cv_image.shape
    cv_image = cv_image[400:height-200, 0:width]
    cv2.imwrite(filepath, cv_image)

    detector.detect(filepath)

    pass
image_to_world.py (Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, Author: taochenshh)
def depth_callback(self,data):
        try:
            self.depth_image= self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
        except CvBridgeError as e:
            print(e)
        # print "depth"

        depth_min = np.nanmin(self.depth_image)
        depth_max = np.nanmax(self.depth_image)


        depth_img = self.depth_image.copy()
        depth_img[np.isnan(self.depth_image)] = depth_min
        depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
        cv2.imshow("Depth Image", depth_img)
        cv2.waitKey(5)
        # stream = open("/home/chentao/depth_test.yaml", "w")
        # data = {'img':depth_img.tolist()}
        # yaml.dump(data, stream)
image_to_world.py (Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, Author: taochenshh)
def rgb_callback(self,data):
        try:
            self.rgb_image = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)



        if self.drawing or self.rect_done:
            if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
                cv2.rectangle(self.rgb_image,(self.ix1,self.iy1),(self.ix2,self.iy2),(0,255,0),2)
                if self.rect_done:
                    center_point = self.get_center_point()
                    cv2.circle(self.rgb_image, tuple(center_point.astype(int)), 3, (0,0,255),-1)

        cv2.imshow('RGB Image', self.rgb_image)
        cv2.waitKey(5)
        # print "rgb"
grasp_pos_rgbd_cluster.py (Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, Author: taochenshh)
def depth_callback(self,data):
        try:
            self.depth_image= self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
        except CvBridgeError as e:
            print(e)
        # print "depth"

        depth_min = np.nanmin(self.depth_image)
        depth_max = np.nanmax(self.depth_image)


        depth_img = self.depth_image.copy()
        depth_img[np.isnan(self.depth_image)] = depth_min
        depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
        cv2.imshow("Depth Image", depth_img)
        cv2.waitKey(5)
        # stream = open("/home/chentao/depth_test.yaml", "w")
        # data = {'img':depth_img.tolist()}
        # yaml.dump(data, stream)
grasp_pos_rgbd_cluster.py (Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, Author: taochenshh)
def rgb_callback(self,data):
        try:
            self.rgb_image = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)


        tempimg = self.rgb_image.copy()
        if self.drawing or self.rect_done:
            if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
                cv2.rectangle(tempimg,(self.ix1,self.iy1),(self.ix2,self.iy2),(0,255,0),2)
                if self.rect_done:
                    center_point = self.get_center_point()
                    cv2.circle(tempimg, tuple(center_point.astype(int)), 3, (0,0,255),-1)

        cv2.imshow('RGB Image', tempimg)
        cv2.waitKey(5)
        # print "rgb"
grasp_pos.py (Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, Author: taochenshh)
def depth_callback(self,data):
        try:
            self.depth_image= self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
        except CvBridgeError as e:
            print(e)
        # print "depth"

        depth_min = np.nanmin(self.depth_image)
        depth_max = np.nanmax(self.depth_image)


        depth_img = self.depth_image.copy()
        depth_img[np.isnan(self.depth_image)] = depth_min
        depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
        cv2.imshow("Depth Image", depth_img)
        cv2.waitKey(5)
bag_utils.py (Project: didi-competition, Author: udacity)
def write_image(self, outdir, msg, fmt='png'):
        results = {}
        image_filename = os.path.join(outdir, str(msg.header.stamp.to_nsec()) + '.' + fmt)
        try:
            if hasattr(msg, 'format') and 'compressed' in msg.format:
                buf = np.ndarray(shape=(1, len(msg.data)), dtype=np.uint8, buffer=msg.data)
                cv_image = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)
                if cv_image.shape[2] != 3:
                    print("Invalid image %s" % image_filename)
                    return results
                results['height'] = cv_image.shape[0]
                results['width'] = cv_image.shape[1]
                # Avoid re-encoding if we don't have to
                if check_image_format(msg.data) == fmt:
                    buf.tofile(image_filename)
                else:
                    cv2.imwrite(image_filename, cv_image)
            else:
                cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
                cv2.imwrite(image_filename, cv_image)
        except CvBridgeError as e:
            print(e)
        results['filename'] = image_filename
        return results
detect_crazyflie.py (Project: ROS-Robotics-By-Example, Author: PacktPublishing)
def depth_callback(self, msg):

      # create OpenCV depth image using default passthrough encoding
      try:
         depth_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
      except CvBridgeError as e:
         print(e)

      # using green ball (u, v) position, find depth value of Crazyflie point and divide by 1000
      # to change millimeters into meters (for Kinect sensors only)
      self.cf_d = depth_image[self.cf_v, self.cf_u] / 1000.0
      rospy.loginfo("Depth: x at %d  y at %d  z at %f", int(self.cf_u), int(self.cf_v), self.cf_d)

      # if depth value is zero, use the last non-zero depth value
      if self.cf_d == 0:
         self.cf_d = self.last_d
      else:
         self.last_d = self.cf_d

      # publish Crazyflie tf transform
      self.update_cf_transform (self.cf_u, self.cf_v, self.cf_d)


   # This function builds the Crazyflie base_link tf transform and publishes it.
vis_tools.py (Project: baxter, Author: destrygomorphous)
def make_mask(limb, filename):

    """
    Given a limb (right or left) and a name to save to
    (in the baxter_tools/share/images/ directory),
    create a mask of any dark objects in the image from the camera
    and save it.
    """

    image_sub = rospy.Subscriber(
        '/cameras/' + limb + '_hand_camera/image',Image,callback)


    try:
        bridge = CvBridge()
        cv_image = bridge.imgmsg_to_cv2(img, "bgr8")
    except CvBridgeError as e:
        print(e)

    # threshold the grayscale image; pixels darker than 250 become white in the mask
    gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    _, msk = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY_INV)
    return msk
detect_crazyflie.py (Project: ROS-Robotics-by-Example, Author: FairchildC)
def depth_callback(self, msg):

      # create OpenCV depth image using default passthrough encoding
      try:
         depth_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
      except CvBridgeError as e:
         print(e)

      # using green ball (u, v) position, find depth value of Crazyflie point and divide by 1000
      # to change millimeters into meters (for Kinect sensors only)
      self.cf_d = depth_image[self.cf_v, self.cf_u] / 1000.0
      rospy.loginfo("Depth: x at %d  y at %d  z at %f", int(self.cf_u), int(self.cf_v), self.cf_d)

      # if depth value is zero, use the last non-zero depth value
      if self.cf_d == 0:
         self.cf_d = self.last_d
      else:
         self.last_d = self.cf_d

      # publish Crazyflie tf transform
      self.update_cf_transform (self.cf_u, self.cf_v, self.cf_d)


   # This function builds the Crazyflie base_link tf transform and publishes it.
sitl_file.py (Project: formulapi_ROS_simulator, Author: wilselby)
def run(self):

      while True:

          # Only run the loop if we have an image
          if self.process:

              self.imgprocess.ProcessingImage(self.latestimage)   # FormulaPi image processing function

              self.AdjustMotorSpeed(self.imgprocess.trackoffset)  # compute motor commands from the image output

              # Publish the processed image
              cvImage = self.imgprocess.outputImage
              try:
                  imgmsg = self.bridge.cv2_to_imgmsg(cvImage, "bgr8")
                  self.image_pub.publish(imgmsg)
              except CvBridgeError as e:
                  print(e)

              # cv2.imshow("Image window", self.cvImage)
              # cv2.waitKey(3)
inverse_perspective_mapping_node.py (Project: autonomous_driving, Author: StatueFungus)
def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)

        if self._camera_needs_to_be_initialized(cv_image):
            self._initialize_camera_parameters(cv_image)
            self._calculate_transformation_matrix()

        if self.transformation_matrix is None:
            self._calculate_transformation_matrix()

        warped = self.img_prep.warp_perspective(cv_image, self.transformation_matrix, self.transformated_image_resolution)

        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(warped, "bgr8"))
        except CvBridgeError as e:
            rospy.logerr(e)
lane_tracking_node.py (Project: autonomous_driving, Author: StatueFungus)
def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
            height, width, _ = cv_image.shape
        except CvBridgeError as e:
            rospy.logerr(e)

        if self.reset_tracking is True:
            self.init_lanemodel()
            self.reset_tracking = False

        self.lane_model.update_segments(cv_image.copy())
        self.lane_model.draw_segments(cv_image)
        state_point_x = self.lane_model.state_point_x()

        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
            self.setpoint_pub.publish(0.0)
            if state_point_x:
                self.state_pub.publish(state_point_x - int(width/2))
        except CvBridgeError as e:
            rospy.logerr(e)
bag_utils.py (Project: pybot, Author: spillai)
def compressed_imgmsg_to_cv2(cmprs_img_msg, desired_encoding = "passthrough"):
    """
    Convert a sensor_msgs::CompressedImage message to an OpenCV :cpp:type:`cv::Mat`.

    :param cmprs_img_msg:   A :cpp:type:`sensor_msgs::CompressedImage` message
    :param desired_encoding:  The encoding of the image data, one of the following strings:

       * ``"passthrough"``
       * one of the standard strings in sensor_msgs/image_encodings.h

    :rtype: :cpp:type:`cv::Mat`
    :raises CvBridgeError: when conversion is not possible.

    If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg.
    Otherwise desired_encoding must be one of the standard image encodings

    This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.

    If the image only has one channel, the shape has size 2 (width and height)
    """
    str_msg = cmprs_img_msg.data
    buf = np.ndarray(shape=(1, len(str_msg)),
                      dtype=np.uint8, buffer=cmprs_img_msg.data)
    im = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)

    if desired_encoding == "passthrough":
        return im

    try:
        res = cvtColor2(im, "bgr8", desired_encoding)
    except RuntimeError as e:
        raise CvBridgeError(e)

    return res
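A minimal usage sketch for the helper above, assuming it is in scope and that a sensor_msgs/CompressedImage topic is being published; the node and topic names are placeholders:

import rospy
import cv2
from sensor_msgs.msg import CompressedImage

def compressed_callback(msg):
    # cv2.imdecode inside the helper already yields BGR for color images,
    # so "passthrough" is enough here
    img = compressed_imgmsg_to_cv2(msg, desired_encoding="passthrough")
    cv2.imshow("compressed", img)
    cv2.waitKey(1)

rospy.init_node("compressed_viewer")
rospy.Subscriber("/camera/image/compressed", CompressedImage, compressed_callback)
rospy.spin()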
kalibr_camera_focus.py (Project: camera_calibration_frontend, Author: groundmelon)
def callback(self, msg):
        #convert image to opencv
        try:
            cv_image = self.bridge.imgmsg_to_cv2(msg)
            np_image= np.array(cv_image)
        except CvBridgeError as e:
            print("Could not convert ros message to opencv image:", e)
            return

        #calculate the fft magnitude
        img_float32 = np.float32(np_image)
        dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
        dft_shift = np.fft.fftshift(dft)
        magnitude_spectrum = cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1])

        #normalize
        magnitude_spectrum_normalized = magnitude_spectrum / np.sum(magnitude_spectrum)

        #frequency domain entropy (-> Entropy Based Measure of Camera Focus. Matej Kristan, Franjo Pernu. University of Ljubljana. Slovenia)
        fde = np.sum( magnitude_spectrum_normalized * np.log(magnitude_spectrum_normalized) )

        y = 20; x = 20
        text = "fde: {0}   (minimize this for focus)".format(np.sum(fde))
        np_image = cv2.cvtColor(np_image, cv2.COLOR_GRAY2BGR)
        cv2.putText(np_image, text, (x,y), cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0, 0, 255), thickness=2)
        cv2.imshow(self.windowNameOrig, np_image)
        cv2.waitKey(10)
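The same frequency-domain entropy measure, pulled out of the callback as a standalone helper for reuse. This is a sketch: it assumes a single-channel grayscale input and, unlike the callback above, drops zero-valued bins to avoid log(0).

import numpy as np
import cv2

def frequency_domain_entropy(gray):
    """Focus measure used above: sum(p * log(p)) over the normalized
    FFT magnitude spectrum; lower values indicate better focus."""
    dft = cv2.dft(np.float32(gray), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)
    magnitude = cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1])
    p = magnitude / np.sum(magnitude)
    p = p[p > 0]                      # skip empty bins to avoid log(0)
    return float(np.sum(p * np.log(p)))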
rgb_object_detection.py (Project: geom_rcnn, Author: asbroad)
def img_cb(self, msg):
        try:
          self.cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        except CvBridgeError as e:
          print(e)
store_data.py (Project: geom_rcnn, Author: asbroad)
def img_cb(self, msg):
        try:
          self.cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        except CvBridgeError as e:
          print(e)
annotation.py (Project: image_recognition, Author: tue-robotics)
def _image_callback(self, msg):
        """
        Called when a new sensor_msgs/Image is coming in
        :param msg: The image messaeg
        """
        try:
            cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)

        self._image_widget.set_image(cv_image)
test.py (Project: image_recognition, Author: tue-robotics)
def _image_callback(self, msg):
        """
        Sensor_msgs/Image callback
        :param msg: The image message
        """
        try:
            cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return

        self._image_widget.set_image(cv_image)

