Python setMouseCallback() example source code
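Before the project examples, here is a minimal, self-contained sketch of the usual cv2.setMouseCallback() pattern that the snippets below all follow: create the window first, attach a callback with the (event, x, y, flags, param) signature, then pump events with cv2.waitKey() in a display loop. The window name, callback, and canvas below are illustrative placeholders, not taken from any of the projects.

import cv2
import numpy as np

def on_mouse(event, x, y, flags, param):
    # OpenCV invokes this with the event type, cursor position, modifier flags,
    # and whatever object was passed as the third argument of setMouseCallback.
    canvas = param
    if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(canvas, (x, y), 3, (0, 0, 255), -1)

def main():
    canvas = np.zeros((480, 640, 3), np.uint8)
    cv2.namedWindow("demo")                      # the window must exist before attaching the callback
    cv2.setMouseCallback("demo", on_mouse, canvas)
    while True:
        cv2.imshow("demo", canvas)
        if cv2.waitKey(10) & 0xFF == ord('q'):   # press 'q' to quit
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()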

cameracalibrator.py (project: camera_calibration_frontend, author: groundmelon)
def run(self):
        cv2.namedWindow("display", cv2.WINDOW_NORMAL)
        cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
        cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)

        if self.extra_queue:
            cv2.namedWindow("extra", cv2.WINDOW_NORMAL)

        while True:
            # wait for an image (could happen at the very beginning when the queue is still empty)
            while len(self.queue) == 0:
                time.sleep(0.1)
            im = self.queue[0]
            cv2.imshow("display", im)
            k = cv2.waitKey(6) & 0xFF
            if k in [27, ord('q')]:
                rospy.signal_shutdown('Quit')
            elif k == ord('s'):
                self.opencv_calibration_node.screendump(im)
            if self.extra_queue:
                if len(self.extra_queue):
                    extra_img = self.extra_queue[0]
                    cv2.imshow("extra", extra_img)
get_line.py (project: MultiObjectTracker, author: alokwhitewolf)
def run(im):
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name,cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)

    print " Drag across the screen to set lines.\n Do it twice"
    print " After drawing the lines press 'r' to resume\n"

    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)

    list = [l1,l2]

    mouse_down = False
    def callback(event, x, y, flags, param):
        global trigger, mouse_down
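        # Note: `trigger` is assumed to be a module-level counter initialised to 0 elsewhere in get_line.py.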

        if trigger<2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                list[trigger][0] = (x, y)

            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                list[trigger][1] = (x,y)
                cv2.line(im_disp, (list[trigger][0][0], list[trigger][0][1]),
                         (list[trigger][1][0], list[trigger][1][1]), (255, 0, 0), 2)
                trigger += 1
        else:
            pass
    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name,im_disp)
        key = cv2.waitKey(10) & 0xFF

        if key == ord('r'):
            # press 'r' to return the two drawn lines and resume
            return list
RRT_Star_Obstacles_final.py (project: Rapidly-Exploring-Random-Tree-Star-Scan, author: vampcoder)
def getSourceAndGoal(self):
            cv2.namedWindow('image')
            cv2.setMouseCallback('image', self.draw_circle)
            cv2.imshow('image', img)
            cv2.waitKey(0)
viewer.py (project: video_labeler, author: hahnyuan)
def __init__(self,labels,video_file,box_saver,border=30):
        """
        the GUI Labeler
        :param labels: the labels name string list
        :param video_file: the video file path
        :param border: the border of the center clip filed (white line around the video)
        :param save_dir: label result save path
        :param save_im: if write every cropped image to each label directory
        """
        self.cam = cv2.VideoCapture(video_file)
        self.video_stat = VideoStat(border)
        self.label_stat = LabelStat(labels)
        self.labels=labels
        self.box_saver=box_saver
        cv2.setMouseCallback("video", self.video_click)
        cv2.setMouseCallback("label", self.label_click)
        self.run()
homography.py (project: nelpy, author: nelpy)
def pick_corrs(images, n_pts_to_pick=4):
    data = [ [[], 0, False, False, False, image, "Image %d" % i, n_pts_to_pick]
            for i, image in enumerate(images)]

    for d in data:
        win_name = d[6]
        cv2.namedWindow(win_name)
        cv2.setMouseCallback(win_name, corr_picker_callback, d)
        cv2.startWindowThread()
        cv2.imshow(win_name, d[5])

    key = None
    while key != '\n' and key != '\r' and key != 'q':
        key = cv2.waitKey(33)
        key = chr(key & 255) if key >= 0 else None

    cv2.destroyAllWindows()

    if key == 'q':
        return None
    else:
        return [d[0] for d in data]
user_interaction.py (project: football-stats, author: dev-labs-bg)
def getPerpectiveCoordinates(frame, windowName, mouse):
    cv2.imshow(windowName, frame)   
    cv2.setMouseCallback(windowName, mouse.leftClick)

    i = 0
    coords = []

    while i < 4:
        i += 1
        mouse.x = 0
        mouse.y = 0
        mouse.leftClicked = False

        while mouse.leftClicked == False:
            # wait for key press
            key = cv2.waitKey(1) & 0xFF

        coords.append((mouse.x, mouse.y))

    return coords

# debug coordinates by moving the mouse
save_template_file.py (project: CameraTablet, author: dmvlasenk)
def setFingerTemplate(big_image, name_template_file):
    global add_frame
    name_window = 'big image'
    cv2.namedWindow(name_window)
    cv2.setMouseCallback(name_window,save_corners)
    add_frame = np.zeros(big_image.shape, np.uint8) 
    while(True):
        frame_with_rect = cv2.add(add_frame, big_image)
        cv2.imshow(name_window,frame_with_rect)
        cur_key = cv2.waitKey(1)
        if cur_key == 27:
            break
        if cur_key == ord('s') and (len(corners_x) == 2):
            template_img = big_image[corners_y[0]:corners_y[1], corners_x[0]:corners_x[1]]
            cv2.imwrite(name_template_file,template_img)
            break
    cv2.destroyAllWindows()
saveTemplateFileScript.py (project: CameraTablet, author: dmvlasenk)
def setFingerTemplate(big_image, name_template_file):
    global add_frame
    name_window = 'big image'
    cv2.namedWindow(name_window)
    cv2.setMouseCallback(name_window,save_corners)
    add_frame = np.zeros(big_image.shape, np.uint8) 
    while(True):
        frame_with_rect = cv2.add(add_frame, big_image)
        cv2.imshow(name_window,frame_with_rect)
        cur_key = cv2.waitKey(1)
        if cur_key == 27:
            break
        if cur_key == ord('s') and (len(corners_x) == 2):
            template_img = big_image[corners_y[0]:corners_y[1], corners_x[0]:corners_x[1]]
            cv2.imwrite(name_template_file,template_img)
            break
    cv2.destroyAllWindows()
imshow_utils.py (project: pybot, author: spillai)
def mouse_event_create(win_name, cb): 
    cv2.namedWindow(win_name)
    cv2.setMouseCallback(win_name, cb)
mouse_painting.py (project: PyIntroduction, author: tody411)
def setCallBack(self, win_name):
        cv2.setMouseCallback(win_name, self._callBack)
EyeCanSee.py (project: cv-lane, author: kendricktan)
def get_hsv(self):
        cv2.namedWindow('hsv_extractor')
        while True:
            self.grab_frame()

            # Bottom ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_BOTTOM-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Top ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_TOP-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Object
            cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), (238, 130, 238), 2)

            self.hsv_frame = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

            # Mouse handler
            cv2.setMouseCallback('hsv_extractor', self.on_mouse, 0)
            cv2.imshow('hsv_extractor', self.img_debug)

            key = cv2.waitKey(0) & 0xFF
            if key == ord('q'):
                break
        self.stop_camera()
        cv2.destroyAllWindows()

    # Starts camera (needs to be called before run)
simple-ide.py (project: ATX, author: NetEaseGame)
def main():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())

    # load the image, clone it, and setup the mouse callback function
    image = cv2.imread(args["image"])
    clone = image.copy()
    images = [clone, image]
    ref_pt = [None, None]

    cv2.namedWindow("image")
    cv2.setMouseCallback("image", make_mouse_callback(images, ref_pt))

    # keep looping until the 'q' key is pressed
    while True:
        # display the image and wait for a keypress
        cv2.imshow("image", images[1])
        key = cv2.waitKey(1) & 0xFF

        # if the 'c' key is pressed, crop the selected region and save it
        if key == ord("c"):
            if ref_pt[1] is None:
                continue
            roi = clone[ref_pt[0][1]:ref_pt[1][1], ref_pt[0][0]:ref_pt[1][0]]
            interactive_save(roi)
        elif key == ord("q"):
            break

    # if there are two reference points, then crop the region of interest
    # from the image and display it

    # close all open windows
    cv2.destroyAllWindows()
simple-ide.py (project: AutomatorX, author: xiaoyaojjian)
def main():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())

    # load the image, clone it, and setup the mouse callback function
    image = cv2.imread(args["image"])
    clone = image.copy()
    images = [clone, image]
    ref_pt = [None, None]

    cv2.namedWindow("image")
    cv2.setMouseCallback("image", make_mouse_callback(images, ref_pt))

    # keep looping until the 'q' key is pressed
    while True:
        # display the image and wait for a keypress
        cv2.imshow("image", images[1])
        key = cv2.waitKey(1) & 0xFF

        # if the 'c' key is pressed, crop the selected region and save it
        if key == ord("c"):
            if ref_pt[1] is None:
                continue
            roi = clone[ref_pt[0][1]:ref_pt[1][1], ref_pt[0][0]:ref_pt[1][0]]
            interactive_save(roi)
        elif key == ord("q"):
            break

    # if there are two reference points, then crop the region of interest
    # from the image and display it

    # close all open windows
    cv2.destroyAllWindows()
ogrid_node.py (project: lqRRT, author: jnez71)
def __init__(self, height, width, image_path):
        self.height, self.width = height, width

        self.clear_screen()

        if image_path:
            self.make_image(image_path)

        cv2.namedWindow('Draw OccupancyGrid')
        cv2.setMouseCallback('Draw OccupancyGrid', self.do_draw)
        self.drawing = 0
track.py (project: DrosophilaCooperative, author: avaccari)
def selectArea(self):
        self.userInteraction = True
        cv2.namedWindow(self.selectionWindow)
        cv2.setMouseCallback(self.selectionWindow, self.mouseInteraction)
        self.workingFrame = self.processedFrame.copy()
        self.showFrame(self.selectionWindow, self.workingFrame)

        while True:
            key = cv2.waitKey(1) & 0xFF

            if key == ord('q'):
                self.undoFrames = []
                break
            elif key == ord('c'):
                self.workingFrame = self.processedFrame.copy()
                self.trackedAreasList = []
                self.undoFrames = []
                self.showFrame(self.selectionWindow, self.workingFrame)
            elif key == ord('l'):
                try:
                    self.trackedAreasList.pop()
                except IndexError:
                    pass
                else:
                    self.workingFrame = self.undoFrames.pop()
                    self.showFrame(self.selectionWindow, self.workingFrame)
            elif key == ord('t'):
                self.undoFrames = []
                self.trackArea = self.refPt
                self.tracking = True
                self.trackDump = []
                if self.pause is True:
                    self.pause = False
                break
            elif key == ord('h'):
                self.showHelp('select')

        cv2.destroyWindow(self.selectionWindow)
        self.userInteraction = False
naiverstokes:fastmarch.py (project: inpainting, author: kvfrans)
def __init__(self, windowname, dests, colors_func):
            self.prev_pt = None
            self.windowname = windowname
            self.dests = dests
            self.colors_func = colors_func
            self.dirty = False
            self.show()
            cv2.setMouseCallback(self.windowname, self.on_mouse)
things.py (project: srcsim2017, author: ZarjRobotics)
def _find_power_plug_thing(self):
        """ Find the power plug at the solar array box """

        """ This uses color to determine if we have a choke """
        lower = np.array([100, 40, 0], dtype = "uint8")
        upper = np.array([255, 255, 20], dtype = "uint8")
        mask = cv2.inRange(self.img, lower, upper)

        blurred = cv2.GaussianBlur(mask, (5, 5), 0)
        thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

        contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]

        sorted_contours = sorted(contours, cmp=lambda a,b: int(cv2.contourArea(b)) - int(cv2.contourArea(a)))

        if len(sorted_contours) > 0:
            plug = self._find_a_thing(sorted_contours[0], 0, 0.06, 0, 0.06, 99.0)

            if plug is not None:
                plug.set_power_plug()
                self.things.append(plug)
                self.power_plug = plug

                if self.debug:
                    debug_img = self.img.copy()
                    for c in sorted_contours:
                        cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)
                    cv2.imshow("plug picture", debug_img)
                    cv2.setMouseCallback("plug picture", self.handle_mouse)
                    cv2.waitKey(0)
                    cv2.destroyAllWindows()
army.py (project: srcsim2017, author: ZarjRobotics)
def process_cloud(self):
        self.zarj.transform.init_transforms()
        self.cloud = self.zarj.eyes.get_stereo_cloud()
        self.data = self.zarj.eyes.get_cloud_data(self.cloud)
        self.raw_img, self.details = self.zarj.eyes.get_cloud_image_with_details(self.cloud)
        print "<<<< FOCUS IMAGE WINDOW AND PRESS ANY KEY TO CONTINUE >>>>"
        cv2.imshow("cloud view", raw_img)
        cv2.setMouseCallback("cloud view", handle_mouse)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
common.py (project: python-opencv2, author: bunkahle)
def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        self.colors_func = colors_func
        self.dirty = False
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
common.py (project: python-opencv2, author: bunkahle)
def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None
        self.drag_rect = None
interaction_updated_global.py (project: Interactive-object-tracking, author: abhishekarya286)
def label(image) :
    '''This function along with click_and_crop() helps in labelling object and background.
       Input : Input image
       Output: selected region of interest(either obj or distractor)'''
    global refPt,cropping,img_copy,clone,list_refpt
    #image1=copy.deepcopy(image)
    #clone = image1.copy()
    #img_copy=image1.copy()
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click_and_crop)
    print "Label the object"
    print "After making a bounding box, press 'c' "
    print "if you wish to select the object again, press 'r' "

    while True:
    # display the image and wait for a keypress
        cv2.imshow("image", img_copy)
        key = cv2.waitKey(1) & 0xFF

        # if the 'r' key is pressed, reset the cropping region
        if key == ord("r"):
            image = clone.copy()
            img_copy=image.copy()

        # if the 'c' key is pressed, break from the loop
        elif key == ord("c"):
            break

    # if there are two reference points, then crop the region of interest
    # from the image and display it
    if len(refPt) == 2:
        roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
        cv2.imshow("ROI", roi)
        print "press any key"
        cv2.waitKey(0)

    cv2.destroyAllWindows() # close all open windows
    obj_img=roi  # roi containing the object
    list_refpt.append(refPt)
    print list_refpt,"check_list"
    return obj_img
lab_global_optimisation.py (project: Interactive-object-tracking, author: abhishekarya286)
def label(image) :
    '''This function along with click_and_crop() helps in labelling object and background.
       Input : Input image
       Output: selected region of interest(either obj or distractor)'''
    global refPt,cropping,img_copy,clone,list_refpt
    #image1=copy.deepcopy(image)
    #clone = image1.copy()
    #img_copy=image1.copy()
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click_and_crop)
    print "Label the object"
    print "After making a bounding box, press 'c' "
    print "if you wish to select the object again, press 'r' "

    while True:
    # display the image and wait for a keypress
        cv2.imshow("image", img_copy)
        key = cv2.waitKey(1) & 0xFF

        # if the 'r' key is pressed, reset the cropping region
        if key == ord("r"):
            image = clone.copy()
            img_copy=image.copy()

        # if the 'c' key is pressed, break from the loop
        elif key == ord("c"):
            break

    # if there are two reference points, then crop the region of interest
    # from the image and display it
    if len(refPt) == 2:
        roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
        cv2.imshow("ROI", roi)
        print "press any key"
        cv2.waitKey(0)

    cv2.destroyAllWindows() # close all open windows
    obj_img=roi  # roi containing the object
    list_refpt.append(refPt)
    return obj_img
common.py (project: emojivis, author: JustinShenk)
def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        self.colors_func = colors_func
        self.dirty = False
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
common.py (project: emojivis, author: JustinShenk)
def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None
        self.drag_rect = None
experiment.py (project: ATLeS, author: liffiton)
def __init__(self, tx1, tx2, ty1, ty2):
        # For mouse interaction
        self._mouse_on = False
        cv2.namedWindow("preview")
        atexit.register(cv2.destroyAllWindows)
        cv2.setMouseCallback("preview", self.mouse_callback)
        # Setup tank bounds
        self._tx1 = tx1
        self._tx2 = tx2
        self._ty1 = ty1
        self._ty2 = ty2
image_to_world.py (project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, author: taochenshh)
def __init__(self):

        self.br = CvBridge()

        # If you subscribe /camera/depth_registered/hw_registered/image_rect topic, the depth image and rgb image are 
        # already registered. So you don't need to call register_depth_to_rgb()
        self.depth_image_sub = rospy.Subscriber("/camera/depth_registered/hw_registered/image_rect",Image,self.depth_callback)
        self.rgb_image_sub = rospy.Subscriber("/camera/rgb/image_rect_color",Image,self.rgb_callback)
        self.ir_img = None
        self.rgb_img = None

        self.rgb_rmat = None
        self.rgb_tvec = None
        self.ir_rmat = None
        self.ir_tvec = None

        self.ir_to_rgb_rmat = None
        self.ir_to_rgb_tvec = None
        self.depth_image = None
        self.rgb_image = None
        self.ir_to_world_tvec = None
        self.ir_to_rgb_rmat = None
        self.load_extrinsics()
        self.load_intrinsics()
        self.depth_image = None
        self.rgb_image = None
        self.count = 0
        self.drawing = False # true if mouse is pressed
        self.rect_done = False
        self.ix1 = -1
        self.iy1 = -1
        self.ix2 = -1
        self.iy2 = -1

        cv2.namedWindow('RGB Image')
        cv2.setMouseCallback('RGB Image',self.draw_rect)
grasp_pos_rgbd_cluster.py (project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, author: taochenshh)
def __init__(self):

        self.br = CvBridge()

        # If you subscribe /camera/depth_registered/hw_registered/image_rect topic, the depth image and rgb image are 
        # already registered. So you don't need to call register_depth_to_rgb()
        self.depth_image_sub = rospy.Subscriber("/camera/depth_registered/hw_registered/image_rect",Image,self.depth_callback)
        self.rgb_image_sub = rospy.Subscriber("/camera/rgb/image_rect_color",Image,self.rgb_callback)

        # self.pub = rospy.Publisher('chatter', String, queue_size=10)
        self.ir_img = None
        self.rgb_img = None

        self.rgb_rmat = None
        self.rgb_tvec = None
        self.ir_rmat = None
        self.ir_tvec = None

        self.ir_to_rgb_rmat = None
        self.ir_to_rgb_tvec = None
        self.depth_image = None
        self.rgb_image = None
        self.ir_to_world_tvec = None
        self.ir_to_rgb_rmat = None
        self.load_extrinsics()
        self.load_intrinsics()

        self.drawing = False # true if mouse is pressed
        self.rect_done = False
        self.ix1 = -1
        self.iy1 = -1
        self.ix2 = -1
        self.iy2 = -1

        cv2.namedWindow('RGB Image')
        cv2.setMouseCallback('RGB Image',self.draw_rect)
grasp_pos.py (project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, author: taochenshh)
def __init__(self):

        self.br = CvBridge()

        # If you subscribe /camera/depth_registered/hw_registered/image_rect topic, the depth image and rgb image are 
        # already registered. So you don't need to call register_depth_to_rgb()
        self.depth_image_sub = rospy.Subscriber("/camera/depth_registered/hw_registered/image_rect",Image,self.depth_callback)
        self.rgb_image_sub = rospy.Subscriber("/camera/rgb/image_rect_color",Image,self.rgb_callback)

        # self.pub = rospy.Publisher('chatter', String, queue_size=10)
        self.ir_img = None
        self.rgb_img = None

        self.rgb_rmat = None
        self.rgb_tvec = None
        self.ir_rmat = None
        self.ir_tvec = None

        self.ir_to_rgb_rmat = None
        self.ir_to_rgb_tvec = None
        self.depth_image = None
        self.rgb_image = None
        self.ir_to_world_tvec = None
        self.ir_to_rgb_rmat = None
        self.load_extrinsics()
        self.load_intrinsics()

        self.drawing = False # true if mouse is pressed
        self.rect_done = False
        self.ix1 = -1
        self.iy1 = -1
        self.ix2 = -1
        self.iy2 = -1

        cv2.namedWindow('RGB Image')
        cv2.setMouseCallback('RGB Image',self.draw_rect)
eos_bindings_playground.py (project: thesis_scripts, author: PhilippKopp)
def klick_landmarks_on_image():
    global current_landmark, klicked_landmarks

    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click)


    show_lms_on_image()
    image     = cv2.imread('/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.JPG')
    for lm_idx in range(68):
        while True:
            temp_image = image.copy()
            lms_to_be_shown = klicked_landmarks#+current_landmark
            if len(current_landmark)>0:
                lms_to_be_shown =klicked_landmarks + [current_landmark]

            if len(lms_to_be_shown)>0:
                draw_lms_on_image(temp_image, lms_to_be_shown)

            cv2.imshow("image", temp_image)
            key = cv2.waitKey(1) & 0xFF

            if key == ord(" "):
                if len(current_landmark)>0:
                    klicked_landmarks.append(current_landmark)
                    break
            if key == ord("q"):
                return 0
        current_landmark=[]
    cv2.destroyWindow("image")

    #now write lm file 
    landmark_file = '/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.pts'
    with open(landmark_file, "w") as lf:
        lf.write('version: 1\n')
        lf.write('n_points: 68\n')
        lf.write('{\n')
        for landmark in klicked_landmarks:
            lf.write(str(landmark[0])+" "+str(landmark[1])+"\n")
        lf.write('}\n')
click_bb.py (project: thesis_scripts, author: PhilippKopp)
def main():
    all_bb = []
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click)
    images = glob.glob('/user/HS204/m09113/facer2vm_project_area/data/300VW_Dataset_2015_12_14/*/frames/000001.png')
    output_file_path = '/user/HS204/m09113/facer2vm_project_area/data/300VW_Dataset_2015_12_14/bb_clicked_philipp.log'
    for i, image_path in enumerate(images):
        print ('image',image_path,'(',i,'of',len(images),')')
        image     = cv2.imread(image_path)
        upper_left_point, lower_right_point = click_bb_on_image(image)
        all_bb.append([upper_left_point[0], upper_left_point[1], lower_right_point[0], lower_right_point[1]])
        #print (upper_left_point, lower_right_point)
        open(output_file_path, 'a').write(str(image_path)+' '+str(upper_left_point[0])+' '+str(upper_left_point[1])+' '+str(lower_right_point[0])+' '+str(lower_right_point[1])+'\n')
    cv2.destroyWindow("image")





    #now write lm file 
#   landmark_file = '/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.pts'
#   with open(landmark_file, "w") as lf:
#       lf.write('version: 1\n')
#       lf.write('n_points: 68\n')
#       lf.write('{\n')
#       for landmark in klicked_landmarks:
#           lf.write(str(landmark[0])+" "+str(landmark[1])+"\n")
#       lf.write('}\n')
#   return x, y, w, h

