Example source code for Python's Queue() class

pool.py (project: PyPlanet, author: PyPlanet)
def __init__(self, pool_names, max_restarts=0, options=None):
        self.names = pool_names
        self.queue = multiprocessing.Queue()
        self.pool = dict()
        self.max_restarts = max_restarts
        self.options = options or dict()

        self.dog_path = os.curdir
        self.dog_handler = LiveReload(self)
        # self.dog_observer = Observer()
        # self.dog_observer.schedule(self.dog_handler, self.dog_path, recursive=True)

        if multiprocessing.get_start_method() != 'fork':  # pragma: no cover
            root_logger = logging.getLogger()
            self.log_listener = QueueListener(self.queue, *root_logger.handlers)

        # TODO: Find out how to get the watchdog + livereload working on a later moment.
        # self.dog_observer.start()

        self._restarts = dict()
data_process.py (project: 3D-R2N2, author: chrischoy)
def __init__(self, data_queue, data_paths, repeat=True):
        '''
        data_queue : Multiprocessing queue
        data_paths : list of data and label pair used to load data
        repeat : if set True, return data until exit is set
        '''
        super(DataProcess, self).__init__()
        # Queue to transfer the loaded mini batches
        self.data_queue = data_queue
        self.data_paths = data_paths
        self.num_data = len(data_paths)
        self.repeat = repeat

        # Tuple of data shape
        self.batch_size = cfg.CONST.BATCH_SIZE
        self.exit = Event()
        self.shuffle_db_inds()
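
The run() method of this process is not part of the snippet; a minimal sketch of what it has to do, given the attributes set above (load_batch is a hypothetical stand-in for the actual loading code):

def run(self):
    # Keep feeding mini-batches until the exit event fires.
    while not self.exit.is_set():
        for start in range(0, self.num_data, self.batch_size):
            if self.exit.is_set():
                return
            self.data_queue.put(self.load_batch(start))  # hypothetical loader
        if not self.repeat:
            break
        self.shuffle_db_inds()  # reshuffle between epochs, as in __init__
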
Trainer.py (project: rank-ordered-autoencoder, author: paulbertens)
def data_loading(minibatch_size, data_iterator, shapeInput, exit_size):
    queue_train = Queue(maxsize=exit_size*10)
    queue_test = Queue(maxsize=exit_size*10)
    def start_loading():
        for e in range(exit_size):
            iterator_train = data_iterator(shapeInput, minibatch_size, shuffle=True, train=True)
            iterator_test = data_iterator(shapeInput, minibatch_size, shuffle=True, train=False)
            for new_input in iterator_train:
                while queue_train.full():
                    print('Queue full')
                    time.sleep(30)
                queue_train.put(new_input)
                new_input_test = next(iterator_test)
                queue_test.put(new_input_test)
        print('Exiting queue')

    t = threading.Thread(target=start_loading)
    t.daemon = True
    t.start()
    return queue_train, queue_test
index.py (project: fccforensics, author: RagtagOpen)
def run(self):
        self.total = self.get_total()
        if not self.total:
            print('error loading document total; using estimate')
            self.total = 5000000

        index_queue = multiprocessing.Queue()

        bulk_index_process = multiprocessing.Process(
            target=self.bulk_index, args=(index_queue,),
        )
        bulk_index_process.start()

        for comment in self.iter_comments():
            self.stats['fetched'] += 1
            if not self.stats['fetched'] % 500:
                print('fetched %s/%s\t%s%%\t%s' % (self.stats['fetched'], self.total,
                    int(self.stats['fetched'] / self.total * 100),
                    comment['date_disseminated']))
            index_queue.put(comment)

        index_queue.put(None)
        bulk_index_process.join()
        return self.stats['fetched']
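
bulk_index itself is not shown; given the None sentinel pushed above, the consumer side has to look roughly like this sketch (index_batch is a hypothetical stand-in for the actual bulk-indexing call):

def bulk_index(index_queue, batch_size=500):
    batch = []
    for comment in iter(index_queue.get, None):  # stop at the None sentinel
        batch.append(comment)
        if len(batch) >= batch_size:
            index_batch(batch)  # hypothetical bulk-indexing helper
            batch = []
    if batch:
        index_batch(batch)  # flush the final partial batch
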
pjf_server.py (project: PyJFuzz, author: mseclab)
def __init__(self, configuration):
        self.client_queue = multiprocessing.Queue(0)
        self.apply_patch()
        self.logger = self.init_logger()
        if ["debug", "html", "content_type", "notify", "ports"] not in configuration:
            raise PJFMissingArgument()
        if configuration.debug:
            print("[\033[92mINFO\033[0m] Starting HTTP ({0}) and HTTPS ({1}) built-in server...".format(
                configuration.ports["servers"]["HTTP_PORT"],
                configuration.ports["servers"]["HTTPS_PORT"]
            ))
        if not configuration.content_type:
            configuration.content_type = "application/json"
        self.config = configuration
        self.json = PJFFactory(configuration)
        self.https = SSLWSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTPS_PORT"])
        self.http = WSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTP_PORT"])
        self.httpsd = multiprocessing.Process(target=run, kwargs={"server": self.https, "quiet": True})
        self.httpd = multiprocessing.Process(target=run, kwargs={"server": self.http, "quiet": True})
        if self.config.fuzz_web:
            self.request_checker = Thread(target=self.request_pool, args=())
        self.logger.debug("[{0}] - PJFServer successfully initialized".format(time.strftime("%H:%M:%S")))
data_feeder.py (project: Tensormodels, author: asheshjain399)
def _launch_pipeline(self):
    """This method creates two queues.
    filename_queue: stores the list of filesnames in data_file and label_file
    data_queue: stores the mini-batch
    """

    self.data_processes = [] # Holds process handles

    queue_size = 2 * self.num_preprocess_threads + 2 * self.num_gpu_towers
    self.data_queue = Queue(queue_size)  # This queue stores the data
    image_files = open(self.data_file, 'r').readlines()
    labels = open(self.label_file, 'r').readlines()
    print('Size of queue: ', queue_size)

    self.filename_queue = Queue(len(image_files))  # This queue stores the filenames
    p = Process(target=self._create_filename_queue, args=(self.filename_queue, image_files, labels, self.num_epochs))
    p.start()
    self.data_processes.append(p)

    print('Data feeder started')
    for each_worker in range(self.num_preprocess_threads):
      p = Process(target=self._each_worker_process, args=(self.data_queue,))
      p.start()
      self.data_processes.append(p)
layer.py (project: dpl, author: ppengtang; byte-for-byte identical copies of this set_roidb method also appear in adversarial-frcnn, fast-rcnn-distillation (layer.py and layer_pi.py), faster-rcnn-resnet, and py-faster-rcnn-tk1, so it is listed once here)
def set_roidb(self, roidb):
        """Set the roidb to be used by this layer during training."""
        self._roidb = roidb
        self._shuffle_roidb_inds()
        if cfg.TRAIN.USE_PREFETCH:
            self._blob_queue = Queue(10)
            self._prefetch_process = BlobFetcher(self._blob_queue,
                                                 self._roidb,
                                                 self._num_classes)
            self._prefetch_process.start()
            # Terminate the child process when the parent exits
            def cleanup():
                print('Terminating BlobFetcher')
                self._prefetch_process.terminate()
                self._prefetch_process.join()
            import atexit
            atexit.register(cleanup)
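
BlobFetcher is defined elsewhere in these codebases (it originates in the Fast R-CNN training code); a heavily stripped-down sketch of the idea, with get_minibatch standing in for the real blob-building logic:

from multiprocessing import Process

class BlobFetcher(Process):
    """Child process that keeps the blob queue topped up (sketch only)."""

    def __init__(self, queue, roidb, num_classes):
        super(BlobFetcher, self).__init__()
        self._queue = queue
        self._roidb = roidb
        self._num_classes = num_classes

    def run(self):
        while True:  # the atexit hook above terminates this loop
            blobs = get_minibatch(self._roidb, self._num_classes)  # stand-in
            self._queue.put(blobs)
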
buffering.py (project: dsb3, author: EliasVansteenkiste)
def buffered_gen_mp(source_gen, buffer_size=2):
    """
    Generator that runs a slow source generator in a separate process.
    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")

    buffer = mp.Queue(maxsize=buffer_size - 1)

    # the effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the buffer.

    def _buffered_generation_process(source_gen, buffer):
        for data in source_gen:
            buffer.put(data, block=True)
        buffer.put(None)  # sentinel: signal the end of the iterator
        buffer.close()  # unfortunately this does not suffice as a signal: if buffer.get()
        # was called and subsequently the buffer is closed, it will block forever.

    process = mp.Process(target=_buffered_generation_process, args=(source_gen, buffer))
    process.start()

    for data in iter(buffer.get, None):
        yield data
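
A usage sketch (slow_source is invented for illustration; this pattern requires the fork start method, since a generator object cannot be pickled for a spawned child):

import time

def slow_source():
    for i in range(5):
        time.sleep(1)  # pretend each item is expensive to produce
        yield i

for item in buffered_gen_mp(slow_source(), buffer_size=3):
    print(item)  # consume while the child keeps the buffer topped up
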
buffering.py (project: dsb3, author: EliasVansteenkiste)
def buffered_gen_threaded(source_gen, buffer_size=5):
    """
    Generator that runs a slow source generator in a separate thread. Beware of the GIL!
    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")

    buffer = Queue.Queue(maxsize=buffer_size - 1)

    # the effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the buffer.

    def _buffered_generation_thread(source_gen, buffer):
        for data in source_gen:
            buffer.put(data, block=True)
        buffer.put(None)  # sentinel: signal the end of the iterator

    thread = threading.Thread(target=_buffered_generation_thread, args=(source_gen, buffer))
    thread.daemon = True
    thread.start()

    for data in iter(buffer.get, None):
        yield data
upnp_reciever.py (project: Static-UPnP, author: nigelb)
def start(self):
        self.setup_sockets()
        import StaticUPnP_Settings
        permissions = Namespace(**StaticUPnP_Settings.permissions)
        print(permissions)
        if permissions.drop_permissions:
            self.drop_privileges(permissions.user, permissions.group)

        self.running = Value(ctypes.c_int, 1)
        self.queue = Queue()
        self.reciever_thread = Process(target=self.socket_handler, args=(self.queue, self.running))
        self.reciever_thread.start()
        self.schedule_thread = Process(target=self.schedule_handler, args=(self.running,))
        self.schedule_thread.start()
        self.response_thread = Process(target=self.response_handler, args=(self.queue, self.running))
        self.response_thread.start()
downloadandsplit.py (project: histwords, author: williamleif)
def run_parallel(num_processes, out_dir, source):
    page = requests.get("http://storage.googleapis.com/books/ngrams/books/datasetsv2.html")
    pattern = re.compile('href=\'(.*%s-%s-%s-.*\.gz)' % (source, TYPE, VERSION))
    urls = pattern.findall(page.text)
    del page
    queue = Queue()
    for url in urls:
        queue.put(url)
    ioutils.mkdir(out_dir + '/' + source + '/raw')
    download_dir = out_dir + '/' + source + '/raw/'
    ioutils.mkdir(download_dir)
    procs = [Process(target=split_main, args=[i, queue, download_dir]) for i in range(num_processes)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
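
split_main is the worker target and is not shown; reconstructed only from the call above, each worker drains the shared URL queue along these lines (download_and_split is a hypothetical helper):

import queue  # named Queue in Python 2

def split_main(worker_id, url_queue, download_dir):
    while True:
        try:
            url = url_queue.get(block=False)
        except queue.Empty:  # multiprocessing queues raise the standard Empty
            return  # no URLs left; the parent's join() returns
        download_and_split(url, download_dir)  # hypothetical helper
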
main.py (project: IPProxy, author: yutian2011)
def main():
    ip_queue = multiprocessing.Queue()
    msg_queue = multiprocessing.Queue()
    p1 = multiprocessing.Process(target=get_proxy,args=(ip_queue,msg_queue))
    p2 = multiprocessing.Process(target=test_and_verify.verify_db_data,args=(ip_queue,msg_queue))
    p3 = [multiprocessing.Process(target=test_and_verify.gevent_queue,args=(ip_queue,msg_queue)) for i in range(settings.TEST_PROCESS_NUM)]
    p4 = multiprocessing.Process(target=web_cache_run,args=(ip_queue,))
    p1.start()
    p2.start()
    for p in p3:
        p.start()
    pid_list = [os.getpid(),p1.pid,p2.pid,]
    pid_list.extend(p.pid for p in p3)
    if WEB_USE_REDIS_CACHE:
        p4.start()
        pid_list.append(p4.pid)
    with open(PID,"w") as f:
        f.write(json.dumps(pid_list))
    p1.join()
    p2.join()
    for p in p3:
        p.join()
    if WEB_USE_REDIS_CACHE:
        p4.join()
tests_channel.py (project: Brightside, author: BrighterCommand)
def test_handle_receive_on_a_channel(self):
        """
        Given that I have a channel
        When I receive on that channel
        Then I should get a message via the consumer
        """

        body = BrightsideMessageBody("test message")
        header = BrightsideMessageHeader(uuid4(), "test topic", BrightsideMessageType.MT_COMMAND)
        message = BrightsideMessage(header, body)

        fake_queue = [message]
        consumer = FakeConsumer(fake_queue)

        channel = Channel("test", consumer, Pipeline())

        msg = channel.receive(1)

        self.assertEqual(message.body.value, msg.body.value)
        self.assertEqual(message.header.topic, msg.header.topic)
        self.assertEqual(message.header.message_type, msg.header.message_type)
        self.assertEqual(0, len(fake_queue))  # We have read the queue
        self.assertTrue(channel.state == ChannelState.started)  # We don't stop because we consume a message
tests_channel.py (project: Brightside, author: BrighterCommand)
def test_handle_acknowledge(self):
        """
        Given that I have a channel
        When I acknowledge a message on that channel
        Then I should acknowledge the message on the consumer
        """

        body = BrightsideMessageBody("test message")
        header = BrightsideMessageHeader(uuid4(), "test topic", BrightsideMessageType.MT_COMMAND)
        message = BrightsideMessage(header, body)

        fake_queue = [message]
        consumer = FakeConsumer(fake_queue)

        channel = Channel("test", consumer, Pipeline())

        channel.acknowledge(message)

        self.assertTrue(consumer.has_acknowledged(message))
tests_channel.py (project: Brightside, author: BrighterCommand)
def test_handle_requeue(self):
        """
        Given that I have a channel
        When I receive a requeue on that channel
        Then I should ask the consumer to requeue the message
        """

        body = BrightsideMessageBody("test message")
        header = BrightsideMessageHeader(uuid4(), "test topic", BrightsideMessageType.MT_COMMAND)
        message = BrightsideMessage(header, body)

        fake_queue = []
        consumer = FakeConsumer(fake_queue)

        channel = Channel("test", consumer, Pipeline())

        channel.requeue(message)

        self.assertEqual(len(consumer), 1)
common.py (project: geppetto, author: datosio)
def __init__(self, *args, **kwargs):
        test_notes = global_vars['test_notes']
        pause_reporting = global_vars['pause_reporting']

        def wrapper(func, test_notes, pause_reporting, **kwargs):
            """

            :param func: function to pass to multiprocessing.Process.
            :param test_notes: multiprocessing Queue() instance. Allows us to add notes to the shared test log.
            :param pause_reporting: multiprocessing Event() instance. Turns off reporting to the terminal when input is needed.
            :param kwargs: dictionary that contains all args and kwargs being sent to the wrapped function.
            :return: the wrapped function's return value.
            """
            global_vars['test_notes'] = test_notes
            global_vars['pause_reporting'] = pause_reporting
            args_ = kwargs['args'] if 'args' in kwargs else ()
            kwargs_ = kwargs['kwargs'] if 'kwargs' in kwargs else {}
            return func(*args_, **kwargs_)

        wrapper_args = [kwargs['target'], test_notes, pause_reporting]
        wrapper_kwargs = kwargs

        multiprocessing.Process.__init__(self, target=wrapper, args=wrapper_args, kwargs=wrapper_kwargs)
agent.py (project: osbrain, author: opensistemas-hub)
def __init__(self, name, nsaddr=None, addr=None, serializer=None,
                 transport=None, base=Agent, attributes=None):
        super().__init__()
        self.name = name
        self._daemon = None
        self.host, self.port = address_to_host_port(addr)
        if self.port is None:
            self.port = 0
        self.nsaddr = nsaddr
        self.serializer = serializer
        self.transport = transport
        self.base = base
        self.shutdown_event = multiprocessing.Event()
        self.queue = multiprocessing.Queue()
        self.sigint = False
        self.attributes = attributes
application.py (project: hienoi, author: christophercrouzet)
def _receive_message(c, block=False):
    """Receive a message."""
    if isinstance(c, multiprocessing.queues.Queue):
        try:
            message = c.get(block=block)
        except queue.Empty:
            return None
    else:
        if not block and not c.poll():
            return None

        try:
            message = c.recv()
        except EOFError:
            return None

    return message
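
Both branches can be exercised in a short sketch (all names below are illustrative):

import multiprocessing
import time

q = multiprocessing.Queue()
parent_conn, child_conn = multiprocessing.Pipe()

print(_receive_message(q))            # None: queue empty, non-blocking
q.put({'kind': 'tick'})
time.sleep(0.1)                       # let the queue's feeder thread flush
print(_receive_message(q))            # {'kind': 'tick'} via Queue.get()

child_conn.send({'kind': 'tock'})
print(_receive_message(parent_conn))  # {'kind': 'tock'} via Connection.recv()
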
sessions.py (project: fluffy, author: m4ce)
def test(self):
        """Test IPTables firewall rules

        Returns:
            (bool, Optional[str]): A tuple with the first object being True if the test succeeded, else False. The second object is a string storing an optional error message.

        """

        rules = self.build(chains=self.chains, interfaces=self.interfaces,
                           addressbook=self.addressbook, rules=self.rules, services=self.services)
        tmpfile = tempfile.NamedTemporaryFile(
            dir=self._sessions_dir, prefix='test_', delete=False)
        tmpfile.write("\n".join(rules))
        tmpfile.close()
        os.chmod(tmpfile.name, 0o755)

        q = Queue()
        p = Process(target=self._test, args=(tmpfile.name, q))
        p.start()
        p.join()

        os.remove(tmpfile.name)

        return q.get()
utils.py (project: POT, author: rflamary)
def parmap(f, X, nprocs=multiprocessing.cpu_count()):
    """ paralell map for multiprocessing """
    q_in = multiprocessing.Queue(1)
    q_out = multiprocessing.Queue()

    proc = [multiprocessing.Process(target=fun, args=(f, q_in, q_out))
            for _ in range(nprocs)]
    for p in proc:
        p.daemon = True
        p.start()

    sent = [q_in.put((i, x)) for i, x in enumerate(X)]
    [q_in.put((None, None)) for _ in range(nprocs)]
    res = [q_out.get() for _ in range(len(sent))]

    [p.join() for p in proc]

    return [x for i, x in sorted(res)]
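
The worker target fun is defined elsewhere in POT's utils.py; reconstructed from the protocol above ((index, item) jobs, one (None, None) sentinel per worker, (index, result) replies), it is essentially:

def fun(f, q_in, q_out):
    while True:
        i, x = q_in.get()
        if i is None:  # sentinel: no more work for this process
            break
        q_out.put((i, f(x)))
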
__init__.py (project: gax-python, author: googleapis)
def __init__(self, operation, client, result_type, metadata_type,
                 call_options=None):
        """
        Args:
            operation (google.longrunning.Operation): the initial long-running
                operation object.
            client
                (google.gapic.longrunning.operations_client.OperationsClient):
                a client for the long-running operation service.
            result_type (type): the class type of the result.
            metadata_type (Optional[type]): the class type of the metadata.
            call_options (Optional[google.gax.CallOptions]): the call options
                that are used when reloading the operation.
        """
        self._operation = operation
        self._client = client
        self._result_type = result_type
        self._metadata_type = metadata_type
        self._call_options = call_options
        self._queue = mp.Queue()
        self._process = None
manager.py (project: swarm, author: a7vinx)
def get_result(self):
        """
        Get a result from the result queue, confirming the task index along the way.
        Returns '' if all tasks have been confirmed.

        Raises:
            Queue.Empty: no response could be fetched within the timeout
        """
        # check whether all tasks have been confirmed
        # if so, return ''
        if self._task_confirm_num==self._cur_task_num:
            return ''
        # may throw Queue.Empty here
        task_result=self._result_queue.get(block=True,timeout=self._timeout)
        resultl=task_result.split('|') 
        index=int(resultl[1],10)
        result='|'.join(resultl[2:])
        # do confirm
        # if it is a duplicate, try to get the result again
        if self._task_confirm_list[index]!=0:
            return self.get_result()
        self._task_confirm_list[index]=1
        self._task_confirm_num+=1
        LOG.debug('get result: %s'%task_result.replace('\n',' '))
        return result
scene.py (project: transpyler, author: Transpyler)
def __init__(self, parent=None, fps=30, inbox=None, outbox=None):
        super().__init__(parent)
        self._fps = fps
        self._interval = 1 / fps
        self.startTimer(int(1000 / fps))  # Qt timer intervals are integer milliseconds

        # Connect signals to slots
        self.clearScreenSignal.connect(self.clearScreen)
        self.restartScreenSignal.connect(self.restartScreen)

        # Creates mail boxes
        self._inbox = inbox = Queue() if inbox is None else inbox
        self._outbox = outbox = Queue() if outbox is None else outbox

        # Init
        self._turtles = QGraphicsSceneGroup(self, inbox=inbox, outbox=outbox)
        self._tasks = deque()
        assert self._turtles.inbox is self._inbox
        assert self._turtles.outbox is self._outbox
runners.py (project: transpyler, author: Transpyler)
def start_qt_scene_app_subprocess():
    """
    Starts a remote sub-process that initializes a TurtleScene widget and Qt's
    mainloop.
    """

    inbox = MailboxState.inbox = Queue()
    outbox = MailboxState.outbox = Queue()
    process = Process(target=start_qt_scene_app,
                      kwargs=dict(outbox=outbox, inbox=inbox, ping=True),
                      name='turtle-server')
    process.daemon = True
    process.start()

    # Send a ping message to the child process and wait for the echo
    outbox.put(['ping'])
    msg = inbox.get(timeout=2.0)
    if msg != ['ping']:
        raise RuntimeError('wrong response from server: %s' % (msg,))

    return process
timeout.py (project: image_recognition, author: tue-robotics)
def __call__(self, *args, **kwargs):
        """Execute the embedded function object asynchronously.

        The function given to the constructor is transparently called and
        requires that "ready" be intermittently polled. If and when it is
        True, the "value" property may then be checked for returned data.
        """
        self.__limit = kwargs.pop('timeout', self.__limit)
        self.__queue = multiprocessing.Queue(1)
        args = (self.__queue, self.__function) + args
        self.__process = multiprocessing.Process(target=_target,
                                                 args=args,
                                                 kwargs=kwargs)
        self.__process.daemon = True
        self.__process.start()
        self.__timeout = self.__limit + time.time()
        while not self.ready:
            time.sleep(0.01)
        return self.value
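
Assuming the surrounding class wraps an ordinary function and is exposed under a name like Timeout (the class itself and its _target helper are not shown on this page, so both names are hypothetical), usage would look roughly like:

import time

def slow_add(a, b):
    time.sleep(10)  # deliberately slower than the limit
    return a + b

guarded = Timeout(slow_add)        # hypothetical constructor
result = guarded(1, 2, timeout=2)  # per-call limit, popped in __call__ above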

