Source code examples for Python's Empty() class (queue.Empty)
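Every snippet below handles the same exception: queue.Empty, which Queue.get_nowait() raises immediately on an empty queue, and which Queue.get() raises once its timeout expires. A minimal, self-contained sketch of that behavior, using only the standard library:

import queue

q = queue.Queue()
q.put('hello')

# Non-blocking get succeeds while an item is available.
print(q.get_nowait())  # -> 'hello'

# Once the queue is drained, get(timeout=...) blocks for the given
# time and then raises queue.Empty; get_nowait() raises it immediately.
try:
    q.get(timeout=0.1)
except queue.Empty:
    print('queue was empty')

On Python 2 the module is spelled Queue, which is why several snippets below catch Queue.Empty or a compatibility alias instead.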

sender.py (project: kinesis_producer, author: ludia)
def run_once(self):
        """Accumulate records and flush when accumulator is ready."""
        try:
            record = self.queue.get(timeout=0.05)
        except queue.Empty:
            record = None
        else:
            success = self._accumulator.try_append(record)
            if not success:
                self.flush()
                success = self._accumulator.try_append(record)
                assert success, "Failed to accumulate even after flushing"

            self.queue.task_done()

        is_ready = self._accumulator.is_ready()
        force_flush = not self._running and record is None

        if is_ready or force_flush:
            self.flush()
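The loop above pairs a short get() timeout, which keeps the thread responsive to shutdown, with flush-and-retry when the accumulator rejects a record. A stripped-down sketch of the same idea; Accumulator here is a hypothetical stand-in, not the kinesis_producer class:

import queue

class Accumulator(object):
    """Hypothetical fixed-capacity buffer, used only to illustrate the pattern."""
    def __init__(self, capacity=3):
        self.capacity, self.items = capacity, []

    def try_append(self, record):
        if len(self.items) >= self.capacity:
            return False
        self.items.append(record)
        return True

def drain(q, acc):
    while True:
        try:
            record = q.get(timeout=0.05)  # short timeout keeps the loop responsive
        except queue.Empty:
            return
        if not acc.try_append(record):
            print('flushing', acc.items)  # flush, after which the retry must succeed
            acc.items = []
            assert acc.try_append(record)

q = queue.Queue()
for i in range(7):
    q.put(i)
acc = Accumulator()
drain(q, acc)     # flushes [0, 1, 2], then [3, 4, 5]
print(acc.items)  # -> [6], still buffered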
es_index.py (project: gransk, author: pcbje)
def _commit(self):
    bulk = []
    stop = False

    while True:
      while len(bulk) < 50 and not stop:
        try:
          obj = self.elastic_bulk.get(timeout=3)
        except queue.Empty:
          break

        if obj is None:
          stop = True
        else:
          bulk.append(obj)

      if bulk:
        try:
          self.helper.bulk(self.elastic, bulk)
        except Exception as err:
          LOGGER.exception('es index error: %s', err)
        bulk = []

      if stop:
        break
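Two details carry this pattern: a None sentinel on the queue signals shutdown, and items are batched (up to 50 here) before each bulk call, flushing early whenever the queue goes quiet. A standalone sketch of the sentinel-plus-batch loop; the names are illustrative, not the gransk API:

import queue

def batches(q, batch_size=50, timeout=3):
    """Yield lists of items from q; a None sentinel ends the iteration."""
    stop = False
    while not stop:
        batch = []
        while len(batch) < batch_size and not stop:
            try:
                item = q.get(timeout=timeout)
            except queue.Empty:
                break          # queue went quiet: flush what we have
            if item is None:
                stop = True    # sentinel: finish after this batch
            else:
                batch.append(item)
        if batch:
            yield batch

q = queue.Queue()
for i in range(7):
    q.put(i)
q.put(None)
print(list(batches(q, batch_size=3, timeout=0.1)))  # [[0, 1, 2], [3, 4, 5], [6]]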
cluster.py (project: deb-python-cassandra-driver, author: openstack)
def run(self):
        while True:
            if self.is_shutdown:
                return

            try:
                while True:
                    run_at, i, task = self._queue.get(block=True, timeout=None)
                    if self.is_shutdown:
                        if task:
                            log.debug("Not executing scheduled task due to Scheduler shutdown")
                        return
                    if run_at <= time.time():
                        self._scheduled_tasks.discard(task)
                        fn, args, kwargs = task
                        kwargs = dict(kwargs)
                        future = self._executor.submit(fn, *args, **kwargs)
                        future.add_done_callback(self._log_if_failed)
                    else:
                        self._queue.put_nowait((run_at, i, task))
                        break
            except Queue.Empty:
                pass

            time.sleep(0.1)
future_full_pipeline.py (project: deb-python-cassandra-driver, author: openstack)
def run(self):
        futures = queue.Queue(maxsize=121)

        self.start_profile()

        for i in range(self.num_queries):
            if i >= 120:
                old_future = futures.get_nowait()
                old_future.result()

            key = "{}-{}".format(self.thread_num, i)
            future = self.run_query(key)
            futures.put_nowait(future)

        while True:
            try:
                futures.get_nowait().result()
            except queue.Empty:
                break

        self.finish_profile()
future_batches.py (project: deb-python-cassandra-driver, author: openstack)
def run(self):
        futures = queue.Queue(maxsize=121)

        self.start_profile()

        for i in range(self.num_queries):
            if i > 0 and i % 120 == 0:
                # clear the existing queue
                while True:
                    try:
                        futures.get_nowait().result()
                    except queue.Empty:
                        break

            key = "{0}-{1}".format(self.thread_num, i)
            future = self.run_query(key)
            futures.put_nowait(future)

        while True:
            try:
                futures.get_nowait().result()
            except queue.Empty:
                break

        self.finish_profile()
urllist.py (project: icrawler, author: hellock)
def worker_exec(self, queue_timeout=2, **kwargs):
        while True:
            if self.signal.get('reach_max_num'):
                self.logger.info('downloaded image reached max num, thread %s'
                                 ' exit', threading.current_thread().name)
                break
            try:
                url = self.in_queue.get(timeout=queue_timeout)
            except queue.Empty:
                if self.signal.get('feeder_exited'):
                    self.logger.info('no more page urls to parse, thread %s'
                                     ' exit', threading.current_thread().name)
                    break
                else:
                    self.logger.info('%s is waiting for new page urls',
                                     threading.current_thread().name)
                    continue
            except Exception as e:
                self.logger.error('exception caught in thread %s: %s',
                                  threading.current_thread().name, e)
                continue
            else:
                self.logger.debug('start downloading page {}'.format(url))
            self.output({'file_url': url})
client.py (project: etcd3-gateway, author: dims)
def watch_once(self, key, timeout=None, **kwargs):
        """Watch a key and stops after the first event.

        :param key: key to watch
        :param timeout: (optional) timeout in seconds.
        :returns: event
        """
        event_queue = queue.Queue()

        def callback(event):
            event_queue.put(event)

        w = watch.Watcher(self, key, callback, **kwargs)
        try:
            return event_queue.get(timeout=timeout)
        except queue.Empty:
            raise exceptions.WatchTimedOut()
        finally:
            w.stop()
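A pattern this snippet shares with the python-etcd3 one further down: queue.Empty is caught at the API boundary and re-raised as a domain-specific exception (WatchTimedOut), so callers never see the queue implementation detail. A minimal sketch of that translation, with WatchTimedOut as a stand-in exception class:

import queue

class WatchTimedOut(Exception):
    """Stand-in for a library's domain-specific timeout error."""

def wait_for_event(event_queue, timeout=None):
    try:
        return event_queue.get(timeout=timeout)
    except queue.Empty:
        raise WatchTimedOut()

try:
    wait_for_event(queue.Queue(), timeout=0.1)
except WatchTimedOut:
    print('no event arrived in time')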
test_rpc.py (project: django-nameko, author: and3rson)
def test_cluster_proxy_pool():
    with patch('django_nameko.rpc.ClusterRpcProxy') as FakeClusterRpcProxy:
        pool = rpc.ClusterRpcProxyPool(dict(), pool_size=2)
        pool.start()
        assert pool.queue.qsize() == 2

        with pool.next() as client:
            assert pool.queue.qsize() == 1

            client.foo.bar()
            assert call().start().foo.bar() in FakeClusterRpcProxy.mock_calls

            with pool.next():
                assert pool.queue.qsize() == 0

                tools.assert_raises(queue_six.Empty, pool.next, timeout=1)

            assert pool.queue.qsize() == 1
        assert pool.queue.qsize() == 2

        pool.stop()
event_source.py (project: InplusTrader_Linux, author: zhengwsh)
def events(self, start_date, end_date, frequency):
        running = True

        self.clock_engine_thread.start()
        self.quotation_engine_thread.start()

        while running:
            real_dt = datetime.datetime.now()
            while True:
                try:
                    dt, event_type = self.event_queue.get(timeout=1)
                    break
                except Empty:
                    continue

            system_log.debug("real_dt {}, dt {}, event {}", real_dt, dt, event_type)
            yield Event(event_type, real_dt, dt)
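The inner loop (a one-second get() retried on Empty) behaves almost like a bare blocking get(), but waking up every second keeps the thread responsive; in CPython 2 in particular, a blocking get() without a timeout cannot be interrupted by KeyboardInterrupt until an item arrives. A minimal sketch of the idiom:

import queue

def get_interruptible(q):
    """Blocking get that wakes periodically so interrupts are not starved."""
    while True:
        try:
            return q.get(timeout=1)
        except queue.Empty:
            continue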
event_source.py (project: InplusTrader_Linux, author: zhengwsh)
def events(self, start_date, end_date, frequency):
        running = True

        self.clock_engine_thread.start()
        self.quotation_engine_thread.start()

        while running:
            real_dt = datetime.datetime.now()
            while True:
                try:
                    dt, event_type = self.event_queue.get(timeout=1)
                    break
                except Empty:
                    continue

            system_log.debug("real_dt {}, dt {}, event {}", real_dt, dt, event_type)
            yield Event(event_type, calendar_dt=real_dt, trading_dt=dt)
log_printer.py (project: universe, author: openai)
def consume_queue(queue, cascade_stop):
    """Consume the queue by reading lines off of it and yielding them."""
    while True:
        try:
            item = queue.get(timeout=0.1)
        except Empty:
            yield None
            continue
        # See https://github.com/docker/compose/issues/189
        except thread.error:
            raise ShutdownException()

        if item.exc:
            raise item.exc

        if item.is_stop:
            if cascade_stop:
                # PEP 479: raising StopIteration inside a generator becomes a
                # RuntimeError on Python 3.7+; a plain return ends it cleanly.
                return
            else:
                continue

        yield item.item
client.py (project: isc, author: and3rson)
def run(self):
        self._is_running = True
        while self._is_running:
            if self.consumer.is_connected():
                producer = kombu.Producer(self.consumer._channel, on_return=self.consumer._handle_return)
                try:
                    queued_request = self._out_queue.get(timeout=0.5)
                    if True:
                        # with kombu.producers[self.consumer.get_connection()].acquire(block=True) as producer:
                        # producer.on_return = print
                        try:
                            self._dispatch_request(queued_request, producer)
                        except Exception as e:
                            # except ConnectionResetError:
                            log.debug('Failed to dispatch request, re-enqueueing again, error was: {}'.format(
                                str(e)
                            ))
                            self.enqueue(queued_request)
                except Empty:
                    continue
            else:
                sleep(0.5)
                log.debug('Waiting for consumer to be ready...')
app.py (project: boartty, author: openstack)
def refresh(self, data=None, force=False):
        widget = self.frame.body
        while isinstance(widget, urwid.Overlay):
            widget = widget.contents[0][0]
        interested = force
        invalidate = False
        try:
            while True:
                event = self.sync.result_queue.get(0)
                if widget.interested(event):
                    interested = True
                if hasattr(event, 'held_changed') and event.held_changed:
                    invalidate = True
        except queue.Empty:
            pass
        if interested:
            widget.refresh()
        if invalidate:
            self.updateStatusQueries()
        self.status.refresh()
task_queue.py (project: Url, author: beiruan)
def get(self):
        '''Get a task from the queue when the rate-limiting bucket allows it'''
        if self.bucket.get() < 1:
            return None
        now = time.time()
        self.mutex.acquire()
        try:
            task = self.priority_queue.get_nowait()
            self.bucket.desc()
        except Queue.Empty:
            self.mutex.release()
            return None
        task.exetime = now + self.processing_timeout
        self.processing.put(task)
        self.mutex.release()
        return task.taskid
scheduler.py (project: Url, author: beiruan)
def _check_task_done(self):
        '''Check status queue'''
        cnt = 0
        try:
            while True:
                task = self.status_queue.get_nowait()
                # check _on_get_info result here
                if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
                    if task['project'] not in self.projects:
                        continue
                    project = self.projects[task['project']]
                    project.on_get_info(task['track'].get('save') or {})
                    logger.info(
                        '%s on_get_info %r', task['project'], task['track'].get('save', {})
                    )
                    continue
                elif not self.task_verify(task):
                    continue
                self.on_task_status(task)
                cnt += 1
        except Queue.Empty:
            pass
        return cnt
result_worker.py (project: Url, author: beiruan)
def run(self):
        '''Run loop'''
        logger.info("result_worker starting...")

        while not self._quit:
            try:
                task, result = self.inqueue.get(timeout=1)
                self.on_result(task, result)
            except Queue.Empty as e:
                continue
            except KeyboardInterrupt:
                break
            except AssertionError as e:
                logger.error(e)
                continue
            except Exception as e:
                logger.exception(e)
                continue

        logger.info("result_worker exiting...")
redis_queue.py (project: Url, author: beiruan)
def get(self, block=True, timeout=None):
        if not block:
            return self.get_nowait()

        start_time = time.time()
        while True:
            try:
                return self.get_nowait()
            except self.Empty:
                if timeout:
                    lasted = time.time() - start_time
                    if timeout > lasted:
                        time.sleep(min(self.max_timeout, timeout - lasted))
                    else:
                        raise
                else:
                    time.sleep(self.max_timeout)
rabbitmq.py (project: Url, author: beiruan)
def get(self, block=True, timeout=None, ack=False):
        if not block:
            return self.get_nowait()

        start_time = time.time()
        while True:
            try:
                return self.get_nowait(ack)
            except BaseQueue.Empty:
                if timeout:
                    lasted = time.time() - start_time
                    if timeout > lasted:
                        time.sleep(min(self.max_timeout, timeout - lasted))
                    else:
                        raise
                else:
                    time.sleep(self.max_timeout)
beanstalk.py (project: Url, author: beiruan)
def get(self, block=True, timeout=None):
        if not block:
            return self.get_nowait()

        start_time = time.time()
        while True:
            try:
                return self.get_nowait()
            except BaseQueue.Empty:
                if timeout:
                    lasted = time.time() - start_time
                    if timeout > lasted:
                        time.sleep(min(self.max_timeout, timeout - lasted))
                    else:
                        raise
                else:
                    time.sleep(self.max_timeout)
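redis_queue.py, rabbitmq.py, and beanstalk.py above all implement the same idea: emulate a blocking get(timeout=...) on top of a non-blocking backend by polling get_nowait() and sleeping between attempts, with each sleep capped at max_timeout. A standalone sketch; the poll interval and the plain queue backend are assumptions for illustration:

import queue
import time

def blocking_get(get_nowait, timeout=None, max_poll=0.1):
    """Poll a non-blocking getter until an item arrives or the timeout expires."""
    start_time = time.time()
    while True:
        try:
            return get_nowait()
        except queue.Empty:
            if timeout is None:
                time.sleep(max_poll)
                continue
            lasted = time.time() - start_time
            if timeout > lasted:
                time.sleep(min(max_poll, timeout - lasted))
            else:
                raise

q = queue.Queue()
q.put('item')
print(blocking_get(q.get_nowait, timeout=1))  # -> 'item'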
processor.py (project: Url, author: beiruan)
def run(self):
        '''Run loop'''
        logger.info("processor starting...")

        while not self._quit:
            try:
                task, response = self.inqueue.get(timeout=1)
                self.on_task(task, response)
                self._exceptions = 0
            except Queue.Empty as e:
                continue
            except KeyboardInterrupt:
                break
            except Exception as e:
                logger.exception(e)
                self._exceptions += 1
                if self._exceptions > self.EXCEPTION_LIMIT:
                    break
                continue

        logger.info("processor exiting...")
client.py (project: python-etcd3, author: kragniz)
def add_watch_callback(self, *args, **kwargs):
        """
        Watch a key or range of keys and call a callback on every event.

        If a timeout was declared during client initialization and the
        watch cannot be created within that time, the method raises a
        ``WatchTimedOut`` exception.

        :param key: key to watch
        :param callback: callback function

        :returns: watch_id, which can later be used to cancel the watch.
        """
        try:
            return self.watcher.add_callback(*args, **kwargs)
        except queue.Empty:
            raise exceptions.WatchTimedOut()
client.py (project: python-etcd3, author: kragniz)
def watch_once(self, key, timeout=None, **kwargs):
        """
        Watch a key and stop after the first event.

        If a timeout was specified and no event arrives within it, the
        method raises a ``WatchTimedOut`` exception.

        :param key: key to watch
        :param timeout: (optional) timeout in seconds.
        :returns: ``Event``
        """
        event_queue = queue.Queue()

        def callback(event):
            event_queue.put(event)

        watch_id = self.add_watch_callback(key, callback, **kwargs)

        try:
            return event_queue.get(timeout=timeout)
        except queue.Empty:
            raise exceptions.WatchTimedOut()
        finally:
            self.cancel_watch(watch_id)
transcribe_streaming.py (project: chordspeak, author: nyboer)
def _audio_data_generator(buff):
    """A generator that yields all available data in the given buffer.

    Args:
        buff - a Queue object, where each element is a chunk of data.
    Yields:
        A chunk of data that is the aggregate of all chunks of data in `buff`.
        The function will block until at least one data chunk is available.
    """
    while True:
        # Use a blocking get() to ensure there's at least one chunk of data
        chunk = buff.get()
        if not chunk:
            # A falsey value indicates the stream is closed.
            break
        data = [chunk]

        # Now consume whatever other data's still buffered.
        while True:
            try:
                data.append(buff.get(block=False))
            except queue.Empty:
                break
        yield b''.join(data)
transcribe_streaming.py (project: hackfair-speech, author: DjangoGirlsSeoul)
def _audio_data_generator(buff):
    """A generator that yields all available data in the given buffer.
    Args:
        buff - a Queue object, where each element is a chunk of data.
    Yields:
        A chunk of data that is the aggregate of all chunks of data in `buff`.
        The function will block until at least one data chunk is available.
    """
    while True:
        # Use a blocking get() to ensure there's at least one chunk of data
        chunk = buff.get()
        if not chunk:
            # A falsey value indicates the stream is closed.
            break
        data = [chunk]

        # Now consume whatever other data's still buffered.
        while True:
            try:
                data.append(buff.get(block=False))
            except queue.Empty:
                break
        yield b''.join(data)
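Both copies of _audio_data_generator block for the first chunk, then drain whatever else is buffered with non-blocking gets, so each yield aggregates everything available at that moment; a falsey chunk closes the stream. A short usage sketch (it assumes the definition above is in scope, along with its import queue; the producer thread and chunk values are illustrative):

import queue
import threading
import time

buff = queue.Queue()

def producer():
    for chunk in (b'ab', b'cd', b'ef'):
        buff.put(chunk)
        time.sleep(0.05)
    buff.put(b'')  # falsey chunk signals end of stream

threading.Thread(target=producer).start()
for data in _audio_data_generator(buff):
    print(data)    # each item aggregates all chunks buffered at that moment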
a3c.py (project: noreward-rl, author: pathak22)
def pull_batch_from_queue(self):
        """
        Take a rollout from the queue of the thread runner.
        """
        # get top rollout from queue (FIFO)
        rollout = self.runner.queue.get(timeout=600.0)
        while not rollout.terminal:
            try:
                # Now, get remaining *available* rollouts from queue and append them into
                # the same one above. If queue.Queue(5): len=5 and everything is
                # superfast (not usually the case), then all 5 will be returned and
                # exception is raised. In such a case, effective batch_size would become
                # constants['ROLLOUT_MAXLEN'] * queue_maxlen(5). But it is almost never the
                # case, i.e., collecting a rollout of length=ROLLOUT_MAXLEN takes more time
                # than get(). So there are usually no more available rollouts in the queue, and
                # the exception is almost always raised. Hence, queue_maxlen should ideally be 1.
                # Also note that the next rollout generation is invoked automatically because
                # it's a thread that is always running, using 'yield' at the end of the generation process.
                # To conclude, effective batch_size = constants['ROLLOUT_MAXLEN']
                rollout.extend(self.runner.queue.get_nowait())
            except queue.Empty:
                break
        return rollout
file_pusher.py (project: client, author: wandb)
def _thread_body(self):
        while True:
            event = self._queue.get()
            if isinstance(event, EventFinish):
                break
            self._handle_event(event)

        while True:
            try:
                event = self._queue.get(True, 1)
            except queue.Empty:
                event = None
            if event:
                self._handle_event(event)
            elif not self._jobs:
                # Queue was empty and no jobs left.
                break
background_thread.py (project: opencensus-python, author: census-instrumentation)
def _get_items(self):
        """Get multiple items from a Queue.

        Gets at least one (blocking) and at most ``max_items`` items
        (non-blocking) from a given Queue. Does not mark the items as done.

        :rtype: Sequence
        :returns: A sequence of items retrieved from the queue.
        """
        items = [self._queue.get()]

        while len(items) < self._max_batch_size:
            try:
                items.append(self._queue.get_nowait())
            except queue.Empty:
                break

        return items
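The same block-for-one-then-drain shape as the audio generators above, but bounded by a maximum batch size, which keeps export batches predictable. A usage sketch of the pattern as a free function; the batch size and names are illustrative:

import queue

def get_items(q, max_batch_size=10):
    """Block for one item, then take up to max_batch_size - 1 more without blocking."""
    items = [q.get()]
    while len(items) < max_batch_size:
        try:
            items.append(q.get_nowait())
        except queue.Empty:
            break
    return items

q = queue.Queue()
for i in range(25):
    q.put(i)
print(get_items(q, max_batch_size=10))  # -> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]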
connection_pool.py (project: flask-nameko, author: jessepollak)
def _get_connection_from_queue(self, initial_timeout, next_timeout):
        try:
            return self._queue.get(True, initial_timeout)
        except Empty:
            try:
                self._lock.acquire()
                if self._current_connections == self._max_connections:
                    raise ClientUnavailableError("Too many connections in use")
                cb = self._make_connection()
                return cb
            except ClientUnavailableError as ex:
                try:
                    return self._queue.get(True, next_timeout)
                except Empty:
                    raise ex
            finally:
                self._lock.release()
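The fallback chain above: a quick get from the pool, then, under a lock, create a fresh connection if the cap allows, and only fall back to a longer blocking wait when the pool is saturated. A condensed, hypothetical sketch of that shape, not the flask-nameko API:

import queue
import threading

class ClientUnavailableError(Exception):
    pass

class Pool(object):
    def __init__(self, max_connections=2):
        self._queue = queue.Queue()
        self._lock = threading.Lock()
        self._current = 0
        self._max = max_connections

    def _make_connection(self):
        self._current += 1
        return object()  # stand-in for a real connection

    def get(self, initial_timeout=0.1, next_timeout=1.0):
        try:
            return self._queue.get(True, initial_timeout)   # fast path
        except queue.Empty:
            with self._lock:
                if self._current < self._max:
                    return self._make_connection()          # grow the pool
            try:
                return self._queue.get(True, next_timeout)  # saturated: wait longer
            except queue.Empty:
                raise ClientUnavailableError('Too many connections in use')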
TFNode.py (project: TensorFlowOnSpark, author: yahoo)
def terminate(self):
    """Terminate data feeding early.

    Since TensorFlow applications can often terminate on conditions unrelated to the training data (e.g. steps, accuracy, etc),
    this method signals the data feeding process to ignore any further incoming data.  Note that Spark itself does not have a mechanism
    to terminate an RDD operation early, so the extra partitions will still be sent to the executors (but will be ignored).  Because
    of this, you should size your input data accordingly to avoid excessive overhead.
    """
    logging.info("terminate() invoked")
    self.mgr.set('state', 'terminating')

    # drop remaining items in the queue
    queue = self.mgr.get_queue(self.qname_in)
    count = 0
    done = False
    while not done:
      try:
        queue.get(block=True, timeout=5)
        queue.task_done()
        count += 1
      except Empty:
        logging.info("dropped {0} items from queue".format(count))
        done = True
event_source.py (project: rqalpha, author: ricequant)
def events(self, start_date, end_date, frequency):
        running = True

        self.clock_engine_thread.start()

        if not self.mod_config.redis_uri:
            self.quotation_engine_thread.start()

        while running:
            real_dt = datetime.datetime.now()
            while True:
                try:
                    dt, event_type = self.event_queue.get(timeout=1)
                    break
                except Empty:
                    continue

            system_log.debug("real_dt {}, dt {}, event {}", real_dt, dt, event_type)
            yield Event(event_type, calendar_dt=real_dt, trading_dt=dt)

