Python queue() class usage examples

The snippets below, collected from open-source projects, show gevent's Queue class in use.
crawl.py (project: girlfriend, author: chihongze)
def _concurrent_execute(self, context, start_req, parser, pool, pool_size):
        queue = Queue()  # request queue

        # seed the queue with the start requests
        for r in start_req:
            queue.put_nowait(r)

        if pool is None:
            pool = GeventPool(pool_size)

        greenlets = []

        while True:
            try:
                req = self._check_req(queue.get(timeout=1))
                if req.parser is None:
                    req.parser = parser
                greenlets.append(pool.spawn(req, context, queue))
            except Empty:
                break

        return [greenlet.get() for greenlet in greenlets]
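
A note on the pattern above: the loop drains the queue with get(timeout=1), so it exits about one second after in-flight handlers stop producing follow-up requests. A minimal standalone sketch of the same idea (the seed URLs and the handle function are invented for illustration, not part of girlfriend):

import gevent
from gevent.pool import Pool
from gevent.queue import Queue, Empty

queue = Queue()
for url in ["http://a.example", "http://b.example"]:  # hypothetical seeds
    queue.put_nowait(url)

pool = Pool(10)

def handle(url, q):
    # A real handler could q.put_nowait() follow-up URLs here.
    return url.upper()

greenlets = []
while True:
    try:
        url = queue.get(timeout=1)  # raises Empty after 1s of no new work
    except Empty:
        break
    greenlets.append(pool.spawn(handle, url, queue))

print([g.get() for g in greenlets])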
test_gipc.py (project: gipc, author: jgehrcke)
def usecase_child_d(forthreader, backwriter):
    recvqueue = gevent.queue.Queue()
    def g_from_forthpipe_to_q(forthreader):
        while True:
            m = forthreader.get()
            recvqueue.put(m)
            if m == "STOP":
                break

    def g_from_q_to_backpipe(backwriter):
        while True:
            m = recvqueue.get()
            backwriter.put(m)
            if m == "STOP":
                break

    g1 = gevent.spawn(g_from_forthpipe_to_q, forthreader)
    g2 = gevent.spawn(g_from_q_to_backpipe, backwriter)
    g1.get()
    g2.get()
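
The queue here decouples the greenlet reading the forward pipe from the one writing the back pipe. The same relay pattern without gipc pipes, as a self-contained sketch (message values are placeholders):

import gevent
from gevent.queue import Queue

relay = Queue()
received = []

def producer():
    for m in ("a", "b", "STOP"):
        relay.put(m)

def consumer():
    while True:
        m = relay.get()  # blocks cooperatively; other greenlets keep running
        received.append(m)
        if m == "STOP":
            break

gevent.joinall([gevent.spawn(producer), gevent.spawn(consumer)])
print(received)  # ['a', 'b', 'STOP']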
sflowtool.py (project: vaping, author: 20c)
def probe(self):
        self.log.debug("stdout queue %d" % self.stdout_queue.qsize())
        if not self.stdout_queue.qsize():
            return {}

        data = []
        try:
            # OPT skip_fields
            while True:
                line = self.stdout_queue.get_nowait()
                data.append(line)

        except gevent.queue.Empty:
            pass

        msg = {}
        msg['data'] = data
        msg['ts'] = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()
        return msg
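
get_nowait() plus catching gevent.queue.Empty is the usual way to collect everything currently buffered without blocking. A minimal sketch of that drain loop (the sample records merely stand in for sflowtool output):

from gevent.queue import Queue, Empty

q = Queue()
for line in ("FLOW,192.0.2.1,...", "CNTR,192.0.2.2,..."):  # stand-in records
    q.put(line)

data = []
try:
    while True:
        data.append(q.get_nowait())  # never blocks; raises Empty when drained
except Empty:
    pass

print(data)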
echo_node.py (project: raiden, author: raiden-network)
def echo_worker(self):
        """ The `echo_worker` works through the `self.received_transfers` queue and spawns
        `self.on_transfer` greenlets for all not-yet-seen transfers. """
        log.debug('echo worker', qsize=self.received_transfers.qsize())
        while self.stop_signal is None:
            if self.received_transfers.qsize() > 0:
                transfer = self.received_transfers.get()
                if transfer in self.seen_transfers:
                    log.debug(
                        'duplicate transfer ignored',
                        initiator=pex(transfer['initiator']),
                        amount=transfer['amount'],
                        identifier=transfer['identifier']
                    )
                else:
                    self.seen_transfers.append(transfer)
                    self.greenlets.append(gevent.spawn(self.on_transfer, transfer))
            else:
                gevent.sleep(.5)
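
The worker above polls qsize() and sleeps 0.5 s when idle. A blocking get with a timeout achieves the same periodic wake-up without the fixed latency; a sketch of that alternative (the stop flag is a simplification of Raiden's stop_signal attribute):

import gevent
from gevent.queue import Queue, Empty

transfers = Queue()
stop = False

def worker():
    while not stop:
        try:
            transfer = transfers.get(timeout=0.5)  # wake at most every 0.5s
        except Empty:
            continue  # nothing queued; re-check the stop flag
        print("processing", transfer)

g = gevent.spawn(worker)
transfers.put({"amount": 1, "identifier": 7})
gevent.sleep(0.1)
stop = True
g.join()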
gevent.py (project: oa_qian, author: sunqb)
def stop(self):
        """Stop the greenlet workers and empty all queues."""
        with self._state_change:
            if not self._running:
                return

            self._running = False

            for queue in (self.callback_queue,):
                queue.put(_STOP)

            while self._workers:
                worker = self._workers.pop()
                worker.join()

            # Clear the queues
            self.callback_queue = Queue()  # pragma: nocover

            python2atexit.unregister(self.stop)
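
The _STOP sentinel put into each queue is what wakes a worker blocked in get() so that join() can return. A minimal sketch of the matching worker loop (the worker body is an assumption; kazoo's real workers dispatch callbacks with error handling):

import gevent
from gevent.queue import Queue

_STOP = object()
callback_queue = Queue()

def worker():
    while True:
        cb = callback_queue.get()  # blocks until a callback or the sentinel
        if cb is _STOP:
            break
        cb()

w = gevent.spawn(worker)
callback_queue.put(lambda: print("callback ran"))
callback_queue.put(_STOP)
w.join()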
improve_location_with_search.py (project: handelsregister, author: Amsterdam)
def async_determine_rd_coordinates():
    """
    Worker task which takes search
    parameters off the queue
    and executes a SearchTask.
    """

    while not SEARCHES_QUEUE.empty():
        args = SEARCHES_QUEUE.get()
        task = SearchTask(*args)
        try:
            task.determine_rd_coordinates()
        except Exception as exp:
            # when tasks fails.. continue..
            log.error('\n\n\n')
            log.error(exp)
            log.error('\n\n\n')
test_runners.py (project: aiolocust, author: kpidata)
def mocked_rpc_server():
    class MockedRpcServer(object):
        queue = Queue()
        outbox = []

        def __init__(self, host, port):
            pass

        @classmethod
        def mocked_send(cls, message):
            cls.queue.put(message.serialize())

        def recv(self):
            results = self.queue.get()
            return Message.unserialize(results)

        def send(self, message):
            self.outbox.append(message.serialize())

    return MockedRpcServer
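
Note that queue is a class attribute, so the test can inject messages via the classmethod and any instance's recv() sees them. A condensed sketch of that trick (all names invented):

from gevent.queue import Queue

class MockedChannel(object):
    queue = Queue()  # class attribute: one queue shared by all instances

    @classmethod
    def inject(cls, msg):
        cls.queue.put(msg)

    def recv(self):
        return self.queue.get()

MockedChannel.inject("hello")
print(MockedChannel().recv())  # "hello"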
data_agent.py (project: ops_agent, author: sjqzhang)
def processJsonRep(self,socket, address):
        org = self.conf.get('base','client_id')
        jsonSocket = jsonSession(socket=socket,org=org)
        while 1:
            try:
                code, data = jsonSocket.recv()
                if code != 0:
                    logger.error("local receive error (%s %s)"%(code, data))
                    socket.close()
                    break
                try:
                    _reportQueue.put_nowait(data)
                except gevent.queue.Full:
                    logger.error("report queue is full")
                    jsonSocket.send_response(conf.global_vars.ErrCode.QueueFull, 'ok')
                    continue
                jsonSocket.send_response(0, 'ok')
            except Exception as e:
                logger.error("uncaught error, e={}, traceback={}".format(e, traceback.format_exc()))
                socket.close()
                break
data_agent.py (project: ops_agent, author: sjqzhang)
def processRep(self,socket, address):
        org = self.conf.get('base', 'client_id')
        pbSocket = pbSession(socket=socket,org=org)
        while 1:
            try:
                code, data = pbSocket.recv(decode=False)
                if code != 0:
                    if "connection closed" not in data:
                        logger.error("local receive error (%s %s)"%(code, data))
                    socket.close()
                    break
                try:
                    _reportQueue.put_nowait(data)
                except gevent.queue.Full:
                    logger.error("report queue is full")
                    pbSocket.send_response(conf.global_vars.ErrCode.QueueFull, 'ok')
                    continue
                pbSocket.send_response(0, 'ok')
            except Exception as e:
                logger.error("uncaught error, e={}, traceback={}".format(e, traceback.format_exc()))
                socket.close()
                break
data_agent.py (project: ops_agent, author: sjqzhang)
def enqueue(self, queue_event_list, max_queued_messages):
        if len(queue_event_list) == 0:
            return

        while True:
            try:
                # get msg
                task_msg = _reportQueue.get()
                if not task_msg:
                    continue
                dataid, org, ip = task_msg[0][-3:]
                logger.debug('recv msg, org: %s dataid: %s' %(org, dataid))
                # enqueue
                for (q, flush_ready_event) in queue_event_list:
                    if not q.full():
                        q.put_nowait(task_msg)
                    else:
                        logger.error("queue full")
                    if q.qsize() >= max_queued_messages and not flush_ready_event.is_set():
                        flush_ready_event.set()
            except Exception as e:
                logger.error(e)
subDomainsBrute.py (project: subDomainsBrute, author: 0xa-saline)
def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            self.print_count += 1
            if self.print_count < 100:
                return
            self.print_count = 0
            msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                self.found_count, self.queue.qsize(), self.scan_count, time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
        elif _msg.startswith('[+] Check DNS Server'):
            sys.stdout.write('\r' + _msg + ' ' * (self.console_width - len(_msg)))
        else:
            sys.stdout.write('\r' + _msg + ' ' * (self.console_width - len(_msg)) + '\n')
            if _found_msg:
                msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                    self.found_count, self.queue.qsize(), self.scan_count, time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
        sys.stdout.flush()
gevent.py (project: deb-kazoo, author: openstack)
def stop(self):
        """Stop the greenlet workers and empty all queues."""
        with self._state_change:
            if not self._running:
                return

            self._running = False

            for queue in (self.callback_queue,):
                queue.put(_STOP)

            while self._workers:
                worker = self._workers.pop()
                worker.join()

            # Clear the queues
            self.callback_queue = Queue()  # pragma: nocover

            python2atexit.unregister(self.stop)
pipeline.py (project: pipeline, author: alexlemann)
def pipeline(stages, initial_data):
    monitors = Group()
    # Make sure items in initial_data are iterable.
    if not isinstance(initial_data, types.GeneratorType):
        try:
            iter(initial_data)
        except TypeError:
            raise TypeError('initial_data must be iterable')
    # The StopIteration will bubble through the queues as it is reached.
    #   Once a stage monitor sees it, it indicates that the stage will read
    #   no more data and the monitor can wait for the current work to complete
    #   and clean up.
    if hasattr(initial_data, 'append'):
        initial_data.append(StopIteration)
    if not stages:
        return PipelineResult(monitors, [])
    # chain stage queue io
    #  Each stage shares an output queue with the next stage's input.
    qs = [initial_data] + [Queue() for _ in range(len(stages))]
    for stage, in_q, out_q in zip(stages, qs[:-1], qs[1:]):
        stage.in_q = in_q
        stage.out_q = out_q
        monitors.spawn(stage_monitor, stage)
    gevent.sleep(0)
    return PipelineResult(monitors, stages[-1].out_q)
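
Each stage's output queue doubles as the next stage's input, and the StopIteration sentinel travels down the chain to shut stages down in order. A self-contained sketch of that wiring (the two toy stages are invented):

import gevent
from gevent.queue import Queue

DONE = StopIteration  # the pipeline above uses the class itself as a sentinel

def stage(func, in_q, out_q):
    while True:
        item = in_q.get()
        if item is DONE:
            out_q.put(DONE)  # pass the sentinel downstream, then exit
            break
        out_q.put(func(item))

q1, q2, q3 = Queue(), Queue(), Queue()
workers = [
    gevent.spawn(stage, lambda x: x * 2, q1, q2),
    gevent.spawn(stage, lambda x: x + 1, q2, q3),
]
for item in (1, 2, 3, DONE):
    q1.put(item)
gevent.joinall(workers)

results = []
item = q3.get()
while item is not DONE:
    results.append(item)
    item = q3.get()
print(results)  # [3, 5, 7]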
subdomain.py (project: SiteScan, author: jasonsheh)
def __init__(self, target, id=''):
        self.target = target
        self.id = id
        self.ip = []
        self.dns_ip = ['1.1.1.1', '127.0.0.1', '0.0.0.0', '202.102.110.203', '202.102.110.204',
                       '220.250.64.225']
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0'}
        self.queue = Queue()
        self.thread_num = 60
        self.c_count = {}
        self.domain = []
        self.domains = {}
        self.title = {}
        self.appname = {}
        self.removed_domains = []
        self.init()
subdomain.py (project: SiteScan, author: jasonsheh)
def remove_error_domain(self):
        while not self.queue.empty():
            domain = self.queue.get()
            try:
                r = requests.get('http://' + domain, timeout=4, allow_redirects=False)
                # 400/403/500 responses mark the domain for removal.
                if r.status_code in [400, 403, 500]:
                    self.removed_domains.append(domain)
            except (requests.exceptions.ConnectTimeout,
                    requests.exceptions.ConnectionError,
                    requests.exceptions.TooManyRedirects,
                    requests.exceptions.ReadTimeout):
                self.removed_domains.append(domain)
                continue
            except Exception:
                continue
subdomain.py (project: SiteScan, author: jasonsheh)
def remove_error_subdomain(self, d):
        while not self.queue.empty():
            domain = self.queue.get()
            test_domain = 'this_subdomain_will_never_exist' + '.' + domain
            resolvers = dns.resolver.Resolver(configure=False)
            resolvers.nameservers = [self.dns[d % len(self.dns)]]
            resolvers.timeout = 10.0
            try:
                # Query through the configured resolver so the chosen
                # nameserver is actually used.
                answers = resolvers.query(test_domain)
                ips = [answer.address for answer in answers]
                for ip in ips:
                    if ip in self.dns_ip:
                        continue
                    # An answer for a name that cannot exist means a wildcard
                    # record, so the whole domain is dropped from the results.
                    self.removed_domains.append(domain)
            except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer,
                    dns.exception.Timeout):
                pass
            except Exception:
                pass
subdomain.py (project: SiteScan, author: jasonsheh)
def sub_brute(self, d):
        while not self.queue.empty():
            domain = self.queue.get()
            resolvers = dns.resolver.Resolver(configure=False)
            resolvers.nameservers = [self.dns[d % len(self.dns)]]
            resolvers.timeout = 10.0
            try:
                sys.stdout.write('\rFound: ' + str(len(self.domains.keys())) + ' Remaining: ' + str(self.queue.qsize()))
                sys.stdout.flush()
                answers = resolvers.query(domain)
                ips = [answer.address for answer in answers]
                for ip in ips:
                    if ip in self.dns_ip:
                        continue
                    # Merge into any IP list already collected for this domain.
                    if ip not in self.domains.setdefault(domain, []):
                        self.domains[domain].append(ip)
            except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer,
                    dns.name.EmptyLabel, dns.exception.Timeout):
                continue
sendir.py (project: SiteScan, author: jasonsheh)
def directory_brute(self):
        '''
        Brute-force directories from the queue against every target.
        :return:
        '''
        while not self.queue.empty():
            _dir = self.queue.get()
            for target in self.targets:
                try:
                    url = target + _dir
                    self.count += 1
                    sys.stdout.write('\rScanned: ' + str(self.count))
                    sys.stdout.flush()
                    r = requests.get('http://' + url, allow_redirects=False)

                    if r.status_code in [200, 403]:
                        self.sensitive[url] = r.status_code
                except (requests.exceptions.ReadTimeout,
                        requests.exceptions.ConnectionError,
                        requests.exceptions.TooManyRedirects):
                    continue
dispatcher.py (project: reddit-service-websockets, author: reddit)
def listen(self, namespace, max_timeout):
        """Register to listen to a namespace and yield messages as they arrive.

        If no messages arrive within `max_timeout` seconds, this will yield a
        `None` to allow clients to do periodic actions like send PINGs.

        This will run forever and yield items as an iterable. Use it in a loop
        and break out of it when you want to deregister.

        """
        queue = gevent.queue.Queue()

        namespace = namespace.rstrip("/")
        for ns in _walk_namespace_hierarchy(namespace):
            self.consumers.setdefault(ns, []).append(queue)

        try:
            while True:
                # jitter the timeout a bit to ensure we don't herd
                timeout = max_timeout - random.uniform(0, max_timeout / 2)

                try:
                    yield queue.get(block=True, timeout=timeout)
                except gevent.queue.Empty:
                    yield None

                # ensure we're not starving others by spinning
                gevent.sleep()
        finally:
            for ns in _walk_namespace_hierarchy(namespace):
                self.consumers[ns].remove(queue)
                if not self.consumers[ns]:
                    del self.consumers[ns]
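
The jittered timeout deserves a note: each idle listener waits a random fraction of max_timeout, so many clients don't all wake and PING at the same instant. A condensed sketch of the timeout-then-None pattern (queue contents invented):

import random
from gevent.queue import Queue, Empty

q = Queue()
q.put("message")
max_timeout = 2.0

def next_item():
    # Jitter the timeout so many idle listeners don't wake simultaneously.
    timeout = max_timeout - random.uniform(0, max_timeout / 2)
    try:
        return q.get(block=True, timeout=timeout)
    except Empty:
        return None  # caller treats None as "send a keepalive PING"

print(next_item())  # "message"
print(next_item())  # None, after 1-2 seconds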
mailbox.py (project: Pyrlang, author: esl)
def get(self):
        """ Receives ANY message whatever is the first in the queue. Blocks the
            greenlet if the queue is empty. Other greenlets will continue
            to run.
        """
        return self.queue_.get()
mailbox.py (project: Pyrlang, author: esl)
def get_nowait(self):
        """ Receives ANY message whatever is the first or raises.

            :raises queue.Empty: If the queue is empty
        """
        return self.queue_.get_nowait()
mailbox.py (project: Pyrlang, author: esl)
def receive_wait(self, filter_fn: Callable):
        """ Repeatedly call receive(filter) until the result is found. Other
            greenlets can continue to run cooperatively.

            :param filter_fn: A callable which checks if message is desired
                (and returns True) or should be skipped (and returns False)
        """
        while True:
            LOG(self.queue_.queue)

            m = self.receive(filter_fn=filter_fn)
            if m is not None:
                return m

            gevent.sleep(0.0)
mailbox.py (project: Pyrlang, author: esl)
def receive(self, filter_fn: Callable):
        """ Apply filter to all messages in the inbox, first message for which
            filter returns True will be returned.

            :param filter_fn: A callable which checks if message is desired
                (and returns True) or should be skipped (and returns False)
            :returns: Message, if the filter returned True, otherwise ``None``
                if no message matches or the mailbox was empty
        """
        if self.queue_.empty():
            return None

        # Try each element in the queue: take it, check it, and put it back at
        # the end (NOTE: non-matching messages are re-queued, so order changes)
        try:
            for i in range(len(self.queue_)):
                m = self.queue_.get_nowait()

                if filter_fn(m):
                    LOG("Mailbox: match return", m)
                    return m

                self.queue_.put(m)

        except queue.Empty:
            pass

        return None
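
A condensed, standalone version of this selective receive, with the same caveat that non-matching messages are re-queued at the end and lose their original order (the helper and message shapes are invented):

from gevent.queue import Queue, Empty

def receive(q, filter_fn):
    """Return the first queued message matching filter_fn, else None."""
    try:
        for _ in range(q.qsize()):
            m = q.get_nowait()
            if filter_fn(m):
                return m
            q.put(m)  # no match: rotate to the back of the queue
    except Empty:
        pass
    return None

q = Queue()
q.put(("noise", 1))
q.put(("data", 42))
print(receive(q, lambda m: m[0] == "data"))  # ('data', 42)
print(list(q.queue))  # [('noise', 1)] - rotated, order no longer guaranteed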
crawl.py (project: girlfriend, author: chihongze)
def _default_parser(context, response, queue):
    """???Response?????
    """
    content_type = response.headers["content-type"]
    if content_type.startswith("application/json"):
        return response.json()
    else:
        return response.text
crawl.py (project: girlfriend, author: chihongze)
def _sync_execute(self, context, start_req, parser):
        queue = list(start_req)
        result = []
        while queue:
            req = queue.pop(0)
            req = self._check_req(req)
            if req.parser is None:
                req.parser = parser
            result.append(req(context, queue))
        return result
crawl.py (project: girlfriend, author: chihongze)
def __call__(self, context, queue):
        """
        :param context: the workflow context
        :param queue: the request queue
        """
        try:
            response = self.method(*self.args, **self.kws)
            result = self.parser(context, response, queue)
            if self.sleep:
                gevent.sleep(self.sleep)
            return result
        except Exception:
            context.logger.exception(u"crawl error")
            return sys.exc_info()
connection_pool.py (project: kingpin, author: pinterest)
def __init__(self, pool_name, pool_size, close_conn_f, conn_cls,
                 *conn_args, **conn_kwargs):
        """Constructor.

        Args:
            pool_name: name of the pool.
            pool_size: max number of connections to create in the pool.
            close_conn_f: function to close a connection. It should take
                exactly one argument, an object returned by conn_cls.
            conn_cls: python class or function for creating a connection.
            conn_args, conn_kwargs: arguments passed to conn_cls to
                create a connection.

        """
        self.pool_name = pool_name
        self.pool_size = pool_size
        assert close_conn_f is None or hasattr(close_conn_f, '__call__')
        self.close_conn_f = close_conn_f
        assert hasattr(conn_cls, '__call__')
        self.conn_cls = conn_cls
        self.conn_args = conn_args
        self.conn_kwargs = conn_kwargs
        # The number of connections in the pool that are ever used,
        # e.g. total unique number of connections returned by get().
        # This is the maximum number of concurrent connections ever reached.
        self.num_connected = 0

        self._queue = gevent.queue.LifoQueue(maxsize=pool_size)

        for _ in range(pool_size):
            # Pre-populate the pool with connection holders.
            self._queue.put(ConnectionHolder(pool_name))

        # Run garbage collection on unused connections.
        # Randomize the GC job start time.
        start_after_secs = random.randint(0, 1000 * GC_INTERVAL_SECS) / 1000.0
        self._gc_job = Periodical("ConnPool-GC-%s" % pool_name,
                                  GC_INTERVAL_SECS, start_after_secs,
                                  self._gc_unused_conn, MAX_CONN_AGE_SECS)

        self.desc = self._get_desc()
connection_pool.py (project: kingpin, author: pinterest)
def get(self, block=True, timeout=None):
        """Get a connection holder with connection object (conn) populated.

        Args:
            block: whether to wait if queue is empty.
            timeout: the max seconds to wait. If no connection is available
            after timeout, a gevent.queue.Empty exception is thrown.

        Returns:
            a ConnectionHolder object with conn populated.

        """
        conn_holder = self._queue.get(block, timeout)
        if conn_holder.conn is None:
            tm = None
            try:
                # In case self._create_conn() blocks, it should block for max
                # timeout seconds.
                tm = gevent.Timeout.start_new(timeout, gevent.queue.Empty)
                conn_holder.set_conn(self._create_conn())
            except:
                # If we fail to create a connection, we put conn_holder back
                # and re-raise the exception.
                conn_holder.set_conn(None)
                self.put(conn_holder)
                raise
            finally:
                if tm:
                    tm.cancel()

            self.num_connected += 1

        conn_holder.last_access_time = time.time()
        return conn_holder
connection_pool.py (project: kingpin, author: pinterest)
def put(self, conn_holder, replace=False):
        """Put back the conn_holder (returned by get()) in queue.

        Args:
            conn_holder: connection holder returned by get()
            replace: whether to create a new replacement for this connection.

        """
        assert self._queue.qsize() < self.pool_size
        assert conn_holder.pool_name == self.pool_name

        if replace:
            self._close_conn(conn_holder)

        self._queue.put_nowait(conn_holder)
connection_pool.py (project: kingpin, author: pinterest)
def qsize(self):
        """Return the free objects in the queue."""
        return self._queue.qsize()
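
A LifoQueue gives the pool stack semantics: the most recently returned connection is handed out next, keeping a small working set warm while idle holders sink down for the GC job to close. A toy sketch of that design choice (the holder dicts stand in for kingpin's ConnectionHolder):

from gevent.queue import LifoQueue

pool = LifoQueue(maxsize=2)
for i in range(2):
    pool.put({"id": i, "conn": None})  # pre-populated holders; conn made lazily

holder = pool.get(timeout=1)
if holder["conn"] is None:
    holder["conn"] = "opened-lazily"  # stand-in for a real connect()
print(holder)  # {'id': 1, ...} - LIFO hands back the most recent holder
pool.put(holder)  # always return the holder when done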

