Python sleep() usage examples (source code)

locks_test.py (project: noc-orchestrator, author: DirceuSilvaLabs)
def test_notify_n_with_timeout(self):
        # Register callbacks 0, 1, 2, and 3. Callback 1 has a timeout.
        # Wait for that timeout to expire, then do notify(2) and make
        # sure everyone runs. Verifies that a timed-out callback does
        # not count against the 'n' argument to notify().
        c = locks.Condition()
        self.record_done(c.wait(), 0)
        self.record_done(c.wait(timedelta(seconds=0.01)), 1)
        self.record_done(c.wait(), 2)
        self.record_done(c.wait(), 3)

        # Wait for callback 1 to time out.
        yield gen.sleep(0.02)
        self.assertEqual(['timeout'], self.history)

        c.notify(2)
        yield gen.sleep(0.01)
        self.assertEqual(['timeout', 0, 2], self.history)
        self.assertEqual(['timeout', 0, 2], self.history)
        c.notify()
        self.assertEqual(['timeout', 0, 2, 3], self.history)
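The snippet above is Tornado's own locks_test.py vendored into the project. A minimal, self-contained sketch of the same Condition / gen.sleep() pattern outside the test harness (the names waiter and main are illustrative, not taken from the project):

from datetime import timedelta

from tornado import gen, locks
from tornado.ioloop import IOLoop

cond = locks.Condition()


@gen.coroutine
def waiter(name, timeout=None):
    # Condition.wait() resolves to True when notified, False if it times out.
    notified = yield cond.wait(timeout)
    print('%s notified=%s' % (name, notified))


@gen.coroutine
def main():
    waiter('a')                             # waits until notified
    waiter('b', timedelta(seconds=0.01))    # times out
    yield gen.sleep(0.02)                   # let 'b' time out
    cond.notify()                           # wakes 'a'
    yield gen.sleep(0.01)                   # give 'a' a chance to run


IOLoop.current().run_sync(main)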
locks_test.py (project: noc-orchestrator, author: DirceuSilvaLabs)
def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        sem = locks.Semaphore(value=0)
        futures = [sem.acquire(timedelta(seconds=0.01)) for _ in range(101)]

        future = sem.acquire()
        self.assertEqual(102, len(sem._waiters))

        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(sem._waiters))

        # Final waiter is still active.
        self.assertFalse(future.done())
        sem.release()
        self.assertTrue(future.done())

        # Prevent "Future exception was never retrieved" messages.
        for future in futures:
            self.assertRaises(TimeoutError, future.result)
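Outside the test class, the same timeout behaviour of Semaphore.acquire() can be sketched as below (assuming Tornado 4.x or later, where a timed-out acquire() raises gen.TimeoutError):

from datetime import timedelta

from tornado import gen, locks
from tornado.ioloop import IOLoop

sem = locks.Semaphore(value=0)   # starts exhausted, so acquire() must wait


@gen.coroutine
def main():
    try:
        # Give up if nobody releases the semaphore within 10 ms.
        yield sem.acquire(timedelta(seconds=0.01))
    except gen.TimeoutError:
        print('acquire timed out')

    sem.release()
    yield sem.acquire()          # succeeds immediately now
    print('acquired')


IOLoop.current().run_sync(main)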
locks_test.py (project: noc-orchestrator, author: DirceuSilvaLabs)
def test_context_manager_contended(self):
        sem = locks.Semaphore()
        history = []

        @gen.coroutine
        def f(index):
            with (yield sem.acquire()):
                history.append('acquired %d' % index)
                yield gen.sleep(0.01)
                history.append('release %d' % index)

        yield [f(i) for i in range(2)]

        expected_history = []
        for i in range(2):
            expected_history.extend(['acquired %d' % i, 'release %d' % i])

        self.assertEqual(expected_history, history)
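The context-manager form used here generalizes to throttling any set of coroutines; a hedged sketch (the limit of 2 and the worker names are arbitrary choices for illustration):

from tornado import gen, locks
from tornado.ioloop import IOLoop

sem = locks.Semaphore(2)         # at most two workers inside the block at once


@gen.coroutine
def worker(index):
    # 'with (yield sem.acquire())' releases the semaphore on exit,
    # even if the body raises.
    with (yield sem.acquire()):
        print('worker %d running' % index)
        yield gen.sleep(0.01)


@gen.coroutine
def main():
    yield [worker(i) for i in range(5)]


IOLoop.current().run_sync(main)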
queues_test.py (project: noc-orchestrator, author: DirceuSilvaLabs)
def test_task_done(self):
        q = self.queue_class()
        for i in range(100):
            q.put_nowait(i)

        self.accumulator = 0

        @gen.coroutine
        def worker():
            while True:
                item = yield q.get()
                self.accumulator += item
                q.task_done()
                yield gen.sleep(random() * 0.01)

        # Two coroutines share work.
        worker()
        worker()
        yield q.join()
        self.assertEqual(sum(range(100)), self.accumulator)
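The task_done()/join() pattern in this test reduces to a small stand-alone consumer sketch (the queue contents and the sleep interval are illustrative):

from random import random

from tornado import gen, queues
from tornado.ioloop import IOLoop

q = queues.Queue()


@gen.coroutine
def consumer():
    while True:
        item = yield q.get()
        try:
            print('consumed %r' % item)
            yield gen.sleep(random() * 0.01)   # simulate work
        finally:
            q.task_done()                      # exactly one task_done() per get()


@gen.coroutine
def main():
    for i in range(10):
        q.put_nowait(i)
    consumer()          # start the consumer; it loops until the program exits
    yield q.join()      # resolves once every queued item has been task_done()'d


IOLoop.current().run_sync(main)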
iostream_test.py (project: noc-orchestrator, author: DirceuSilvaLabs)
def test_streaming_until_close_future(self):
        server, client = self.make_iostream_pair()
        try:
            chunks = []

            @gen.coroutine
            def client_task():
                yield client.read_until_close(streaming_callback=chunks.append)

            @gen.coroutine
            def server_task():
                yield server.write(b"1234")
                yield gen.sleep(0.01)
                yield server.write(b"5678")
                server.close()

            @gen.coroutine
            def f():
                yield [client_task(), server_task()]
            self.io_loop.run_sync(f)
            self.assertEqual(chunks, [b"1234", b"5678"])
        finally:
            server.close()
            client.close()
locks_test.py (project: noc-orchestrator, author: DirceuSilvaLabs)
def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        c = locks.Condition()
        for _ in range(101):
            c.wait(timedelta(seconds=0.01))

        future = c.wait()
        self.assertEqual(102, len(c._waiters))

        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(c._waiters))

        # Final waiter is still active.
        self.assertFalse(future.done())
        c.notify()
        self.assertTrue(future.done())
test_subscription.py (project: mentos, author: daskos)
def test_bad_subscription(io_loop, mocker):
    framework_info = {}

    handler = mocker.Mock()
    handlers = {Event.SUBSCRIBED: handler,
                Event.HEARTBEAT: handler,
                Event.OFFERS: handler,
                Event.SHUTDOWN: handler}

    sub = Subscription(framework_info, 'zk://localhost:2181',
                       '/api/v1/scheduler', handlers, timeout=1, loop=io_loop)

    assert sub.state.current_state == States.CLOSED
    yield sub.start()
    yield gen.sleep(5)
    assert sub.state.current_state == States.CLOSED
retry.py (project: mentos, author: daskos)
def enforce(self, request=None):
        self.timings[id(request)].append(time.time())

        tries = len(self.timings[id(request)])
        if tries == 1:
            return

        if self.try_limit is not None and tries >= self.try_limit:
            raise FailedRetry

        wait_time = self.sleep_func(self.timings[id(request)])
        if wait_time is None or wait_time == 0:
            return
        elif wait_time < 0:
            raise FailedRetry

        log.debug("Waiting %d seconds until next try.", wait_time)
        yield gen.sleep(wait_time)
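As a generic illustration of the same idea (pausing between attempts with gen.sleep()), a retry helper might look like the sketch below. The name retry_with_backoff, the attempt limit and the backoff base are assumptions for illustration, not part of the mentos project:

from tornado import gen


@gen.coroutine
def retry_with_backoff(func, attempts=5, base_delay=0.5):
    """Call the coroutine func(), retrying with exponential backoff."""
    for attempt in range(attempts):
        try:
            result = yield func()
        except Exception:
            if attempt == attempts - 1:
                raise                  # out of attempts: propagate the error
            # 0.5 s, 1 s, 2 s, 4 s ... between tries
            yield gen.sleep(base_delay * (2 ** attempt))
        else:
            raise gen.Return(result)   # success (gen.Return is how a
                                       # @gen.coroutine returns a value)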
__main__.py (project: ATX, author: NetEaseGame)
def patch(self, udid):
        data = tornado.escape.json_decode(self.request.body)
        id = data['id']
        timeout = float(data.get('timeout', 20.0))
        print 'Timeout:', timeout
        result = self.results.get(id)
        if result is None:
            self.results[id] = self
            yield gen.sleep(timeout)
            if self.results.get(id) == self:
                del(self.results[id])
                self.write('null')
                self.finish()
        else:
            self.write(json.dumps(result))
            self.results.pop(id, None)
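Stripped of the Tornado handler machinery, the underlying "wait up to N seconds for a result" idea can be sketched as a small polling helper; the name wait_for_result and the 0.1 s poll interval are assumptions for illustration:

from tornado import gen
from tornado.ioloop import IOLoop


@gen.coroutine
def wait_for_result(results, key, timeout=20.0, interval=0.1):
    """Poll the results dict for key, giving up after timeout seconds."""
    deadline = IOLoop.current().time() + timeout
    while IOLoop.current().time() < deadline:
        if key in results:
            raise gen.Return(results.pop(key))
        yield gen.sleep(interval)
    raise gen.Return(None)    # timed out; mirrors the handler's 'null' response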
handlers.py (project: nbrsessionproxy, author: jupyterhub)
def proxy(self, port, path):
        if not path.startswith('/'):
            path = '/' + path

        # if we're in 'starting' let's wait a while
        for i in range(5):
            if not self.state.get('starting', False):
                break
            # Simple exponential backoff, capped at 5 seconds
            wait_time = min(1.4 ** i, 5)
            self.log.debug('Waiting {} before checking if rstudio is up'.format(wait_time))
            yield gen.sleep(wait_time)
        else:
            raise web.HTTPError(500, 'could not start rsession in time')

        # FIXME: try to not start multiple processes at a time with some locking here
        if 'proc' not in self.state:
            self.log.info('No existing process rsession process found')
            yield self.start_process()

        return (yield super().proxy(self.port, path))
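The startup wait loop above boils down to "retry a check with growing pauses, then give up"; a generic hedged sketch of that shape (the helper name and constants are assumed, not taken from nbrsessionproxy):

from tornado import gen


@gen.coroutine
def wait_until(check, tries=5, cap=5.0):
    """Yield until check() returns True, backing off between attempts."""
    for i in range(tries):
        if check():
            raise gen.Return(True)
        # exponential backoff, capped at `cap` seconds
        yield gen.sleep(min(1.4 ** i, cap))
    raise gen.Return(False)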
__main__.py (project: AutomatorX, author: xiaoyaojjian)
def patch(self, udid):
        data = tornado.escape.json_decode(self.request.body)
        id = data['id']
        timeout = float(data.get('timeout', 20.0))
        print 'Timeout:', timeout
        result = self.results.get(id)
        if result is None:
            self.results[id] = self
            yield gen.sleep(timeout)
            if self.results.get(id) == self:
                del(self.results[id])
                self.write('null')
                self.finish()
        else:
            self.write(json.dumps(result))
            self.results.pop(id, None)
test_iostream.py (project: microProxy, author: mike820324)
def test_streaming_until_close_future(self):
        server, client = self.make_iostream_pair()
        try:
            chunks = []

            @gen.coroutine
            def client_task():
                yield client.read_until_close(streaming_callback=chunks.append)

            @gen.coroutine
            def server_task():
                yield server.write(b"1234")
                yield gen.sleep(0.01)
                yield server.write(b"5678")
                server.close()

            @gen.coroutine
            def f():
                yield [client_task(), server_task()]
            self.io_loop.run_sync(f)
            self.assertEqual(chunks, [b"1234", b"5678"])
        finally:
            server.close()
            client.close()
locks_test.py (project: My-Web-Server-Framework-With-Python2.7, author: syjsu)
def test_notify_n_with_timeout(self):
        # Register callbacks 0, 1, 2, and 3. Callback 1 has a timeout.
        # Wait for that timeout to expire, then do notify(2) and make
        # sure everyone runs. Verifies that a timed-out callback does
        # not count against the 'n' argument to notify().
        c = locks.Condition()
        self.record_done(c.wait(), 0)
        self.record_done(c.wait(timedelta(seconds=0.01)), 1)
        self.record_done(c.wait(), 2)
        self.record_done(c.wait(), 3)

        # Wait for callback 1 to time out.
        yield gen.sleep(0.02)
        self.assertEqual(['timeout'], self.history)

        c.notify(2)
        yield gen.sleep(0.01)
        self.assertEqual(['timeout', 0, 2], self.history)
        self.assertEqual(['timeout', 0, 2], self.history)
        c.notify()
        self.assertEqual(['timeout', 0, 2, 3], self.history)
locks_test.py (project: My-Web-Server-Framework-With-Python2.7, author: syjsu)
def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        sem = locks.Semaphore(value=0)
        futures = [sem.acquire(timedelta(seconds=0.01)) for _ in range(101)]

        future = sem.acquire()
        self.assertEqual(102, len(sem._waiters))

        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(sem._waiters))

        # Final waiter is still active.
        self.assertFalse(future.done())
        sem.release()
        self.assertTrue(future.done())

        # Prevent "Future exception was never retrieved" messages.
        for future in futures:
            self.assertRaises(TimeoutError, future.result)
locks_test.py (project: My-Web-Server-Framework-With-Python2.7, author: syjsu)
def test_context_manager_contended(self):
        sem = locks.Semaphore()
        history = []

        @gen.coroutine
        def f(index):
            with (yield sem.acquire()):
                history.append('acquired %d' % index)
                yield gen.sleep(0.01)
                history.append('release %d' % index)

        yield [f(i) for i in range(2)]

        expected_history = []
        for i in range(2):
            expected_history.extend(['acquired %d' % i, 'release %d' % i])

        self.assertEqual(expected_history, history)
queues_test.py (project: My-Web-Server-Framework-With-Python2.7, author: syjsu)
def test_task_done(self):
        q = self.queue_class()
        for i in range(100):
            q.put_nowait(i)

        self.accumulator = 0

        @gen.coroutine
        def worker():
            while True:
                item = yield q.get()
                self.accumulator += item
                q.task_done()
                yield gen.sleep(random() * 0.01)

        # Two coroutines share work.
        worker()
        worker()
        yield q.join()
        self.assertEqual(sum(range(100)), self.accumulator)
iostream_test.py (project: My-Web-Server-Framework-With-Python2.7, author: syjsu)
def test_streaming_until_close_future(self):
        server, client = self.make_iostream_pair()
        try:
            chunks = []

            @gen.coroutine
            def client_task():
                yield client.read_until_close(streaming_callback=chunks.append)

            @gen.coroutine
            def server_task():
                yield server.write(b"1234")
                yield gen.sleep(0.01)
                yield server.write(b"5678")
                server.close()

            @gen.coroutine
            def f():
                yield [client_task(), server_task()]
            self.io_loop.run_sync(f)
            self.assertEqual(chunks, [b"1234", b"5678"])
        finally:
            server.close()
            client.close()
tornadoweb.py (project: tenacity, author: jd)
def call(self, fn, *args, **kwargs):
        self.begin(fn)

        result = NO_RESULT
        exc_info = None
        start_time = now()

        while True:
            do = self.iter(result=result, exc_info=exc_info,
                           start_time=start_time)
            if isinstance(do, DoAttempt):
                try:
                    result = yield fn(*args, **kwargs)
                    exc_info = None
                    continue
                except Exception:
                    result = NO_RESULT
                    exc_info = sys.exc_info()
                    continue
            elif isinstance(do, DoSleep):
                result = NO_RESULT
                exc_info = None
                yield self.sleep(do)
            else:
                raise gen.Return(do)
builder.py (project: binderhub, author: jupyterhub)
def keep_alive(self):
        """Constantly emit keepalive events

        So that intermediate proxies don't terminate an idle connection
        """
        self._keepalive = True
        while True:
            await gen.sleep(self.KEEPALIVE_INTERVAL)
            if not self._keepalive:
                return
            try:
                # lines that start with : are comments
                # and should be ignored by event consumers
                self.write(':keepalive\n\n')
                await self.flush()
            except StreamClosedError:
                return
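Detached from the BinderHub handler, the periodic pattern is just "tick every N seconds until told to stop"; a minimal sketch in the same native-coroutine style (the send callback and the interval are illustrative assumptions):

from tornado import gen


async def keepalive(send, interval=30, should_stop=lambda: False):
    """Call send() every interval seconds until should_stop() returns True."""
    while not should_stop():
        await gen.sleep(interval)
        send(':keepalive\n\n')    # SSE comment line, ignored by event consumers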
locks_test.py (project: annotated-py-tornado, author: hhstore)
def test_notify_n_with_timeout(self):
        # Register callbacks 0, 1, 2, and 3. Callback 1 has a timeout.
        # Wait for that timeout to expire, then do notify(2) and make
        # sure everyone runs. Verifies that a timed-out callback does
        # not count against the 'n' argument to notify().
        c = locks.Condition()
        self.record_done(c.wait(), 0)
        self.record_done(c.wait(timedelta(seconds=0.01)), 1)
        self.record_done(c.wait(), 2)
        self.record_done(c.wait(), 3)

        # Wait for callback 1 to time out.
        yield gen.sleep(0.02)
        self.assertEqual(['timeout'], self.history)

        c.notify(2)
        yield gen.sleep(0.01)
        self.assertEqual(['timeout', 0, 2], self.history)
        self.assertEqual(['timeout', 0, 2], self.history)
        c.notify()
        self.assertEqual(['timeout', 0, 2, 3], self.history)
locks_test.py (project: annotated-py-tornado, author: hhstore)
def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        c = locks.Condition()
        for _ in range(101):
            c.wait(timedelta(seconds=0.01))

        future = c.wait()
        self.assertEqual(102, len(c._waiters))

        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(c._waiters))

        # Final waiter is still active.
        self.assertFalse(future.done())
        c.notify()
        self.assertTrue(future.done())
locks_test.py (project: annotated-py-tornado, author: hhstore)
def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        sem = locks.Semaphore(value=0)
        futures = [sem.acquire(timedelta(seconds=0.01)) for _ in range(101)]

        future = sem.acquire()
        self.assertEqual(102, len(sem._waiters))

        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(sem._waiters))

        # Final waiter is still active.
        self.assertFalse(future.done())
        sem.release()
        self.assertTrue(future.done())

        # Prevent "Future exception was never retrieved" messages.
        for future in futures:
            self.assertRaises(TimeoutError, future.result)

