Example source code for the Python class CancelledError()

resource.py (project: fluiddb, author: fluidinfo)
def _logInternalError(request, exception, fail, resourceClass):
    """
    Log a FluidDB internal error. Apart from doing the obvious things, we
    also pull all relevant tags off any Thrift error (our internal
    errors tend to come from calls we've made to the facade service via
    Thrift) and log them too.
    """
    log.msg('Request %s: Exception calling %r#deferred_render_%r ' %
            (request._fluidDB_reqid, resourceClass.__class__.__name__,
             request.method))
    log.msg(exception)
    traceback = fail.getTraceback()

    # If we get a CancelledError, we only log it as a warning; it is not a
    # severe error and it causes too much noise in the log files.
    if fail.check(CancelledError):
        logging.warning(traceback)
    else:
        logging.error(traceback)

    tags = thriftExceptions.get(exception.__class__)
    if tags:
        msg = []
        for tag in tags:
            msg.append('Failure tag %r: %r' %
                       (tag, getattr(exception, tag)))
        if msg:
            log.msg('\n'.join(msg))
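
A minimal, self-contained sketch of the check behind the warning-versus-error split above, assuming only Twisted's standard Deferred and Failure APIs: cancelling a Deferred delivers a Failure wrapping CancelledError, which fail.check(CancelledError) recognizes.

import logging
from twisted.internet.defer import CancelledError, Deferred

def log_failure(fail):
    # Cancellation is routine (e.g. a client disconnect), so keep it quiet;
    # anything else is a genuine error.
    if fail.check(CancelledError):
        logging.warning(fail.getTraceback())
    else:
        logging.error(fail.getTraceback())

d = Deferred()
d.addErrback(log_failure)
d.cancel()  # errbacks with Failure(CancelledError())
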
test_resource.py (project: fluiddb, author: fluidinfo)
def testDelayedDisconnectDoesNotFinishRequest(self):
        """
        A C{CancelledError} exception is raised if content cannot be read
        from the request midway through processing, due to the client
        disconnecting.  In such cases, the C{Request.finish} method is not
        invoked by the L{handleRequestError} handler to avoid causing a
        failure in Twisted.
        """
        failure = Failure(CancelledError("Client disconnected partway."))
        handleRequestError(failure, self.request, self.resource)
        self.assertFalse(self.request.finished)
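
For context, a hypothetical sketch of the behaviour this test asserts (the real fluiddb handleRequestError is not shown here): log the failure, but skip Request.finish when the failure is a CancelledError, since the client has already disconnected and finishing would only provoke a second error inside Twisted.

from twisted.internet.defer import CancelledError
from twisted.python import log

def handleRequestError(failure, request, resource):
    # Hypothetical sketch only; the real handler does more than this.
    log.err(failure)
    if not failure.check(CancelledError):
        request.finish()
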
deferral.py (project: p2pool-cann, author: ilsawa)
def start(self, period):
        assert not self.running
        self.running = True
        self._df = self._worker(period).addErrback(lambda fail: fail.trap(defer.CancelledError))
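
The errback above exists so that stopping the loop is silent: cancelling the worker Deferred fails it with CancelledError, which fail.trap swallows. A hypothetical stop method matching this pattern (not part of the snippet) might look like:

def stop(self):
    # Hypothetical counterpart to start(): cancelling _df errbacks it with
    # CancelledError, which the trap added in start() absorbs, so shutdown
    # does not log a spurious error.
    assert self.running
    self.running = False
    self._df.cancel()
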
service.py (project: crondeamon, author: zhoukunpeng504)
def _init(cls, tid, initcode=True):
        try:
            tid = int(tid)
            result = yield run_conn_fun(
                "runQuery",
                "select ip,svnpath,svnuser,svnpasswd,version,rule from cron_task WHERE tid=%s",
                (tid,))
            ip, svnpath, svnuser, svnpasswd, svnversion, rule = result[0]
            if initcode:
                # Initialise the task code on the sub-node; give the RPC two
                # seconds and treat a cancelled (timed-out) call as non-fatal.
                _defer = SubRpc().xmlrpc_init(int(tid), svnpath, int(svnversion),
                                              svnuser, svnpasswd)
                set_time_out(2, _defer)
                try:
                    yield _defer
                except defer.CancelledError:
                    pass
            # Stop any schedule that is already running for this task id.
            if tid in cls.BUFF and cls.BUFF[tid].running:
                cls.BUFF[tid].stop()
            schedule = CronSchedule(rule)
            sc = ScheduledCall(cls._run, tid)
            sc.start(schedule)
            cls.BUFF[tid] = sc
            defer.returnValue(True)
        except Exception as e:
            defer.returnValue((False, str(e)))
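
set_time_out is not defined in this snippet; a plausible sketch, assuming it simply cancels the Deferred after a delay, which is what makes the except defer.CancelledError above act as a timeout handler:

from twisted.internet import reactor

def set_time_out(seconds, deferred):
    # Hypothetical helper: cancel the Deferred if it has not fired within
    # `seconds`, and disarm the timer if it fires first.
    delayed = reactor.callLater(seconds, deferred.cancel)

    def _disarm(result):
        if delayed.active():
            delayed.cancel()
        return result

    deferred.addBoth(_disarm)
    return deferred
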
service.py (project: crondeamon, author: zhoukunpeng504)
def _init(cls, tid, initcode=True):
        try:
            tid = int(tid)
            result = yield run_conn_fun(
                "runQuery",
                "select ip,svnpath,svnuser,svnpasswd,version from task_task WHERE tid=%s",
                (tid,))
            ip, svnpath, svnuser, svnpasswd, svnversion = result[0]
            if initcode:
                # Initialise the task code on the sub-node; a cancelled
                # (timed-out) RPC is tolerated and simply ignored.
                _defer = SubRpc().xmlrpc_init(tid, svnpath, svnversion,
                                              svnuser, svnpasswd, mode="task")
                set_time_out(2, _defer)
                try:
                    yield _defer
                except defer.CancelledError:
                    pass
            # Tear down any existing looping check for this task id.
            if tid in cls.BUFF:
                if cls.BUFF[tid].running:
                    cls.BUFF[tid].stop()
                del cls.BUFF[tid]
            _task = task.LoopingCall(cls._check, tid)
            _task.start(60, now=False)  # re-check every 60 seconds; skip the immediate first run
            yield cls._check(tid)
            cls.BUFF[tid] = _task
            defer.returnValue(True)
        except Exception as e:
            defer.returnValue((False, str(e)))
main.py (project: iotdm-pyclient, author: peterchauyw)
def print_error(self, failure, card):
        r = failure.trap(InvalidURI, FragmentNotAllowed, socket.gaierror, socket.error, error.RequestTimedOut, defer.CancelledError)
        if r == InvalidURI:
            log.msg("Error: invalid URI")
            card.response_payload.text = "Error: Invalid URI!"
        elif r == FragmentNotAllowed:
            log.msg("Error: fragment found")
            card.response_payload.text = "Error: URI fragment not allowed for CoAP!"
        elif r == socket.gaierror or r == socket.error:
            log.msg("Error: hostname not found")
            card.response_payload.text = "Error: hostname not found!"
        elif r == error.RequestTimedOut:
            log.msg("Error: request timed out")
            card.response_payload.text = 'Error: request timed out!'
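
The elif chain above works because Failure.trap returns the first exception class from its arguments that matches the wrapped exception, and re-raises the exception if nothing matches, so the result can be compared against the classes directly. A small self-contained illustration:

from twisted.python.failure import Failure
from twisted.internet.defer import CancelledError

fail = Failure(CancelledError("gave up waiting"))
matched = fail.trap(ValueError, CancelledError)
assert matched is CancelledError
# fail.trap(ValueError) alone would re-raise the CancelledError instead.
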
consumer.py (project: afkak, author: ciena)
def _handle_offset_error(self, failure):
        """
        Retry the offset fetch request if appropriate.

        Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we
        log a warning.  This should perhaps be extended to abort sooner on
        certain errors.
        """
        # outstanding request got errback'd, clear it
        self._request_d = None

        if self._stopping and failure.check(CancelledError):
            # Not really an error
            return
        # Do we need to abort?
        if (self.request_retry_max_attempts != 0 and
                self._fetch_attempt_count >= self.request_retry_max_attempts):
            log.debug(
                "%r: Exhausted attempts: %d fetching offset from kafka: %r",
                self, self.request_retry_max_attempts, failure)
            self._start_d.errback(failure)
            return
        # Decide how to log this failure: once we have retried enough times to
        # reach retry_max_delay, log a warning on every other attempt; log at
        # debug otherwise.
        if (self.retry_delay < self.retry_max_delay or
                0 == (self._fetch_attempt_count % 2)):
            log.debug("%r: Failure fetching offset from kafka: %r", self,
                      failure)
        else:
            # We've retried until we hit the max delay, log at warn
            log.warning("%r: Still failing fetching offset from kafka: %r",
                        self, failure)
        self._retry_fetch()
consumer.py (project: afkak, author: ciena)
def _handle_commit_error(self, failure, retry_delay, attempt):
        """ Retry the commit request, depending on failure type

        Depending on the type of the failure, we retry the commit request
        with the latest processed offset, or callback/errback self._commit_ds
        """
        # Check if we are stopping and the request was cancelled
        if self._stopping and failure.check(CancelledError):
            # Not really an error
            return self._deliver_commit_result(self._last_committed_offset)

        # Check that the failure type is a Kafka error...this could maybe be
        # a tighter check to determine whether a retry will succeed...
        if not failure.check(KafkaError):
            log.error("Unhandleable failure during commit attempt: %r\n\t%r",
                      failure, failure.getBriefTraceback())
            return self._deliver_commit_result(failure)

        # Do we need to abort?
        if (self.request_retry_max_attempts != 0 and
                attempt >= self.request_retry_max_attempts):
            log.debug("%r: Exhausted attempts: %d to commit offset: %r",
                      self, self.request_retry_max_attempts, failure)
            return self._deliver_commit_result(failure)

        # Check the retry_delay to see if we should log at the higher level.
        # Using attempt % 2 gets us roughly one warning per minute with the
        # default timings.
        if (retry_delay < self.retry_max_delay or 0 == (attempt % 2)):
            log.debug("%r: Failure committing offset to kafka: %r", self,
                      failure)
        else:
            # We've retried until we hit the max delay, log alternately at warn
            log.warning("%r: Still failing committing offset to kafka: %r",
                        self, failure)

        # Schedule a delayed call to retry the commit
        retry_delay = min(retry_delay * REQUEST_RETRY_FACTOR,
                          self.retry_max_delay)
        self._commit_call = self._get_clock().callLater(
            retry_delay, self._send_commit_request, retry_delay, attempt + 1)
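
Both handlers above special-case CancelledError while self._stopping is set. A hypothetical sketch of the shutdown path they assume (afkak's real stop() differs in detail): set the flag first, then cancel outstanding work so the resulting CancelledError is treated as routine rather than retried.

def stop(self):
    # Hypothetical sketch only: mark shutdown before cancelling, so the
    # CancelledError delivered to the error handlers is recognised as part
    # of stopping rather than as a real failure.
    self._stopping = True
    if self._request_d is not None:
        self._request_d.cancel()
    if self._commit_call is not None and self._commit_call.active():
        self._commit_call.cancel()
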
consumer.py (project: afkak, author: ciena)
def _handle_processor_error(self, failure):
        """Handle a failure in the processing of a block of messages

        This method is called when the processor func fails while processing
        a block of messages. Since we can't know how best to handle a
        processor failure, we just :func:`errback` our :func:`start` method's
        deferred to let our user know about the failure.
        """
        # Check if we're stopping/stopped and the errback of the processor
        # deferred is just the cancelling we initiated.  If so, we skip
        # notifying via the _start_d deferred, as it will be 'callback'd at the
        # end of stop()
        if not (self._stopping and failure.check(CancelledError)):
            if self._start_d:  # Make sure we're not already stopped
                self._start_d.errback(failure)
test_consumer.py (project: afkak, author: ciena)
def test_consumer_stop_during_commit(self):
        # setup a client which will return a message block in response to fetch
        # and just fail on the commit
        mockclient = Mock()
        mockclient.send_offset_commit_request.return_value = Deferred()
        mockclient.send_fetch_request.return_value = Deferred()
        the_group = 'U2'
        the_topic = 'test_consumer_stop_during_commit'
        the_part = 11
        the_offset = 0
        # Create a consumer and muck with the state a bit...
        consumer = Consumer(mockclient, the_topic, the_part, Mock(), the_group,
                            auto_commit_every_ms=0)
        mockback = Mock()
        start_d = consumer.start(the_offset)
        start_d.addCallback(mockback)
        consumer._last_processed_offset = the_offset  # Fake processed msgs

        # Start a commit, don't fire the deferred, assert there's no result
        commit_d = consumer.commit()
        self.assertNoResult(commit_d)
        self.assertEqual(consumer._commit_ds[0], commit_d)

        # Stop the consumer, assert the start_d fired, and commit_d errbacks
        consumer.stop()
        mockback.assert_called_once_with('Stopped')
        self.failureResultOf(commit_d, CancelledError)
test_adaptendpoint.py (project: ccs-twistedextensions, author: apple)
def test_disconnectWhileConnecting(self):
        """
        When the L{IConnector} is told to C{disconnect} before an in-progress
        L{Deferred} from C{connect} has fired, it will cancel that L{Deferred}.
        """
        self.connector.disconnect()
        self.assertEqual(len(self.factory.fails), 1)
        self.assertTrue(self.factory.fails[0].reason.check(CancelledError))
test_adaptendpoint.py (project: ccs-twistedextensions, author: apple)
def test_stopConnectingWhileConnecting(self):
        """
        When the L{IConnector} is told to C{stopConnecting} while another
        attempt is still in flight, it cancels that connection.
        """
        self.connector.stopConnecting()
        self.assertEqual(len(self.factory.fails), 1)
        self.assertTrue(self.factory.fails[0].reason.check(CancelledError))
omci_cc.py (project: voltha, author: opencord)
def _request_failure(self, value, tx_tid):
        if tx_tid in self._requests:
            (_, _, _, timeout) = self._requests.pop(tx_tid)
        else:
            # tx_msg = None
            timeout = 0

        if isinstance(value, failure.Failure):
            value.trap(CancelledError)
            self._rx_timeouts += 1
            self._consecutive_errors += 1
            self.log.info('timeout', tx_id=tx_tid, timeout=timeout)
            value = failure.Failure(TimeoutError(timeout, "Deferred"))

        return value
test_internet.py (project: zenchmarks, author: squeaky-pl)
def test_stopServiceBeforeStartFinished(self):
        """
        L{StreamServerEndpointService.stopService} cancels the L{Deferred}
        returned by C{listen} if it has not yet fired.  No error will be logged
        about the cancellation of the listen attempt.
        """
        self.svc.privilegedStartService()
        result = self.svc.stopService()
        l = []
        result.addBoth(l.append)
        self.assertEqual(l, [None])
        self.assertEqual(self.flushLoggedErrors(CancelledError), [])
test_internet.py (project: zenchmarks, author: squeaky-pl)
def test_stopServiceCancelStartError(self):
        """
        L{StreamServerEndpointService.stopService} cancels the L{Deferred}
        returned by C{listen} if it has not fired yet.  An error will be logged
        if the resulting exception is not L{CancelledError}.
        """
        self.fakeServer.cancelException = ZeroDivisionError()
        self.svc.privilegedStartService()
        result = self.svc.stopService()
        l = []
        result.addCallback(l.append)
        self.assertEqual(l, [None])
        stoppingErrors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(stoppingErrors), 1)
test_internet.py (project: zenchmarks, author: squeaky-pl)
def test_startServiceWhileStopped(self):
        """
        When L{ClientService} is stopped - that is,
        L{ClientService.stopService} has been called and the L{Deferred} it
        returns has fired - calling L{startService} will cause a new connection
        to be made, and new calls to L{whenConnected} to succeed.
        """
        cq, service = self.makeReconnector(fireImmediately=False)
        stopped = service.stopService()
        self.successResultOf(stopped)
        self.failureResultOf(service.whenConnected(), CancelledError)
        service.startService()
        cq.connectQueue[-1].callback(None)
        self.assertIdentical(cq.applicationProtocols[-1],
                             self.successResultOf(service.whenConnected()))
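
A minimal usage sketch of the pattern these tests exercise (the host and port are placeholders): while a ClientService is stopped, whenConnected() fails with CancelledError; after startService() it resolves once a connection is established.

from twisted.application.internet import ClientService
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.protocol import Factory, Protocol

endpoint = TCP4ClientEndpoint(reactor, "example.com", 8080)  # placeholder address
service = ClientService(endpoint, Factory.forProtocol(Protocol))
service.startService()
d = service.whenConnected()  # fires with the protocol once connected;
                             # fails with CancelledError if the service is stopped first
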
test_internet.py (project: zenchmarks, author: squeaky-pl)
def test_clientConnectionLostWhileStopping(self):
        """
        When a client connection is lost while the service is stopping, the
        protocol stopping deferred is called and the reference to the protocol
        is removed.
        """
        clock = Clock()
        cq, service = self.makeReconnector(clock=clock)
        d = service.stopService()
        cq.constructedProtocols[0].connectionLost(Failure(IndentationError()))
        self.failureResultOf(service.whenConnected(), CancelledError)
        self.assertTrue(d.called)
endpoints.py (project: zenchmarks, author: squeaky-pl)
def _executeCommand(self, connection, protocolFactory):
        """
        Given a secured SSH connection, try to execute a command in a new
        channel created on it and associate the result with a protocol from the
        given factory.

        @param connection: See L{SSHCommandClientEndpoint.existingConnection}'s
            C{connection} parameter.

        @param protocolFactory: See L{SSHCommandClientEndpoint.connect}'s
            C{protocolFactory} parameter.

        @return: See L{SSHCommandClientEndpoint.connect}'s return value.
        """
        commandConnected = Deferred()
        def disconnectOnFailure(passthrough):
            # Close the connection immediately in case of cancellation, since
            # that implies user wants it gone immediately (e.g. a timeout):
            immediate = passthrough.check(CancelledError)
            self._creator.cleanupConnection(connection, immediate)
            return passthrough
        commandConnected.addErrback(disconnectOnFailure)

        channel = _CommandChannel(
            self._creator, self._command, protocolFactory, commandConnected)
        connection.openChannel(channel)
        return commandConnected
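
A hypothetical usage sketch of the endpoint above: cancelling the Deferred returned by connect (here via a timeout) is exactly the path disconnectOnFailure handles, closing the SSH connection immediately. The hostname, credentials, and 30-second timeout are placeholders.

from twisted.internet import reactor
from twisted.internet.defer import CancelledError
from twisted.internet.protocol import Factory, Protocol
from twisted.conch.endpoints import SSHCommandClientEndpoint

endpoint = SSHCommandClientEndpoint.newConnection(
    reactor, b"/bin/ls -l", b"user", b"example.com", port=22)  # placeholders
d = endpoint.connect(Factory.forProtocol(Protocol))
reactor.callLater(30, d.cancel)  # give up after 30 seconds
d.addErrback(lambda f: f.trap(CancelledError))  # treat the timeout as expected
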
test_endpoints.py (project: zenchmarks, author: squeaky-pl)
def test_connectionCancelledBeforeSecure(self):
        """
        If the connection is cancelled before the SSH transport layer has
        finished key exchange (ie, gotten to the point where we may attempt to
        authenticate), the L{Deferred} returned by
        L{SSHCommandClientEndpoint.connect} fires with a L{Failure} wrapping
        L{CancelledError} and the connection is aborted.
        """
        endpoint = SSHCommandClientEndpoint.newConnection(
            self.reactor, b"/bin/ls -l", b"dummy user",
            self.hostname, self.port, knownHosts=self.knownHosts,
            ui=FixedResponseUI(False))

        factory = Factory()
        factory.protocol = Protocol
        d = endpoint.connect(factory)

        transport = AbortableFakeTransport(None, isServer=False)
        factory = self.reactor.tcpClients[0][2]
        client = factory.buildProtocol(None)
        client.makeConnection(transport)
        d.cancel()

        self.failureResultOf(d).trap(CancelledError)
        self.assertTrue(transport.aborted)
        # Make sure the connection closing doesn't result in unexpected
        # behavior when due to cancellation:
        client.connectionLost(Failure(ConnectionDone()))

