Python seconds() usage examples (source code)
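The snippets below are collected from open-source projects and show how Twisted's reactor.seconds() is used: it returns the reactor's current notion of time as a float, and it is most often paired with reactor.callLater() and DelayedCall.getTime() to work out how long remains before a scheduled call fires. A minimal sketch of that pattern (illustrative only, not taken from any of the projects below):

from twisted.internet import reactor

def time_until(call):
    # DelayedCall.getTime() is the absolute time the call will fire,
    # measured on the same clock as reactor.seconds(), so the difference
    # is the number of seconds still remaining.
    return call.getTime() - reactor.seconds()

call = reactor.callLater(30, lambda: None)   # schedule a no-op 30 seconds out
print('fires in about %.1f seconds' % time_until(call))
call.cancel()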

game.py (project: piqueserver, author: piqueserver)
def get_time_limit(connection):
    advance_call = connection.protocol.advance_call
    if advance_call is None:
        return 'No time limit set'
    left = int(
        math.ceil((advance_call.getTime() - reactor.seconds()) / 60.0))
    return 'There are %s minutes left' % left
server.py (project: piqueserver, author: piqueserver)
def set_time_limit(self, time_limit=None, additive=False):
        advance_call = self.advance_call
        add_time = 0.0
        if advance_call is not None:
            add_time = ((advance_call.getTime() - reactor.seconds()) / 60.0)
            advance_call.cancel()
            self.advance_call = None
        time_limit = time_limit or self.default_time_limit
        if not time_limit:
            for call in self.end_calls[:]:
                call.set(None)
            return

        if additive:
            time_limit = min(time_limit + add_time, self.default_time_limit)

        seconds = time_limit * 60.0
        self.advance_call = reactor.callLater(seconds, self._time_up)

        for call in self.end_calls[:]:
            call.set(seconds)

        if self.time_announce_schedule is not None:
            self.time_announce_schedule.reset()
        self.time_announce_schedule = Scheduler(self)
        for seconds in self.time_announcements:
            self.time_announce_schedule.call_end(seconds,
                                                 self._next_time_announce)

        return time_limit
server.py (project: piqueserver, author: piqueserver)
def _next_time_announce(self):
        remaining = self.advance_call.getTime() - reactor.seconds()
        if remaining < 60.001:
            if remaining < 10.001:
                self.send_chat('%s...' % int(round(remaining)))
            else:
                self.send_chat('%s seconds remaining.' % int(round(remaining)))
        else:
            self.send_chat('%s minutes remaining.' %
                           int(round(remaining / 60)))
server.py (project: piqueserver, author: piqueserver)
def master_disconnected(self, client=None):
        ServerProtocol.master_disconnected(self, client)
        if self.master and self.master_reconnect_call is None:
            if client:
                message = 'Master connection could not be established'
            else:
                message = 'Master connection lost'
            print('%s, reconnecting in 60 seconds...' % message)
            self.master_reconnect_call = reactor.callLater(
                60, self.reconnect_master)
server.py (project: piqueserver, author: piqueserver)
def get_advance_time(self):
        if not self.advance_call:
            return None
        return self.advance_call.getTime() - self.advance_call.seconds()
player.py (project: piqueserver, author: piqueserver)
def on_connect(self):
        protocol = self.protocol
        client_ip = self.address[0]

        if client_ip in self.protocol.bans:
            name, reason, timestamp = self.protocol.bans[client_ip]

            if timestamp is not None and reactor.seconds() >= timestamp:
                protocol.remove_ban(client_ip)
                protocol.save_bans()
            else:
                print('banned user %s (%s) attempted to join' % (name,
                                                                 client_ip))
                self.disconnect(ERROR_BANNED)
                return

        manager = self.protocol.ban_manager

        if manager is not None:
            reason = manager.get_ban(client_ip)
            if reason is not None:
                print(('federated banned user (%s) attempted to join, '
                       'banned for %r') % (client_ip, reason))
                self.disconnect(ERROR_BANNED)
                return

        ServerConnection.on_connect(self)
player.py (project: piqueserver, author: piqueserver)
def on_team_join(self, team):
        if self.team is not None:
            if self.protocol.teamswitch_interval:
                teamswitch_interval = self.protocol.teamswitch_interval
                if teamswitch_interval == 'never':
                    self.send_chat('Switching teams is not allowed')
                    return False
                if (self.last_switch is not None and
                        reactor.seconds() - self.last_switch < teamswitch_interval * 60):
                    self.send_chat(
                        'You must wait before switching teams again')
                    return False
        if team.locked:
            self.send_chat('Team is locked')
            if not team.spectator and not team.other.locked:
                return team.other
            return False
        balanced_teams = self.protocol.balanced_teams
        if balanced_teams and not team.spectator:
            other_team = team.other
            if other_team.count() < team.count() + 1 - balanced_teams:
                if other_team.locked:
                    return False
                self.send_chat('Team is full, moved to %s' % other_team.name)
                return other_team
        self.last_switch = reactor.seconds()
p2p.py (project: p2pool-unitus, author: amarian12); the same do_ping method appears verbatim in the p2pool-dgb-sha256, p2pool-ltc, p2pool-bsty, and p2pool-cann forks
def do_ping(self):
        start = reactor.seconds()
        yield self.get_shares(hashes=[0], parents=0, stops=[])
        end = reactor.seconds()
        defer.returnValue(end - start)
bindcache.py (project: privacyidea-ldap-proxy, author: NetKnights-GmbH)
def __init__(self, timeout=5):
        """
        :param timeout: Number of seconds after which the entry is removed from the bind cache
        """
        self.timeout = timeout
        #: Map of tuples (dn, app_marker, password) to insertion timestamps (determined using ``reactor.seconds``)
        self._cache = {}
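The docstring above says entries are dropped from the bind cache once they are older than timeout seconds. A possible expiry check built on reactor.seconds() could look like the following sketch (is_fresh is a hypothetical helper, not part of privacyidea-ldap-proxy):

from twisted.internet import reactor

def is_fresh(cache, key, timeout):
    # `cache` maps keys to insertion timestamps taken from reactor.seconds(),
    # mirroring the _cache layout described above (hypothetical usage).
    inserted_at = cache.get(key)
    if inserted_at is None:
        return False
    return reactor.seconds() - inserted_at < timeout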
queue.py (project: ccs-twistedextensions, author: apple)
def _periodicLostWorkCheck(self):
        """
        Periodically, every node controller has to check to make sure that work
        hasn't been dropped on the floor by someone.  In order to do that it
        queries each work-item table.
        """
        @inlineCallbacks
        def workCheck(txn):
            if self.thisProcess:
                nodes = [(node.hostname, node.port) for node in
                         (yield self.activeNodes(txn))]
                nodes.sort()
                self._lastSeenTotalNodes = len(nodes)
                self._lastSeenNodeIndex = nodes.index(
                    (self.thisProcess.hostname, self.thisProcess.port)
                )

            for itemType in self.allWorkItemTypes():
                tooLate = datetime.utcfromtimestamp(
                    self.reactor.seconds() - self.queueProcessTimeout
                )
                overdueItems = (yield itemType.query(
                    txn, (itemType.notBefore < tooLate))
                )
                for overdueItem in overdueItems:
                    peer = self.choosePerformer()
                    yield peer.performWork(overdueItem.table,
                                           overdueItem.workID)

        if not self.running:
            return succeed(None)

        return inTransaction(self.transactionFactory, workCheck)
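In workCheck above, the tooLate cutoff converts the reactor clock into a datetime so it can be compared with each item's notBefore column; anything scheduled earlier than that cutoff has been waiting longer than queueProcessTimeout and is re-dispatched. Roughly, with illustrative values (this assumes reactor.seconds() returns a POSIX timestamp, as it does on the real reactor):

from datetime import datetime

now = 1500000000.0         # reactor.seconds() on a real reactor
queueProcessTimeout = 600  # ten minutes
tooLate = datetime.utcfromtimestamp(now - queueProcessTimeout)
# An item with notBefore < tooLate was due more than ten minutes ago
# and is treated as lost work to be re-submitted.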
queue.py (project: ccs-twistedextensions, author: apple)
def _lostWorkCheckLoop(self):
        """
        While the service is running, keep checking for any overdue / lost work
        items and re-submit them to the cluster for processing.  Space out
        those checks in time based on the size of the cluster.
        """
        self._lostWorkCheckCall = None

        if not self.running:
            return

        @passthru(
            self._periodicLostWorkCheck().addErrback(log.err).addCallback
        )
        def scheduleNext(result):
            self._currentWorkDeferred = None
            if not self.running:
                return
            index = self.nodeIndex()
            now = self.reactor.seconds()

            interval = self.queueDelayedProcessInterval
            count = self.totalNumberOfNodes()
            when = (now - (now % interval)) + (interval * (count + index))
            delay = when - now
            self._lostWorkCheckCall = self.reactor.callLater(
                delay, self._lostWorkCheckLoop
            )

        self._currentWorkDeferred = scheduleNext
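The when computation in scheduleNext snaps the next check onto a grid of queueDelayedProcessInterval seconds and then staggers it by the node's index, so the controllers in a cluster do not all re-check at the same moment. With illustrative numbers:

now = 103.0          # self.reactor.seconds()
interval = 10.0      # queueDelayedProcessInterval
count, index = 3, 1  # cluster size and this node's position

when = (now - (now % interval)) + (interval * (count + index))
delay = when - now
# when == 100.0 + 10.0 * 4 == 140.0, so delay == 37.0 seconds:
# this node's next check lands on its own slot of the shared grid.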
test_jobs.py (project: ccs-twistedextensions, author: apple)
def test_notBeforeWhenCheckingForWork(self):
        """
        L{ControllerQueue._workCheck} should execute any
        outstanding work items, but only those that are expired.
        """
        dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()
        fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)

        # Let's create a couple of work items directly, not via the enqueue
        # method, so that they exist but nobody will try to immediately execute
        # them.

        @transactionally(dbpool.pool.connection)
        @inlineCallbacks
        def setup(txn):
            # First, one that's right now.
            yield DummyWorkItem.makeJob(txn, a=1, b=2, notBefore=fakeNow)

            # Next, create one that's actually far enough into the past to run.
            yield DummyWorkItem.makeJob(
                txn, a=3, b=4, notBefore=(
                    # Schedule it in the past so that it should have already
                    # run.
                    fakeNow - datetime.timedelta(seconds=20)
                )
            )

            # Finally, one that's actually scheduled for the future.
            yield DummyWorkItem.makeJob(
                txn, a=10, b=20, notBefore=fakeNow + datetime.timedelta(1000)
            )
        yield setup

        # Wait for job
        while len(DummyWorkItem.results) != 2:
            clock.advance(1)

        # Work item complete
        self.assertTrue(DummyWorkItem.results == {1: 3, 2: 7})
test_jobs.py (project: ccs-twistedextensions, author: apple)
def test_temporaryFailure(self):
        """
        When a work item temporarily fails it should appear as unassigned in the JOB
        table and have the failure count bumped, and a notBefore set to the temporary delay.
        """
        dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()
        fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)

        # Let's create a couple of work items directly, not via the enqueue
        # method, so that they exist but nobody will try to immediately execute
        # them.

        @transactionally(dbpool.pool.connection)
        @inlineCallbacks
        def setup(txn):
            # Next, create failing work that's actually far enough into the past to run.
            yield DummyWorkItem.makeJob(
                txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)
            )
        yield setup
        clock.advance(20 - 12)

        @transactionally(dbpool.pool.connection)
        def check(txn):
            return JobItem.all(txn)

        jobs = yield check
        self.assertTrue(len(jobs) == 1)
        self.assertTrue(jobs[0].assigned is None)
        self.assertEqual(jobs[0].isAssigned, 0)
        self.assertTrue(jobs[0].failed == 1)
        self.assertTrue(jobs[0].notBefore > datetime.datetime.utcnow() + datetime.timedelta(seconds=90))
test_internet.py (project: zenchmarks, author: squeaky-pl)
def test_callLaterUsesReactorSecondsInDelayedCall(self):
        """
        L{reactor.callLater<twisted.internet.interfaces.IReactorTime.callLater>}
        should use the reactor's seconds factory
        to produce the time at which the DelayedCall will be called.
        """
        oseconds = reactor.seconds
        reactor.seconds = lambda: 100
        try:
            call = reactor.callLater(5, lambda: None)
            self.assertEqual(call.getTime(), 105)
        finally:
            reactor.seconds = oseconds
        call.cancel()
test_internet.py (project: zenchmarks, author: squeaky-pl)
def testDelayedCallSecondsOverride(self):
        """
        Test that the C{seconds} argument to DelayedCall gets used instead of
        the default timing function, if it is not None.
        """
        def seconds():
            return 10
        dc = base.DelayedCall(5, lambda: None, (), {}, lambda dc: None,
                              lambda dc: None, seconds)
        self.assertEqual(dc.getTime(), 5)
        dc.reset(3)
        self.assertEqual(dc.getTime(), 13)
test_p2p.py (project: p2pool-bch, author: amarian12)
def test_tx_limit(self):
        class MyNode(p2p.Node):
            def __init__(self, df):
                p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)

                self.df = df
                self.sent_time = 0

            @defer.inlineCallbacks
            def got_conn(self, conn):
                p2p.Node.got_conn(self, conn)

                yield deferral.sleep(.5)

                new_mining_txs = dict(self.mining_txs_var.value)
                for i in xrange(3):
                    huge_tx = dict(
                        version=0,
                        tx_ins=[],
                        tx_outs=[dict(
                            value=0,
                            script='x'*900000,
                        )],
                        lock_time=i,
                    )
                    new_mining_txs[bitcoin_data.hash256(bitcoin_data.tx_type.pack(huge_tx))] = huge_tx
                self.mining_txs_var.set(new_mining_txs)

                self.sent_time = reactor.seconds()

            def lost_conn(self, conn, reason):
                self.df.callback(None)
        try:
            p2p.Protocol.max_remembered_txs_size *= 10

            df = defer.Deferred()
            n = MyNode(df)
            n.start()
            yield df
            if not (n.sent_time <= reactor.seconds() <= n.sent_time + 1):
                raise ValueError('node did not disconnect within 1 second of receiving too much tx data')
            yield n.stop()
        finally:
            p2p.Protocol.max_remembered_txs_size //= 10

