def simulate_component(self, compID):
"""
Run the simulation for the specified foreground component.
"""
logger.info("==================================================")
logger.info(">>> Simulate component: %s <<<" % compID)
logger.info("==================================================")
t1_start = time.perf_counter()
t2_start = time.process_time()
comp_cls = COMPONENTS_ALL[compID]
comp_obj = comp_cls(self.configs)
comp_obj.preprocess()
skyfiles = comp_obj.simulate()
if self.products:
self.products.add_component(compID, skyfiles)
comp_obj.postprocess()
t1_stop = time.perf_counter()
t2_stop = time.process_time()
logger.info("--------------------------------------------------")
logger.info("Elapsed time: %.1f [min]" % ((t1_stop-t1_start)/60))
logger.info("CPU process time: %.1f [min]" % ((t2_stop-t2_start)/60))
logger.info("--------------------------------------------------")
Python perf_counter() example source code
def acquire(self, blocking=True, timeout=-1):
"""Must be used with 'yield' as 'yield lock.acquire()'.
"""
if not blocking and self._owner is not None:
raise StopIteration(False)
if not self._scheduler:
self._scheduler = Pycos.scheduler()
task = Pycos.cur_task(self._scheduler)
if timeout < 0:
timeout = None
while self._owner is not None:
if timeout is not None:
if timeout <= 0:
raise StopIteration(False)
start = _time()
self._waitlist.append(task)
if (yield task._await_(timeout)) is None:
try:
self._waitlist.remove(task)
except ValueError:
pass
if timeout is not None:
timeout -= (_time() - start)
self._owner = task
raise StopIteration(True)
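A minimal usage sketch of the acquire() coroutine above; the pycos.Lock / pycos.Task names, the task.sleep() call, and the release() counterpart are assumptions for illustration, not taken from this snippet.
import pycos

lock = pycos.Lock()

def worker(task=None):
    # acquire() must be yielded from inside a pycos task
    yield lock.acquire()
    try:
        print('in critical section:', task)
        yield task.sleep(0.1)      # hypothetical work while holding the lock
    finally:
        lock.release()             # wakes the next waiter, if any

pycos.Task(worker)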
async def check_cooldowns(self, userid, action, settings):
path = settings["Config"][action]
if abs(settings["Players"][userid][action] - int(time.perf_counter())) >= path:
settings["Players"][userid][action] = int(time.perf_counter())
dataIO.save_json(self.file_path, self.system)
return True
elif settings["Players"][userid][action] == 0:
settings["Players"][userid][action] = int(time.perf_counter())
dataIO.save_json(self.file_path, self.system)
return True
else:
s = abs(settings["Players"][userid][action] - int(time.perf_counter()))
seconds = abs(s - path)
remaining = self.time_formatting(seconds)
await self.bot.say("This action has a cooldown. You still have:\n{}".format(remaining))
return False
def check_cooldowns(self, settings, userid):
if abs(settings["Users"][userid]["Trade Cooldown"] - int(time.perf_counter())) \
>= settings["Config"]["Trade Cooldown"]:
settings["Users"][userid]["Trade Cooldown"] = int(time.perf_counter())
dataIO.save_json(self.file_path, self.system)
return "OK"
elif settings["Users"][userid]["Trade Cooldown"] == 0:
settings["Users"][userid]["Trade Cooldown"] = int(time.perf_counter())
dataIO.save_json(self.file_path, self.system)
return "OK"
else:
s = abs(settings["Users"][userid]["Trade Cooldown"] - int(time.perf_counter()))
seconds = abs(s - settings["Config"]["Trade Cooldown"])
msg = ("You must wait before trading again. "
"You still have: {}".format(self.time_format(seconds)))
return msg
async def heist_game(self, settings, server, t_heist, t_crew, t_vault):
crew = len(settings["Crew"])
target = self.heist_target(settings, crew)
settings["Config"]["Heist Start"] = True
players = [server.get_member(x) for x in settings["Crew"]]
results = self.game_outcomes(settings, players, target)
start_output = self.message_handler(settings, crew, players)
await self.bot.say("Get ready! The {} is starting with {}\nThe {} has decided to "
"hit **{}**.".format(t_heist, start_output, t_crew, target))
await asyncio.sleep(3)
await self.show_results(settings, results)
if settings["Crew"]:
players = [server.get_member(x) for x in settings["Crew"]]
data = self.calculate_credits(settings, players, target)
headers = ["Players", "Credits Obtained", "Bonuses", "Total"]
t = tabulate(data, headers=headers)
msg = ("The credits collected from the {} was split among the winners:\n```"
"C\n{}```".format(t_vault, t))
else:
msg = "No one made it out safe."
settings["Config"]["Alert Time"] = int(time.perf_counter())
self.reset_heist(settings)
self.save_system()
await self.bot.say(msg)
def shutdown_save(self):
for server in self.system["Servers"]:
death_time = self.system["Servers"][server]["Config"]["Death Timer"]
for player in self.system["Servers"][server]["Players"]:
player_death = self.system["Servers"][server]["Players"][player]["Death Timer"]
player_sentence = self.system["Servers"][server]["Players"][player]["Time Served"]
sentence = self.system["Servers"][server]["Players"][player]["Sentence"]
if player_death > 0:
s = abs(player_death - int(time.perf_counter()))
seconds = abs(s - death_time)
self.system["Servers"][server]["Players"][player]["Death Timer"] = seconds
if player_sentence > 0:
s = abs(player_sentence - int(time.perf_counter()))
seconds = abs(s - sentence)
self.system["Servers"][server]["Players"][player]["Time Served"] = seconds
def __init__(self, title=None, pathfile=None, debug_mode=True, debug_level=0):
self.path_file = pathfile
self.debug_mode = debug_mode
self.starttime = time.perf_counter()
self.nowtime = time.perf_counter()
self.lastcall = time.perf_counter()
self.debug_level = debug_level
# create file?
# if not isfile(self.path_file):
# with open(self.path_file, 'w') as f:
# f.write("-init log file-")
if title is not None:
today = datetime.datetime.now()
s = title + " program started the " + today.strftime("%d of %b %Y at %H:%M")
self.log("=============================================================\n" +
s +
"\n=============================================================")
def __call__(self, func, *args, **kwargs):
'''Used to process callbacks in the throughput-limiting thread
via the queue.
Args:
func (:obj:`callable`): the actual function (or any callable) that
is processed through queue.
*args: variable-length `func` arguments.
**kwargs: arbitrary keyword-arguments to `func`.
Returns:
None
'''
if not self.is_alive() or self.__exit_req:
raise DelayQueueError('Could not process callback in stopped thread')
self._queue.put((func, args, kwargs))
# The most straightforward way to implement this is to use two sequential delay
# queues, like a classic delay-chain schematic in electronics.
# The message path is:
# msg --> group delay if group msg, else no delay --> normal msg delay --> out
# This way, the OS threading scheduler takes care of timing accuracy.
# (see time.time, time.clock, time.perf_counter, time.sleep @ docs.python.org)
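A rough sketch of the two-queue chain described in the comment above; the DelayQueue constructor arguments and the send_now(msg) callback are assumptions for illustration.
# group_queue throttles per-group traffic, all_queue throttles everything;
# since a DelayQueue instance is itself callable (see __call__ above), it can
# be queued as the callback of another DelayQueue to chain the two delays.
group_queue = DelayQueue(burst_limit=20, time_limit_ms=60000)  # assumed args
all_queue = DelayQueue(burst_limit=30, time_limit_ms=1000)     # assumed args

def send(msg, is_group_msg):
    if is_group_msg:
        # group delay first, then the normal delay, then out
        group_queue(all_queue, send_now, msg)
    else:
        all_queue(send_now, msg)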
def benchmark(package_name):
def decorator(func):
def inner():
try:
package = import_module(package_name)
except ImportError:
return 'not available.'
start = perf_counter()
for i in range(TIMES):
func(package)
end = perf_counter()
return end - start
return inner
return decorator
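A hypothetical use of the benchmark decorator above; TIMES is assumed to be a module-level constant, and numpy is only an example target package.
# The wrapped function receives the imported module; calling the wrapper
# returns the elapsed seconds, or 'not available.' if the import fails.
@benchmark('numpy')
def bench_numpy(np):
    np.arange(10_000).sum()

print('numpy:', bench_numpy())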
def start_game(self):
builtins.cfg = \
chess_config.Config(self.config_name.lower(), self.crazy_mode)
self.game._reset()
agent1_module = __import__(self.agent1 + "_ChessPlayer")
white_opponent_class = getattr(agent1_module,
self.agent1 + "_ChessPlayer")
self.white_opponent = white_opponent_class(self.game.board,'white')
agent2_module = __import__(self.agent2 + "_ChessPlayer")
black_opponent_class = getattr(agent2_module,
self.agent2 + "_ChessPlayer")
self.black_opponent = black_opponent_class(self.game.board,'black')
self.game.white_player = self.agent1
self.game.black_player = self.agent2
self.player_time = {'white':0.0,'black':0.0}
self.timer = time.perf_counter()
move = self.take_player_turn()
self.attempt_to_make_move(self.game.board[move[0]], *move)
while self.game.started:
move = self.switch_player_turn()
self.attempt_to_make_move(self.game.board[move[0]], *move)
def switch_player_turn(self, the_time):
if self.do_replay:
self.player_time[self.game.player_turn] = the_time
else:
time_against = time.perf_counter() - self.timer
self.player_time[self.game.player_turn] += time_against
self.player_time_label[self.game.player_turn]['text'] = \
'{:.1f} sec'.format(self.player_time[self.game.player_turn])
if self.player_time[self.game.player_turn] > cfg.TIME_LIMIT:
self.player_time_label[self.game.player_turn]['foreground'] = 'red'
self.turn_label['foreground'] = self.game.player_turn
self.game.player_turn = \
'black' if self.game.player_turn == 'white' else 'white'
self.turn_label['text'] = self.game.player_turn.capitalize()
self.turn_label['background'] = self.game.player_turn
if not self.do_replay:
if self.player_time[self.game.player_turn] > cfg.TIME_LIMIT:
self.force_random_move()
else:
self.take_player_turn()
def query(sql, **kwargs):
ti = time.perf_counter()
_query = text(sql).execution_options(autocommit=False)
try:
res = conn.execute(_query, **kwargs)
ms = int((time.perf_counter() - ti) * 1000)
QueryStats.log(sql, ms)
if ms > 100:
disp = re.sub(r'\s+', ' ', sql).strip()[:250]
print("\033[93m[SQL][{}ms] {}\033[0m".format(ms, disp))
logger.debug(res)
return res
except Exception as e:
print("[SQL] Error in query {} ({})".format(sql, kwargs))
conn.close()
logger.exception(e)
raise e
# n*m
def __exec(self, method, *params):
time_start = time.perf_counter()
tries = 0
while True:
try:
result = self._client.exec(method, *params)
assert result, "empty response {}".format(result)
except (AssertionError, RPCError) as e:
tries += 1
print("{} failure, retry in {}s -- {}".format(method, tries, e))
time.sleep(tries)
continue
break
batch_size = len(params[0]) if method == 'get_accounts' else 1
total_time = int((time.perf_counter() - time_start) * 1000)
ClientStats.log("%s()" % method, total_time, batch_size)
return result
# perform batch call (if jussi is enabled, use batches; otherwise, multi)
def __exec_batch(self, method, params):
time_start = time.perf_counter()
result = None
if self._jussi:
tries = 0
while True:
try:
result = list(self._client.exec_batch(method, params, batch_size=500))
break
except (AssertionError, RPCError) as e:
tries += 1
print("batch {} failure, retry in {}s -- {}".format(method, tries, e))
time.sleep(tries)
continue
else:
result = list(self._client.exec_multi_with_futures(
method, params, max_workers=10))
total_time = int((time.perf_counter() - time_start) * 1000)
ClientStats.log("%s()" % method, total_time, len(params))
return result
def cache_accounts(cls, accounts):
from hive.indexer.cache import batch_queries
processed = 0
total = len(accounts)
for i in range(0, total, 1000):
batch = accounts[i:i+1000]
lap_0 = time.perf_counter()
sqls = cls._generate_cache_sqls(batch)
lap_1 = time.perf_counter()
batch_queries(sqls)
lap_2 = time.perf_counter()
if len(batch) < 1000:
continue
processed += len(batch)
rem = total - processed
rate = len(batch) / (lap_2 - lap_0)
pct_db = int(100 * (lap_2 - lap_1) / (lap_2 - lap_0))
print(" -- account {} of {} ({}/s, {}% db) -- {}m remaining".format(
processed, total, round(rate, 1), pct_db, round(rem / rate / 60, 2)))
def update_ranks(cls):
sql = """
UPDATE hive_accounts
SET rank = r.rnk
FROM (SELECT id, ROW_NUMBER() OVER (ORDER BY vote_weight DESC) as rnk FROM hive_accounts) r
WHERE hive_accounts.id = r.id AND rank != r.rnk;
"""
query(sql)
return
# the following method is 10-20x slower
id_weight = query_all("SELECT id, vote_weight FROM hive_accounts")
id_weight = sorted(id_weight, key=lambda el: el[1], reverse=True)
print("Updating account ranks...")
lap_0 = time.perf_counter()
query("START TRANSACTION")
for (i, (_id, _)) in enumerate(id_weight):
query("UPDATE hive_accounts SET rank=%d WHERE id=%d" % (i+1, _id))
query("COMMIT")
lap_1 = time.perf_counter()
print("Updated %d ranks in %ds" % (len(id_weight), lap_1 - lap_0))
async def ws_connect(self, url: str, timeout: float = None
) -> WebSocketClientConnection:
"""Make WebSocket connection to the url.
Retries up to _max (default: 20) times. Client connections made by this
method are closed after each test method.
"""
st = time.perf_counter()
timeout = timeout or self.timeout
while (time.perf_counter() - st) < timeout:
try:
ws = await to_asyncio_future(websocket_connect(url))
except ConnectionRefusedError:
await self.wait()
continue
else:
self._ws_connections.append(ws)
return ws
raise ConnectionRefusedError(
'WebSocket connection refused: {}'.format(url))
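A sketch of how a test method might use the helper above; the URL and messages are placeholders.
# Assumes an async test method on the same test class (tornado-style client).
async def test_echo(self):
    ws = await self.ws_connect('ws://localhost:8888/ws')
    ws.write_message('ping')
    reply = await ws.read_message()
    assert reply == 'ping'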
def do_work(job):
tstart = time.perf_counter()
status_code = None
timeout = False
connection_error = False
expect_not_present = False
try:
result = requests.get(job.get('url'), timeout=job.get('timeout'))
status_code = result.status_code
if job.get('expect'):
if job.get('expect') not in result.text:
expect_not_present = True
except ReadTimeout:
timeout = True
except socket_error:
connection_error = True
return {'status_code': status_code, 'ms': (time.perf_counter()-tstart)*1000, 'timeout': timeout,
'connection_error': connection_error,
'tstart': tstart, 'tstop': time.perf_counter(), 'expect_not_present': expect_not_present}
def try_with_timeout(target, args, timeout, pause):
"""
Attempt an operation repeatedly, retrying as long as it fails, up to the given timeout.
:param target: The callable
:param args: The arguments to pass
:param timeout: the timeout
:param pause: how long to pause between each failed invocation
:return: the result of successfully invoking the callable;
raises the exception thrown by the callable if the timeout is reached.
"""
end = time.perf_counter() + timeout
result, exception = try_invoke(target, args)
while exception is not None and time.perf_counter() < end:
time.sleep(pause)
result, exception = try_invoke(target, args)
if exception is not None:
raise exception
return result
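A hypothetical call to try_with_timeout; try_invoke is not shown here but is assumed to call target(*args) and return a (result, exception) pair, and the health-check URL is a placeholder.
import urllib.request

def fetch_status(url):
    # placeholder flaky operation: returns the HTTP status code for url
    return urllib.request.urlopen(url).getcode()

# retry for up to 5 seconds, pausing 0.5 s between failed attempts
status = try_with_timeout(fetch_status, ('http://localhost:8080/health',),
                          timeout=5.0, pause=0.5)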
def upload_blocks(bucket, chunk_size, max_threads, lines):
session = botocore.session.get_session()
client = session.create_client('s3')
start = time.perf_counter()
futures = []
with ThreadPoolExecutor(max_workers=max_threads) as executor:
# Start the load operations and mark each future with its URL
for line in lines:
raw_block, key = load_json_block(line)
futures.append(executor.submit(client.put_object, Bucket=bucket,
Key=key,
Body=raw_block,
ContentEncoding='UTF-8',
ContentType='application/json'))
end = time.perf_counter()
done, pending = concurrent.futures.wait(futures)
complete = time.perf_counter()
rate = 1 / ((complete - start) / len(done))
return len(done), int(rate)
def __init__(self, signal_in=None, name="layer", print_fps=False, print_fps_every=timedelta(seconds=5),
*args, **kwargs):
super().__init__(*args, **kwargs)
self.name = name
self.counter = 0
self.signal = None
self.is_first = True
self.stop_event = None
self.signal_in = None
self.set_signal_in(signal_in)
self.count = 0
self.start_time = None
self.reset()
self.print_fps_every = print_fps_every
self.print_fps = print_fps
self.fps = 0
self.last_tick_time = time.perf_counter()
def runPutTest(testDataPath, testDataRangeStart, testDataRangeEnd, f):
log.debug('running put tests...')
timeStart = time.perf_counter()
times = [time.perf_counter()]
for i in range(testDataRangeStart, testDataRangeEnd):
print(i)
thisPath = '%s/%i' % (testDataPath, i)
o = loadTestData(thisPath)
f.putObject(o, str(i))
times.append(time.perf_counter())
timeEnd = time.perf_counter()
log.warning('RESULT (PUT): total test runtime: %s seconds, mean per object: %s' % (
timeEnd - timeStart, ((timeEnd - timeStart) / testDataRangeEnd)))
log.critical('RESULT (PUT): median result: %s ' % statistics.median(calculateTimeDeltas(times)))
log.critical('RESULT (PUT): standard deviation result: %s ' % statistics.stdev(calculateTimeDeltas(times)))
log.critical('RESULT (PUT): mean result: %s ' % statistics.mean(calculateTimeDeltas(times)))
# log.critical('RESULT (PUT): individual times: %s ' % (calculateTimeDeltas(times)))
def runGetTest(testDataPath, testDataRangeStart, testDataRangeEnd, f):
log.debug('running get tests...')
timeStart = time.perf_counter()
times = [time.perf_counter()]
for i in range(testDataRangeStart, testDataRangeEnd):
thisPath = '%s/%i' % (testDataPath, i)
o = f.getObject(str(i))
saveTestData(o, thisPath)
times.append(time.perf_counter())
timeEnd = time.perf_counter()
log.critical('RESULT (GET): total test runtime: %s seconds, mean per object: %s' % (
timeEnd - timeStart, ((timeEnd - timeStart) / testDataRangeEnd)))
log.critical('RESULT (GET): median result: %s ' % statistics.median(calculateTimeDeltas(times)))
log.critical('RESULT (GET): standard deviation result: %s ' % statistics.stdev(calculateTimeDeltas(times)))
log.critical('RESULT (GET): mean result: %s ' % statistics.mean(calculateTimeDeltas(times)))
# log.critical('RESULT (GET): individual times: %s ' % (calculateTimeDeltas(times)))
def runDeleteTest(testDataRangeStart, testDataRangeEnd, f):
log.debug('running delete tests...')
timeStart = time.perf_counter()
times = [time.perf_counter()]
for i in range(testDataRangeStart, testDataRangeEnd):
f.deleteObject(str(i))
times.append(time.perf_counter())
timeEnd = time.perf_counter()
log.critical('RESULT (DELETE): total test runtime: %s seconds, mean per object: %s' % (
timeEnd - timeStart, ((timeEnd - timeStart) / testDataRangeEnd)))
log.critical('RESULT (DELETE): median result: %s ' % statistics.median(calculateTimeDeltas(times)))
log.critical('RESULT (DELETE): standard deviation result: %s ' % statistics.stdev(calculateTimeDeltas(times)))
log.critical('RESULT (DELETE): mean result: %s ' % statistics.mean(calculateTimeDeltas(times)))
# log.critical('RESULT (DELETE): individual times: %s ' % (calculateTimeDeltas(times)))
###############################################################################
###############################################################################
async def gain_xp(self, message):
user = message.author
id = user.id
if self.check_joined(id):
if id in self.gettingxp:
seconds = abs(self.gettingxp[id] - int(time.perf_counter()))
if seconds >= self.cooldown:
self.add_xp(id)
self.gettingxp[id] = int(time.perf_counter())
fileIO("data/levels/leader_board.json", "save", self.leader_board)
if self.leader_board[user.id]["XP"] >= self.get_level_xp(self.leader_board[user.id]["rank"]):
self.leader_board[user.id]["rank"] += 1
self.leader_board[user.id]["XP"] = 0
msg = '{} **has leveled up and is now level {}!!!\n HURRAY!!**'
msg = msg.format(message.author.display_name, self.leader_board[user.id]["rank"])
await self.bot.send_message(message.channel, msg)
fileIO("data/levels/leader_board.json", "save", self.leader_board)
else:
self.add_xp(id)
self.gettingxp[id] = int(time.perf_counter())
fileIO("data/levels/leader_board.json", "save", self.leader_board)
def rate_limit(wait_length):
last_time = 0
def decorate(f):
@wraps(f)
def rate_limited(*args, **kwargs):
nonlocal last_time
diff = perf_counter() - last_time
if diff < wait_length:
sleep(wait_length - diff)
r = f(*args, **kwargs)
last_time = perf_counter()
return r
return rate_limited
return decorate
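Hypothetical usage of the rate_limit decorator above, spacing calls at least one second apart; perf_counter and sleep are assumed to be imported from time, as the decorator body implies.
@rate_limit(1.0)
def poll_endpoint():
    print('polled at', perf_counter())

for _ in range(3):
    poll_endpoint()   # the second and third calls sleep as needed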
async def eventuallyAny(coroFunc, *args, retryWait: float = 0.01,
timeout: float = 5):
start = time.perf_counter()
def remaining():
return start + timeout - time.perf_counter()
remain = remaining()
data = None
while remain >= 0:
res = await coroFunc(*args)
(complete, data) = res
if complete:
return data
remain = remaining()
if remain > 0:
await asyncio.sleep(retryWait)
remain = remaining()
return data
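A sketch of polling with eventuallyAny above; the polled coroutine must return a (complete, data) pair. check_ready, node, and node.status() are assumptions for illustration.
async def check_ready(node):
    status = await node.status()          # hypothetical async status call
    return status == 'ready', status

# retried every 50 ms until it reports completion, for at most 3 seconds
result = await eventuallyAny(check_ready, node, retryWait=0.05, timeout=3)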
def put_load():
port = genHa()[1]
ha = HA('0.0.0.0', port)
name = "hello"
wallet = Wallet(name)
wallet.addIdentifier(
signer=DidSigner(seed=b'000000000000000000000000Steward1'))
client = Client(name, ha=ha)
with Looper(debug=getConfig().LOOPER_DEBUG) as looper:
looper.add(client)
print('Will send {} reqs in all'.format(numReqs))
requests = sendRandomRequests(wallet, client, numReqs)
start = perf_counter()
for i in range(0, numReqs, numReqs // splits):
print('Will wait for {} now'.format(numReqs // splits))
s = perf_counter()
reqs = requests[i:i + numReqs // splits + 1]
waitForSufficientRepliesForRequests(looper, client, requests=reqs,
customTimeoutPerReq=100,
override_timeout_limit=True)
print('>>> Got replies for {} requests << in {}'.
format(numReqs // splits, perf_counter() - s))
end = perf_counter()
print('>>>Total {} in {}<<<'.format(numReqs, end - start))
exit(0)
def simulate(self):
"""
Run the simulation for all enabled components.
"""
timers = []
for compID in self.componentsID:
t1 = time.perf_counter()
self.simulate_component(compID)
t2 = time.perf_counter()
timers.append((compID, t1, t2))
logger.info("==================================================")
logger.info(">>> Time usage <<<")
logger.info("==================================================")
for compID, t1, t2 in timers:
logger.info("%s : %.1f [min]" % (compID, (t2-t1)/60))
logger.info("--------------------------------------------------")
def _task_test(self, **kwargs):
"""
Test task ...
"""
import time
t1_start = time.perf_counter()
t2_start = time.process_time()
logger.info("Console TEST task: START ...")
for i in range(kwargs["time"]):
logger.info("Console TEST task: slept {0} seconds ...".format(i))
time.sleep(1)
logger.info("Console TEST task: DONE!")
t1_stop = time.perf_counter()
t2_stop = time.process_time()
logger.info("Elapsed time: {0:.3f} (s)".format(t1_stop - t1_start))
logger.info("CPU process time: {0:.3f} (s)".format(t2_stop - t2_start))
return (True, None)