import gc
import random
import timeit

import six


def measure_best(repeat, iters,
                 common_setup='pass',
                 common_cleanup=lambda: None,  # must be callable: it is invoked below
                 *funcs):
    funcs = list(funcs)
    results = dict([(f, []) for f in funcs])
    for i in six.moves.range(repeat):
        random.shuffle(funcs)  # randomize order to avoid systematic bias
        for func in funcs:
            gc.collect()  # start each trial from a clean heap
            t = timeit.Timer(func, setup=common_setup)
            results[func].append(t.timeit(iters))
            common_cleanup()
    best_results = {}
    for func, times in six.iteritems(results):
        best_results[func] = min(times)  # best of `repeat` trials
    return best_results
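
# Usage sketch (hypothetical, not from the original source): compare two
# list-building strategies with measure_best(). Note the candidate functions
# are passed positionally after the setup/cleanup arguments.
def build_append():
    out = []
    for i in range(1000):
        out.append(i)
    return out

def build_comprehension():
    return [i for i in range(1000)]

best = measure_best(5, 100,        # 5 shuffled trials of 100 iterations each
                    'pass',        # common_setup string passed to timeit
                    lambda: None,  # no-op common_cleanup callable
                    build_append, build_comprehension)
for func, seconds in best.items():
    print(func.__name__, seconds)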
def easy_timer(code_to_benchmark, *, repeat=3, number=1000):
    """
    Wrap timeit.Timer().repeat() to capture the caller's namespace.

    Rather than put our setup statement in a string for
    :py:func:`timeit.timeit`, we can just pull locals and globals
    from the calling stack frame.

    Args:
        code_to_benchmark (str): A string containing the Python code
            that we want to benchmark.
        repeat (int): Number of times to repeat the timer trial.
        number (int): Number of iterations **per** trial.

    Returns:
        float: The best measured time of ``repeat`` trials.
    """
    timer = timeit.Timer(stmt=code_to_benchmark, globals=copy_environment(2))
    best_time = min(timer.repeat(repeat=repeat, number=number))
    return best_time
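
# The copy_environment() helper used above is not part of this snippet. A
# minimal sketch of what it plausibly does (the name and stack-depth argument
# come from the call above; the body is an assumption): merge the globals and
# locals of the frame `depth` levels up, so the timed statement can reference
# names from the caller's scope.
import inspect

def copy_environment(depth):
    frame = inspect.stack()[depth].frame
    env = dict(frame.f_globals)   # caller's module globals
    env.update(frame.f_locals)    # shadowed by the caller's locals
    return env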
def print_easy_timer(code_to_benchmark, *, repeat=3, number=1000):
    """
    Repeatedly time code and print results.

    Args:
        code_to_benchmark (str): A string containing the Python code
            that we want to benchmark.
        repeat (int): Number of times to repeat the timer trial.
        number (int): Number of iterations **per** trial.

    Returns:
        float: The best measured time of ``repeat`` trials.
    """
    timer = timeit.Timer(stmt=code_to_benchmark, globals=copy_environment(2))
    best_time = min(timer.repeat(repeat=repeat, number=number))
    print(":\t\t".join((
        code_to_benchmark,
        str(best_time)
    )))
    return best_time
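
# Hedged usage sketch for both helpers: `data` is local to this frame, yet
# the timed statements can see it because copy_environment() captures it.
data = list(range(10000))
best = easy_timer("sorted(data)", repeat=5, number=100)
print_easy_timer("sum(data)")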
def timer(s, v='', nloop=500, nrep=3):
    units = ["s", "ms", "µs", "ns"]
    scaling = [1, 1e3, 1e6, 1e9]
    print("%s : %-50s : " % (v, s), end=' ')
    # Expands to ["xs,nmxs,xl,nmxl", "ys,nmys,yl,nmyl", "zs,nmzs,zl,nmzl"]
    varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x * 4) for x in 'xyz']
    setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames)
    Timer = timeit.Timer(stmt=s, setup=setup)
    best = min(Timer.repeat(nrep, nloop)) / nloop
    if best > 0.0:
        order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
    else:
        order = 3
    print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
                                                      3,
                                                      best * scaling[order],
                                                      units[order]))
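
# The setup string above does `from __main__ import numpy, ma, xs, nmxs, ...`,
# so those arrays must exist at module level of the script being run. A hedged
# sketch of suitable fixtures (the sizes and mask rule are assumptions):
# small/large plain arrays plus masked counterparts for x, y and z.
import numpy
import numpy.ma as ma

for _name in 'xyz':
    _small = numpy.random.uniform(-1, 1, 100)
    _large = numpy.random.uniform(-1, 1, 10000)
    globals()[_name + 's'] = _small                                    # e.g. xs
    globals()['nm' + _name + 's'] = ma.array(_small, mask=_small < 0)  # e.g. nmxs
    globals()[_name + 'l'] = _large                                    # e.g. xl
    globals()['nm' + _name + 'l'] = ma.array(_large, mask=_large < 0)  # e.g. nmxl

timer('ma.sum(nmxs)', v='masked sum')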
def timetest(command, info, info2='2 floats', num=100, numt=1, mem=16384):
    initct(mem)
    print(" ")
    print(info)
    print("Timing over", num * num, "calls to tiles,", numt, "tilings each for", info2)
    t = timeit.Timer(command + '(' + str(num) + ',' + str(mem) + ',' + str(numt) + ')',
                     'from __main__ import ' + command)
    print("With no collision table", t.timeit(1), "seconds")
    t = timeit.Timer(command + '(' + str(num) + ', ctu' + ',' + str(numt) + ')',
                     'from __main__ import ctu, ' + command)
    print("With unsafe collision table", t.timeit(1), "seconds")
    print(ctu)
    t = timeit.Timer(command + '(' + str(num) + ', cts' + ',' + str(numt) + ')',
                     'from __main__ import cts, ' + command)
    print("With safe collision table", t.timeit(1), "seconds")
    print(cts)
    t = timeit.Timer(command + '(' + str(num) + ', ctss' + ',' + str(numt) + ')',
                     'from __main__ import ctss, ' + command)
    print("With super safe collision table", t.timeit(1), "seconds")
    print(ctss)
    print(" ")
    # print("Timing over", num * num, "calls to tiles, 16 tilings each for", info2)
    # t = timeit.Timer(command + '(' + str(num) + ', 16384, 16)', 'from __main__ import ' + command)
    # print("With no collision table", t.timeit(1), "seconds")
from timeit import Timer


def main():
    # parse_command_line, options, e1 and c1 come from the surrounding module
    parse_command_line()
    t = Timer(e1)
    results = t.timeit(options.num) / options.num
    print('engine: %0.3f ms per iteration' % (results * 1000))
    t = Timer(c1)
    results = t.timeit(options.num) / options.num
    print('coroutine: %0.3f ms per iteration' % (results * 1000))
from timeit import Timer


def time_stmt(stmt='pass', setup='pass', number=0, repeat=3):
    """Timer function with the same behaviour as running `python -m timeit`
    on the command line.

    :return: elapsed time in seconds or NaN if the command failed.
    :rtype: float
    """
    t = Timer(stmt, setup)

    if not number:
        # determine number so that 0.2 <= total time < 2.0
        for i in range(1, 10):
            number = 10 ** i
            try:
                x = t.timeit(number)
            except Exception:
                t.print_exc()  # print_exc() writes the traceback itself
                return float('NaN')
            if x >= 0.2:
                break

    try:
        r = t.repeat(repeat, number)
    except Exception:
        t.print_exc()
        return float('NaN')

    best = min(r)
    return best / number
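
# Usage sketch: the equivalent of
#   python -m timeit -s "data = list(range(1000))" "sorted(data)"
per_loop = time_stmt('sorted(data)', setup='data = list(range(1000))')
print('%.3g seconds per loop' % per_loop)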
from timeit import Timer


def benchmark(stmt, n=1000, r=3):
    setup = (
        'from ansimarkup import parse;'
        'from colorama import Style as S, Fore as F;'
        'from termcolor import colored;'
        'from colr import color;'
        'from plumbum import colors;'
        'from pastel import colorize'
    )
    timer = Timer(stmt, setup=setup)
    best = min(timer.repeat(r, n))
    usec = best * 1e6 / n  # convert best total time to microseconds per call
    return usec
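
# Hedged usage sketch: each timed statement may only use the names imported
# in `setup` above (parse, S, F, colored, color, colors, colorize).
print('ansimarkup: %.2f usec/call' % benchmark("parse('<b>bold</b>')"))
print('colorama:   %.2f usec/call' % benchmark("S.BRIGHT + 'bold' + S.RESET_ALL"))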
def get_timeit(self, setup):
    return min(timeit.Timer(
        'for n in range(64, 10000): _sample_n_k(n, 64)',
        setup=setup).repeat(repeat=10, number=1))
import execnet


def measure_pattern_time_v2(iteration_number, size, pattern):
    gw = execnet.makegateway("popen//python=python2.7")
    channel = gw.remote_exec("""
from nltk.corpus import brown
words = brown.words()[:%s]
text = ' '.join(words)

from pattern.en import parsetree
text_tree = parsetree(text,
                      tokenize=True,     # Split punctuation marks from words?
                      tags=True,         # Parse part-of-speech tags? (NN, JJ, ...)
                      chunks=False,      # Parse chunks? (NP, VP, PNP, ...)
                      relations=False,   # Parse chunk relations? (-SBJ, -OBJ, ...)
                      lemmata=False,     # Parse lemmata? (ate => eat)
                      encoding='utf-8',  # Input string encoding.
                      tagset=None)       # Penn Treebank II (default) or UNIVERSAL.

from pattern.search import search

def measure_pattern_search():
    global pattern_search_result  # let measure_pattern_search modify the value
    pattern_search_result = search("%s", text_tree)

from timeit import Timer
pattern_search_time = Timer(measure_pattern_search)

def pattern_search_timeit():
    runtimes = [pattern_search_time.timeit(number=1) for i in range(0, %s)]
    average = sum(runtimes) / len(runtimes)
    # return ''.join(['timeit: #runs=', str(%s), ' ; average=', str(average), ' ; min=', str(min(runtimes))])
    return [runtimes, average, min(runtimes)]

channel.send(pattern_search_timeit())
""" % (size, pattern, iteration_number, iteration_number))
    channel.send([])
    return channel.receive()
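
# Hedged usage sketch: time 10 searches for a hypothetical adjective-noun
# pattern over the first 1000 Brown-corpus words, executed in the Python 2.7
# subprocess that the gateway spec above requires.
runtimes, average, fastest = measure_pattern_time_v2(10, 1000, 'JJ NN')
print(average, fastest)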
from timeit import Timer


def measure_time(function, iteration_number):
    function_time = Timer(function)
    runtimes = [function_time.timeit(number=1) for i in range(0, iteration_number)]
    average = sum(runtimes) / len(runtimes)
    return runtimes, average, min(runtimes)
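
# Usage sketch: 50 single-shot timings of a zero-argument callable.
runtimes, average, fastest = measure_time(lambda: sorted(range(1000)), 50)
print('avg %.6fs, min %.6fs over %d runs' % (average, fastest, len(runtimes)))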
def func_27():
    jointimer = timeit.Timer('join_test()', 'from __main__ import join_test')
    print(jointimer.timeit(number=100))
    plustimer = timeit.Timer('plus_test()', 'from __main__ import plus_test')
    print(plustimer.timeit(number=100))
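
# join_test and plus_test are imported from __main__ but not shown here.
# A hedged sketch of the classic pair they presumably implement: str.join
# versus repeated `+=` concatenation.
def join_test():
    return ''.join(str(i) for i in range(1000))

def plus_test():
    s = ''
    for i in range(1000):
        s += str(i)
    return s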
def benchmark_tracer_wrap():
    tracer = Tracer()
    tracer.writer = DummyWriter()

    # testcase
    class Foo(object):
        @staticmethod
        @tracer.wrap()
        def s():
            return 0

        @classmethod
        @tracer.wrap()
        def c(cls):
            return 0

        @tracer.wrap()
        def m(self):
            return 0

    f = Foo()

    # benchmark
    print("## tracer.trace() wrapper benchmark: {} loops ##".format(NUMBER))
    timer = timeit.Timer(f.s)
    result = timer.repeat(repeat=REPEAT, number=NUMBER)
    print("- staticmethod execution time: {:8.6f}".format(min(result)))
    timer = timeit.Timer(f.c)
    result = timer.repeat(repeat=REPEAT, number=NUMBER)
    print("- classmethod execution time: {:8.6f}".format(min(result)))
    timer = timeit.Timer(f.m)
    result = timer.repeat(repeat=REPEAT, number=NUMBER)
    print("- method execution time: {:8.6f}".format(min(result)))
def benchmark_getpid():
    timer = timeit.Timer(getpid)
    result = timer.repeat(repeat=REPEAT, number=NUMBER)
    print("## getpid wrapper benchmark: {} loops ##".format(NUMBER))
    print("- getpid execution time: {:8.6f}".format(min(result)))
def __init__(self, stmt, setup='pass', timer=timeit.default_timer, globals=globals()):
    # copy of timeit.Timer.__init__
    # similarity index 95%
    self.timer = timer
    stmt = timeit.reindent(stmt, 8)    # uses timeit's private helpers, so this
    setup = timeit.reindent(setup, 4)  # is tied to a specific CPython version
    src = timeit.template % {'stmt': stmt, 'setup': setup}
    self.src = src  # Save for traceback display
    code = compile(src, timeit.dummy_src_name, "exec")
    ns = {}
    # exec code in globals(), ns  -- original timeit code
    exec_(code, globals, ns)  # -- we use caller-provided globals instead (exec_ is a py2/py3 shim, e.g. six.exec_)
    self.inner = ns["inner"]
def test_timer_invalid_stmt(self):
    self.assertRaises(ValueError, timeit.Timer, stmt=None)
def test_print_exc(self):
    s = io.StringIO()
    t = timeit.Timer("1/0")
    try:
        t.timeit()
    except:
        t.print_exc(s)
    self.assert_exc_string(s.getvalue(), 'ZeroDivisionError')
def time_regex_test_case(compiled_regex, test_case, iterations):
    """
    Execute and time a single regex on a single test case.

    :param compiled_regex: a compiled :py:mod:`re` pattern object
    :param test_case: object exposing a ``search_string`` attribute
    :param iterations: number of executions per timed run
    :return: [best_time, iterations, test_case], or [] on failure
    """
    try:
        repeats = 10
        search_string = test_case.search_string

        def wrap():
            # Timing bug, lazy eval defers computation if we don't
            # force it (converting to list evals the result here)
            # https://swizec.com/blog/python-and-lazy-evaluation/swizec/5148
            return list(compiled_regex.finditer(search_string))

        t = timeit.Timer(wrap)
        # repeat() returns one total time (a float) per run; keep the fastest
        repeat_times = t.repeat(repeat=repeats, number=iterations)
        return [min(repeat_times), iterations, test_case]
    except Exception:
        traceback.print_exc()
        return []
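
# Usage sketch: any object with a `search_string` attribute works as a
# test case, so a SimpleNamespace stands in here (hypothetical fixture).
import re
from types import SimpleNamespace

case = SimpleNamespace(search_string='a' * 1000 + 'b')
print(time_regex_test_case(re.compile(r'a+b'), case, iterations=100))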
from timeit import Timer


def main(sys_argv):
    # `examples` and make_test_function() come from the surrounding module;
    # see the hedged sketch after this function.
    args = sys_argv[1:]
    count = int(args[0])

    print("Benchmarking: %sx" % count)
    print()

    for example in examples:
        test = make_test_function(example)
        t = Timer(test)
        print(min(t.repeat(repeat=3, number=count)))

    print("Done")