def test_server_logprob_normalized(N, V, C, M):
model = generate_fake_model(N, V, C, M)
config = TINY_CONFIG.copy()
config['model_num_clusters'] = M
model['config'] = config
server = TreeCatServer(model)
# The total probability of all categorical rows should be 1.
ragged_index = model['suffstats']['ragged_index']
factors = []
for v in range(V):
C = ragged_index[v + 1] - ragged_index[v]
factors.append([one_hot(c, C) for c in range(C)])
data = np.array(
[np.concatenate(columns) for columns in itertools.product(*factors)],
dtype=np.int8)
logprobs = server.logprob(data)
logtotal = np.logaddexp.reduce(logprobs)
assert logtotal == pytest.approx(0.0, abs=1e-5)
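A log-total of zero just says the enumerated probabilities sum to one. A minimal, self-contained illustration of the same np.logaddexp.reduce / pytest.approx pattern (not part of the original test suite):
import numpy as np
import pytest
logprobs = np.log(np.full(4, 0.25))     # four outcomes of probability 0.25 each
assert np.logaddexp.reduce(logprobs) == pytest.approx(0.0, abs=1e-12)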
def test_train_pre_prepped(df_train):
num_workers = 1
params = {'num_rounds': 1}
df_grouped, j_groups = mjolnir.training.xgboost.prep_training(
df_train, num_workers)
params['groupData'] = j_groups
# TODO: This is probably not how we should make sure it isn't called.
orig_prep_training = mjolnir.training.xgboost.prep_training
try:
mjolnir.training.xgboost.prep_training = _always_raise
model = mjolnir.training.xgboost.train(df_grouped, params)
assert 0.74 == pytest.approx(model.eval(df_grouped, j_groups), abs=0.01)
finally:
mjolnir.training.xgboost.prep_training = orig_prep_training
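The TODO above has a pytest-native answer: the built-in monkeypatch fixture swaps the attribute for the duration of the test and restores it afterwards, so the try/finally bookkeeping disappears. A sketch under the same fixtures (the test name is invented here and is not part of the original suite):
def test_train_pre_prepped_no_reprep(df_train, monkeypatch):
    df_grouped, j_groups = mjolnir.training.xgboost.prep_training(df_train, 1)
    params = {'num_rounds': 1, 'groupData': j_groups}
    # From here on, any further call to prep_training inside train() raises.
    monkeypatch.setattr(mjolnir.training.xgboost, 'prep_training', _always_raise)
    model = mjolnir.training.xgboost.train(df_grouped, params)
    assert 0.74 == pytest.approx(model.eval(df_grouped, j_groups), abs=0.01)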
def test_split(spark_context, hive_context):
df = (
hive_context
.range(1, 100 * 100)
# convert into 100 "queries" with 100 values each. We need a
# sufficiently large number of queries, or the split won't have
# enough data for partitions to even out.
.select(F.lit('foowiki').alias('wikiid'),
(F.col('id')/100).cast('int').alias('norm_query_id')))
with_folds = mjolnir.training.tuning.split(df, (0.8, 0.2), num_partitions=4).collect()
fold_0 = [row for row in with_folds if row.fold == 0]
fold_1 = [row for row in with_folds if row.fold == 1]
# Check the folds are pretty close to requested
total_len = float(len(with_folds))
assert 0.8 == pytest.approx(len(fold_0) / total_len, abs=0.015)
assert 0.2 == pytest.approx(len(fold_1) / total_len, abs=0.015)
# Check each norm query is only found on one side of the split
queries_in_0 = set([row.norm_query_id for row in fold_0])
queries_in_1 = set([row.norm_query_id for row in fold_1])
assert len(queries_in_0.intersection(queries_in_1)) == 0
def test_ndcg_doesnt_completely_fail(spark_context, hive_context):
"Mediocre test that just looks for a happy path"
df = spark_context.parallelize([
[4, 0, 'foo'],
[3, 1, 'foo'],
[0, 2, 'foo'],
[3, 3, 'foo'],
]).toDF(['label', 'hit_position', 'query'])
# Top 2 are in perfect order. Also this indirectly tests that
# k is really top 2, and not somehow top 3 or some such
ndcg_at_2 = mjolnir.metrics.ndcg(df, 2, query_cols=['query'])
assert 1.0 == ndcg_at_2
# Top 4 are slightly out. This value was checked by also
# calculating by hand.
ndcg_at_4 = mjolnir.metrics.ndcg(df, 4, query_cols=['query'])
assert 0.9788 == pytest.approx(ndcg_at_4, abs=0.0001)
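The 0.9788 can be reproduced by hand, assuming the usual exponential-gain DCG formula (gain 2**label - 1, discount 1/log2(rank + 1)); a sketch of that calculation:
import math
import pytest
labels = [4, 3, 0, 3]                  # relevance in hit_position order
ideal = sorted(labels, reverse=True)   # [4, 3, 3, 0]
dcg = lambda rels: sum((2 ** r - 1) / math.log2(i + 2) for i, r in enumerate(rels))
assert dcg(labels) / dcg(ideal) == pytest.approx(0.9788, abs=0.0001)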
def test_adjoint(dtype, shearletSystem):
"""Validate the adjoint."""
shape = tuple(shearletSystem['size'])
# load data
X = np.random.randn(*shape).astype(dtype)
# decomposition
coeffs = pyshearlab.SLsheardec2D(X, shearletSystem)
# adjoint
Xadj = pyshearlab.SLshearadjoint2D(coeffs, shearletSystem)
assert Xadj.dtype == X.dtype
assert Xadj.shape == X.shape
# <Ax, Ax> should equal <x, AtAx>
assert (pytest.approx(np.vdot(coeffs, coeffs), rel=1e-3, abs=0) ==
np.vdot(X, Xadj))
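The identity behind this check, <Ax, Ax> = <x, A^T A x>, holds for any linear operator; a small standalone version with a plain matrix standing in for the shearlet transform (illustrative only):
import numpy as np
import pytest
rng = np.random.default_rng(0)
A = rng.standard_normal((6, 4))        # any linear operator will do
x = rng.standard_normal(4)
Ax = A @ x
assert np.vdot(Ax, Ax) == pytest.approx(np.vdot(x, A.T @ Ax), rel=1e-12)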
def test_adjoint_of_inverse(dtype, shearletSystem):
"""Validate the adjoint of the inverse."""
X = np.random.randn(*shearletSystem['size']).astype(dtype)
# decomposition
coeffs = pyshearlab.SLsheardec2D(X, shearletSystem)
# reconstruction
Xrec = pyshearlab.SLshearrec2D(coeffs, shearletSystem)
Xrecadj = pyshearlab.SLshearrecadjoint2D(Xrec, shearletSystem)
assert Xrecadj.dtype == X.dtype
assert Xrecadj.shape == coeffs.shape
# <A^-1x, A^-1x> = <A^-* A^-1 x, x>.
assert (pytest.approx(np.vdot(Xrec, Xrec), rel=1e-3, abs=0) ==
np.vdot(Xrecadj, coeffs))
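Likewise for the inverse: <A^-1 x, A^-1 x> = <A^-T A^-1 x, x> for any invertible A. A quick standalone sanity check (illustrative only):
import numpy as np
import pytest
rng = np.random.default_rng(1)
A = rng.standard_normal((5, 5)) + 5 * np.eye(5)   # comfortably invertible
x = rng.standard_normal(5)
Ainv_x = np.linalg.solve(A, x)                    # A^-1 x
assert np.vdot(Ainv_x, Ainv_x) == pytest.approx(
    np.vdot(np.linalg.solve(A.T, Ainv_x), x), rel=1e-9)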
def test_hybrid_jaccard_similarity():
# use fixed test cases here only to test hybrid jaccard itself.
def test_function(m, n):
if m == 'a' and n == 'p':
return 0.7
if m == 'a' and n == 'q':
return 0.8
if m == 'b' and n == 'p':
return 0.5
if m == 'b' and n == 'q':
return 0.9
if m == 'c' and n == 'p':
return 0.2
if m == 'c' and n == 'q':
return 0.1
assert pytest.approx(hybrid_jaccard_similarity(set(['a','b','c']), set(['p', 'q']), function=test_function),
0.001) == 0.5333
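Where the 0.5333 comes from, assuming the usual hybrid-Jaccard definition (optimal one-to-one matching of pairs whose similarity clears the default 0.5 threshold, with the score sum divided by |A| + |B| - |matched pairs|): the best matching pairs a with p (0.7) and b with q (0.9), while 'c' matches nothing.
import pytest
assert (0.7 + 0.9) / (3 + 2 - 2) == pytest.approx(0.5333, rel=0.001)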
def test_forward_works_without_mask(self):
log_likelihood = self.crf(self.logits, self.tags).data[0]
# Now compute the log-likelihood manually
manual_log_likelihood = 0.0
# For each instance, manually compute the numerator
# (which is just the score for the logits and actual tags)
# and the denominator
# (which is the log-sum-exp of the scores for the logits across all possible tags)
for logits_i, tags_i in zip(self.logits, self.tags):
numerator = self.score(logits_i.data, tags_i.data)
all_scores = [self.score(logits_i.data, tags_j) for tags_j in itertools.product(range(5), repeat=3)]
denominator = math.log(sum(math.exp(score) for score in all_scores))
# And include them in the manual calculation.
manual_log_likelihood += numerator - denominator
# The manually computed log likelihood should equal the result of crf.forward.
assert manual_log_likelihood == approx(log_likelihood)
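The denominator above is formed as math.log(sum(math.exp(...))), which is fine for these small logits but overflows for large scores; a numerically safer equivalent (a sketch, not how the test or the CRF implements it) is the log-sum-exp trick:
import math
import numpy as np
import pytest
scores = [1000.0, 1001.0, 1002.0]      # math.exp(1000.0) alone would overflow
stable = np.logaddexp.reduce(scores)
shifted = max(scores) + math.log(sum(math.exp(s - max(scores)) for s in scores))
assert stable == pytest.approx(shifted)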
def test_write_data(tmpdir):
"""test writing data to disp.dat
"""
from create_disp_dat import open_dispout
from create_disp_dat import process_timestep_data
import struct
from pytest import approx
fname = tmpdir.join('testdata.dat')
dispout = open_dispout(fname.strpath)
data = []
data.append([float(0.0), float(0.1), float(0.2), float(0.3)])
data.append([float(1.0), float(1.1), float(1.2), float(1.3)])
process_timestep_data(data, dispout, writenode=True)
dispout.close()
with open(fname.strpath, 'rb') as f:
d = struct.unpack(8 * 'f', f.read(4 * 8))
assert d[0] == 0.0
assert d[1] == 1.0
assert d[2] == approx(0.1)
assert d[3] == approx(1.1)
assert d[7] == approx(1.3)
def assert_vec(vec, x, y, z, msg=''):
"""Asserts that Vec is equal to (x,y,z)."""
# Don't show in pytest tracebacks.
__tracebackhide__ = True
# Ignore slight variations
if not vec.x == pytest.approx(x):
failed = 'x'
elif not vec.y == pytest.approx(y):
failed = 'y'
elif not vec.z == pytest.approx(z):
failed = 'z'
else:
# Success!
return
new_msg = "{!r}.{} != ({}, {}, {})".format(vec, failed, x, y, z)
if msg:
new_msg += ': ' + msg
pytest.fail(new_msg)
def test_lift(self):
quad = Quadcopter(mass=0.5, motor_thrust=0.5)
assert quad.position == (0, 0, 0)
assert quad.rpy == (0, 0, 0)
#
# power all the motors, to lift the quad vertically. The motors give a
# total acceleration of 4g. Considering the gravity, we have a total
# net acceleration of 3g.
t = 1 # second
g = 9.81 # m/s**2
z = 0.5 * (3*g) * t**2 # d = 1/2 * a * t**2
#
quad.set_thrust(1, 1, 1, 1)
quad.run(t=1, dt=0.0001)
pos = quad.position
assert pos.x == 0
assert pos.y == 0
assert pos.z == approx(z, rel=1e-3) # the simulated z is a bit
# different than the computed one
assert quad.rpy == (0, 0, 0)
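Worked out, the expected altitude is z = 0.5 * (3 * 9.81) * 1**2 = 14.715 m, so rel=1e-3 lets the simulated value land within roughly +/-0.015 m of the ideal constant-acceleration answer (illustrative arithmetic only):
import pytest
g = 9.81
z = 0.5 * (3 * g) * 1 ** 2                       # 14.715 m
assert z == pytest.approx(14.715)
assert z * 1e-3 == pytest.approx(0.014715)       # slack granted by rel=1e-3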
def test_save_load_ride_pp():
filename = load_toy()[0]
my_ride_rpp = RidePowerProfile(max_duration_profile=1)
my_ride_rpp.fit(filename)
tmp_dir = mkdtemp()
try:
store_filename = os.path.join(tmp_dir, 'ride_rpp.pkl')
my_ride_rpp.save_to_pickles(store_filename)
obj = RidePowerProfile.load_from_pickles(store_filename)
assert_allclose(my_ride_rpp.data_, obj.data_)
assert_allclose(my_ride_rpp.data_norm_, obj.data_norm_)
assert my_ride_rpp.cyclist_weight == pytest.approx(obj.cyclist_weight)
assert my_ride_rpp.max_duration_profile == obj.max_duration_profile
assert my_ride_rpp.date_profile_ == obj.date_profile_
assert my_ride_rpp.filename_ == obj.filename_
finally:
shutil.rmtree(tmp_dir)
def test_ridepp_fit_w_weight():
filename = load_toy()[0]
ride_rpp = RidePowerProfile(max_duration_profile=1, cyclist_weight=60.)
ride_rpp.fit(filename)
data = np.array([
0., 500., 475.5, 469.33333333, 464., 463., 462.33333333, 461.71428571,
455.875, 450.55555556, 447.3, 444.81818182, 442.08333333, 439.53846154,
435.71428571, 432.06666667, 428.75, 424.35294118, 420.44444444,
413.78947368, 409.9, 407.23809524, 402.5, 399.91304348, 396.45833333,
394.76, 392.19230769, 388.62962963, 384.75, 380., 373.8, 367.70967742,
362.96875, 357.90909091, 354.02941176, 349.68571429, 345.83333333,
342.18918919, 338.36842105, 335.02564103, 331.375, 328.95121951,
325.64285714, 322.37209302, 318.09090909, 315.15555556, 312.23913043,
309.59574468, 307.08333333, 304.55102041, 301.9, 300.70588235, 300.5,
299.90566038, 300.03703704, 298.92727273, 298.10714286, 297.56140351,
296.48275862, 296.30508475
])
assert_allclose(ride_rpp.data_, data)
assert ride_rpp.data_norm_ == pytest.approx(data / 60.)
assert ride_rpp.cyclist_weight == pytest.approx(60.)
assert ride_rpp.max_duration_profile == 1
assert ride_rpp.date_profile_ == date(2014, 5, 7)
assert ride_rpp.filename_ == filename
def test_numpymatrix_transpose():
"""Passing a matrix instead of a list failed because the array is now a
view instead of the original data structure."""
s = np.matrix([
[0., 0., 1.,],
[0, 1, 2],
[1, 2, 0],
[2, 0, 0],
[1, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 0, 0]
]).T
m = dtw_c.distance_matrix_nogil(s)
m2 = dtw.distance_matrix(s)
correct = np.array([
[np.inf, 1.41421356, 1.73205081],
[np.inf, np.inf, 1.41421356],
[np.inf, np.inf, np.inf]])
assert m[0, 1] == pytest.approx(math.sqrt(2))
assert m2[0, 1] == pytest.approx(math.sqrt(2))
np.testing.assert_almost_equal(correct, m, decimal=4)
np.testing.assert_almost_equal(correct, m2, decimal=4)
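What the docstring alludes to: the .T of a numpy array or matrix is a strided view rather than a copy, and C extensions such as dtw_c typically expect a contiguous buffer; np.ascontiguousarray supplies one (a sketch of the distinction, not of the library's internals):
import numpy as np
a = np.arange(12.0).reshape(4, 3)
assert not a.T.flags['C_CONTIGUOUS']                     # .T is a view with swapped strides
assert np.ascontiguousarray(a.T).flags['C_CONTIGUOUS']   # an explicit copy restores contiguity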
def test_form_hlrf_correlation2():
def limit_state(x1, x2, x3):
"""From choi 2007 p. 224
x1 = Ma
x2 = P1
x3 = P2
"""
return x1 - x2 - 2*x3
X = StochasticModel(['norm', 50, 5],
['norm', 10, 2],
['norm', 15, 3])
x, beta, i = form_hlrf_correlation(limit_state, X, tol=1e-5)
pf = stats.norm.cdf(-beta)
assert pytest.approx(pf, rel=1e-2) == 0.1073
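For this case the FORM answer can be checked in closed form: g = x1 - x2 - 2*x3 is linear in independent normals, so g is itself normal with mean 50 - 10 - 2*15 = 10 and standard deviation sqrt(5**2 + 2**2 + (2*3)**2) = sqrt(65), giving pf = Phi(-10/sqrt(65)). A hand check (not part of the original test):
import math
import pytest
from scipy import stats
mean_g = 50 - 10 - 2 * 15                            # 10
std_g = math.sqrt(5 ** 2 + 2 ** 2 + (2 * 3) ** 2)    # sqrt(65)
assert stats.norm.cdf(-mean_g / std_g) == pytest.approx(0.1073, rel=1e-2)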
def test_form_hlrf_correlation3():
def limit_state(x1, x2, x3):
"""From choi 2007 p. 224
x1 = Ma
x2 = P1
x3 = P2
"""
return x1 - x2 - 2*x3
X = StochasticModel(['norm', 50, 5],
['norm', 10, 2],
['norm', 15, 3])
X.add_correlation(2, 3, .25)
x, beta, i = form_hlrf_correlation(limit_state, X, tol=1e-5)
pf = stats.norm.cdf(-beta)
assert pytest.approx(pf, rel=1e-2) == 0.1171
def test_lapjv_arr_loop():
shape = (7, 3)
cc = np.array([
2.593883482138951146e-01, 3.080381437461217620e-01,
1.976243020727339317e-01, 2.462740976049606068e-01,
4.203993396282833528e-01, 4.286184525458427985e-01,
1.706431415909629434e-01, 2.192929371231896185e-01,
2.117769622802734286e-01, 2.604267578125001315e-01])
ii = np.array([0, 0, 1, 1, 2, 2, 5, 5, 6, 6])
jj = np.array([0, 1, 0, 1, 1, 2, 0, 1, 0, 1])
cost = np.empty(shape)
cost[:] = 1000.
cost[ii, jj] = cc
opt, ind1, ind0 = lapjv(cost, extend_cost=True, return_cost=True)
assert opt == approx(0.8455356917416, 1e-10)
assert np.all(ind0 == [5, 1, 2]) or np.all(ind0 == [1, 5, 2])
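The expected optimum is simply the sum of the three selected entries of cc, and the two accepted column assignments give the same total, which is why either is allowed (hand check with the values copied from cc above):
from pytest import approx
# ind0 == [5, 1, 2]: cost[5, 0] + cost[1, 1] + cost[2, 2]
assert (1.706431415909629434e-01 + 2.462740976049606068e-01 +
        4.286184525458427985e-01) == approx(0.8455356917416, 1e-10)
# ind0 == [1, 5, 2]: cost[1, 0] + cost[5, 1] + cost[2, 2]
assert (1.976243020727339317e-01 + 2.192929371231896185e-01 +
        4.286184525458427985e-01) == approx(0.8455356917416, 1e-10)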
def test_lapmod_arr_loop():
shape = (7, 3)
cc = np.array([
2.593883482138951146e-01, 3.080381437461217620e-01,
1.976243020727339317e-01, 2.462740976049606068e-01,
4.203993396282833528e-01, 4.286184525458427985e-01,
1.706431415909629434e-01, 2.192929371231896185e-01,
2.117769622802734286e-01, 2.604267578125001315e-01])
ii = np.array([0, 0, 1, 1, 2, 2, 5, 5, 6, 6])
jj = np.array([0, 1, 0, 1, 1, 2, 0, 1, 0, 1])
cost_limit = 1e3
cc, ii, kk = prepare_sparse_cost(shape, cc, ii, jj, cost_limit)
opt, ind1, ind0 = lapmod(len(ii)-1, cc, ii, kk, return_cost=True)
ind1[ind1 >= shape[1]] = -1
ind0[ind0 >= shape[0]] = -1
ind1 = ind1[:shape[0]]
ind0 = ind0[:shape[1]]
assert opt == approx(4000.8455356917416, 1e-10)
assert np.all(ind0 == [5, 1, 2]) or np.all(ind0 == [1, 5, 2])
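The only difference from the dense result above is a constant offset: 4000.8455... = 4 * 1000 + 0.8455..., presumably the four rows that cannot be matched to a real column each being assigned at the cost_limit of 1e3 (the arithmetic below is certain; the interpretation is an assumption about prepare_sparse_cost):
from pytest import approx
assert 4 * 1e3 + 0.8455356917416 == approx(4000.8455356917416, 1e-10)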
def test_box_pr_curve():
approx = lambda prc: [(round(p, 2), round(r, 2), s) for p, r, s in prc]
boxes1 = [(1, 1, 3, 3), (4, 2, 2, 3), (5, 5, 2, 1)]
boxes2 = [(2, 1, 2, 3), (4, 3, 2, 3)]
scores1 = [0.5, 0.2, 0.1]
scores2 = [0.5, 0.2]
pr_curve = list(nm.box_pr_curve(boxes2, boxes2, scores2))
expected = [(1.0, 0.5, 0.5), (1.0, 1.0, 0.2)]
assert pr_curve == expected
pr_curve = list(nm.box_pr_curve(boxes1, boxes2, scores2))
expected = [(1.0, 0.33, 0.5), (1.0, 0.67, 0.2)]
assert approx(pr_curve) == expected
pr_curve = list(nm.box_pr_curve(boxes2, boxes1, scores1))
expected = [(1.0, 0.5, 0.5), (1.0, 1.0, 0.2), (0.67, 1.0, 0.1)]
assert approx(pr_curve) == expected
pr_curve = list(nm.box_pr_curve(boxes1, [], []))
assert pr_curve == []
pr_curve = list(nm.box_pr_curve([], boxes1, scores1))
assert pr_curve == []
def test_box_avg_precision():
boxes1 = [(1, 1, 3, 3), (4, 2, 2, 3), (5, 5, 2, 1)]
scores1 = [0.5, 0.2, 0.1]
boxes2 = [(2, 1, 2, 3), (4, 3, 2, 3)]
scores2 = [0.5, 0.2]
ap = nm.box_avg_precision(boxes2, boxes2, scores2)
assert ap == 1.0
ap = nm.box_avg_precision(boxes1, boxes2, scores2)
assert ap == approx(0.63, abs=1e-2)
ap = nm.box_avg_precision(boxes2, boxes1, scores1)
assert ap == 1.0
ap = nm.box_avg_precision(boxes1, [], [])
assert ap == 0.0
ap = nm.box_avg_precision([], boxes1, scores1)
assert ap == 0.0
def test_box_mean_avg_precision():
boxes1 = [(1, 1, 3, 3), (4, 2, 2, 3), (5, 5, 2, 1)]
labels1 = ['class1', 'class2', 'class1']
scores1 = [0.5, 0.2, 0.1]
boxes2 = [(2, 1, 2, 3), (4, 3, 2, 3)]
labels2 = ['class1', 'class2']
scores2 = [0.5, 0.2]
mAP = nm.box_mean_avg_precision(boxes1, labels1, boxes1, labels1, scores1)
assert mAP == 1.0
mAP = nm.box_mean_avg_precision(boxes2, labels2, boxes2, labels2, scores2)
assert mAP == 1.0
mAP = nm.box_mean_avg_precision(boxes1, labels1, [], [], [])
assert mAP == 0.0
mAP = nm.box_mean_avg_precision([], [], boxes1, labels1, scores1)
assert mAP == 0.0
mAP = nm.box_mean_avg_precision(boxes1, labels1, boxes2, labels2, scores2)
assert mAP == approx(0.77, abs=1e-2)
def test_pix_center(self, galaxy):
"""Tests mode='pix', xyorig='center'."""
coords = [[0, 0],
[5, 3],
[-5, 1],
[1, -5],
[10, 10],
[-10, -10],
[1.5, 2.5],
[0.4, 0.25]]
expected = [[17, 17],
[20, 22],
[18, 12],
[12, 18],
[27, 27],
[7, 7],
[20, 18],
[17, 17]]
cubeCoords = convertCoords(coords, mode='pix', shape=galaxy.shape)
assert cubeCoords == pytest.approx(np.array(expected))
def test_pix_lower(self, galaxy):
"""Tests mode='pix', xyorig='lower'."""
coords = [[0, 0],
[5, 3],
[10, 10],
[1.5, 2.5],
[0.4, 0.25]]
expected = [[0, 0],
[3, 5],
[10, 10],
[2, 2],
[0, 0]]
cubeCoords = convertCoords(coords, mode='pix', shape=galaxy.shape,
xyorig='lower')
assert cubeCoords == pytest.approx(np.array(expected))
def test_map(self, map_, galaxy):
assert map_.release == galaxy.release
assert tuple(map_.shape) == tuple(galaxy.shape)
assert map_.value.shape == tuple(galaxy.shape)
assert map_.ivar.shape == tuple(galaxy.shape)
assert map_.mask.shape == tuple(galaxy.shape)
assert (map_.masked.data == map_.value).all()
assert (map_.masked.mask == map_.mask.astype(bool)).all()
assert map_.snr == pytest.approx(np.abs(map_.value * np.sqrt(map_.ivar)))
assert map_.header['BUNIT'] == map_.unit
assert isinstance(map_.header, astropy.io.fits.header.Header)
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail(self.message)
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
if sys.version_info[0] == 2 and suppress_exception:
sys.exc_clear()
return suppress_exception
# builtin pytest.approx helper
def test_build_from_vectors(self):
# Can build it with int or float or nothing
msg_tmpl = "%s vs (expected) %s (args=%s)"
for args, expected_normal, expected_d in (
[(), (0, 0, 0), 0],
[(Vector3(0, 0, 0), Vector3(4, 5, 6), Vector3(7, 8, 9)), (0.40824827551841736, -0.8164965510368347, 0.40824827551841736), 0.0],
):
v = Plane.build_from_vectors(*args)
normal = (pytest.approx(v.normal.x), pytest.approx(v.normal.y), pytest.approx(v.normal.z))
assert normal == expected_normal, msg_tmpl % (v.normal, expected_normal, args)
assert v.d == expected_d, msg_tmpl % (v.d, expected_d, args)
with pytest.raises(TypeError):
Plane.build_from_vectors("a", Vector3(4, 5, 6), Vector3(7, 8, 9))
with pytest.raises(TypeError):
Plane.build_from_vectors(Vector3(1, 2, 3), "b", Vector3(7, 8, 9))
with pytest.raises(TypeError):
Plane.build_from_vectors(Vector3(1, 2, 3), Vector3(4, 5, 6), "c")
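The expected normal in the non-trivial case is the unit cross product of the two edge vectors (taken here in one particular order; the sign depends on the winding convention the library uses), and d stays 0 because the plane passes through the origin. A quick hand check (illustrative only):
import numpy as np
import pytest
n = np.cross((7.0, 8.0, 9.0), (4.0, 5.0, 6.0))   # (3, -6, 3)
n = n / np.linalg.norm(n)
assert tuple(n) == pytest.approx((0.40824829, -0.81649658, 0.40824829))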
def test_weather(self):
"""Test for weather.py"""
epw_name = "SGP_Singapore.486980_IWEC.epw"
climate_file = os.path.join(self.DIR_EPW_PATH, epw_name)
self.weather = UWG.Weather(climate_file,self.simTime.timeInitial,self.simTime.timeFinal)
# Weather Tests
assert len(self.weather.staDif) == pytest.approx(self.simTime.timeFinal - self.simTime.timeInitial + 1, abs=1e-6)
assert len(self.weather.staHum) == pytest.approx(self.simTime.timeFinal - self.simTime.timeInitial + 1, abs=1e-6)
assert len(self.weather.staTemp) == pytest.approx(self.simTime.timeFinal - self.simTime.timeInitial + 1, abs=1e-6)
assert self.weather.staTemp[3] == pytest.approx(24.+273.15, abs=1e-6)
assert self.weather.staTemp[-1] == pytest.approx(27.+273.15, abs=1e-6)
assert self.weather.staUdir[2] == pytest.approx(270, abs=1e-1) # 270 deg
assert self.weather.staUmod[4] == pytest.approx(.5, abs=1e-6) # 0.5 m/s
assert self.weather.staPres[10] == pytest.approx(100600., abs=1e-1)
assert self.weather.staInfra[13] == pytest.approx(428., abs=1e-1)
assert self.weather.staDif[6] == pytest.approx(0., abs=1e-3)
assert self.weather.staDif[8] == pytest.approx(95., abs=1e-6)
assert self.weather.staRobs[8] == pytest.approx(0.0, abs=1e-3) # 0. mm/hr
def test_read_epw(self):
self.setup_init_uwg()
self.uwg.read_epw()
# test header
assert self.uwg._header[0][0] == "LOCATION"
assert self.uwg._header[0][1] == "SINGAPORE"
assert self.uwg.lat == pytest.approx(1.37, abs=1e-3)
assert self.uwg.lon == pytest.approx(103.98, abs=1e-3)
assert self.uwg.GMT == pytest.approx(8, abs=1e-3)
# test soil data
assert self.uwg.nSoil == pytest.approx(3, abs=1e-2)
# test soil depths
assert self.uwg.depth_soil[0][0] == pytest.approx(0.5, abs=1e-3)
assert self.uwg.depth_soil[1][0] == pytest.approx(2., abs=1e-3)
assert self.uwg.depth_soil[2][0] == pytest.approx(4., abs=1e-3)
# test soil temps over 12 months
assert self.uwg.Tsoil[0][0] == pytest.approx(27.55+273.15, abs=1e-3)
assert self.uwg.Tsoil[1][2] == pytest.approx(28.01+273.15, abs=1e-3)
assert self.uwg.Tsoil[2][11] == pytest.approx(27.07+273.15, abs=1e-3)
# test time step in weather file
assert self.uwg.epwinput[0][0] == "1989"
assert float(self.uwg.epwinput[3][6]) == pytest.approx(24.1,abs=1e-3)
def test_sleep(command):
# measure the round-trip-time
timestamp = monotonic()
stdout, stderr, returncode = command.run('true')
elapsed_true = monotonic() - timestamp
assert returncode == 0
assert len(stdout) == 0
assert len(stderr) == 0
timestamp = monotonic()
stdout, stderr, returncode = command.run('sleep 1')
elapsed_sleep = monotonic() - timestamp
assert returncode == 0
assert len(stdout) == 0
assert len(stderr) == 0
assert elapsed_true < elapsed_sleep
assert elapsed_sleep - elapsed_true == approx(1.0, abs=1e-2)
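The same overhead-subtraction idea works without the command fixture: time a no-op to estimate measurement overhead, then subtract it from a timed sleep (a standalone sketch that reuses the fairly tight 10 ms tolerance from above):
from time import monotonic, sleep
import pytest
t0 = monotonic()
baseline = monotonic() - t0            # cost of the timing calls themselves
t0 = monotonic()
sleep(1)
elapsed = monotonic() - t0
assert elapsed - baseline == pytest.approx(1.0, abs=1e-2)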