def test_reportchars_all(testdir):
    testdir.makepyfile("""
        import pytest
        def test_1():
            assert 0
        @pytest.mark.xfail
        def test_2():
            assert 0
        @pytest.mark.xfail
        def test_3():
            pass
        def test_4():
            pytest.skip("four")
    """)
    result = testdir.runpytest("-ra")
    result.stdout.fnmatch_lines([
        "FAIL*test_1*",
        "SKIP*four*",
        "XFAIL*test_2*",
        "XPASS*test_3*",
    ])
def test_xfail_skipif_with_globals(testdir):
    testdir.makepyfile("""
        import pytest
        x = 3
        @pytest.mark.skipif("x == 3")
        def test_skip1():
            pass
        @pytest.mark.xfail("x == 3")
        def test_boolean():
            assert 0
    """)
    result = testdir.runpytest("-rsx")
    result.stdout.fnmatch_lines([
        "*SKIP*x == 3*",
        "*XFAIL*test_boolean*",
        "*x == 3*",
    ])
def test_imperativeskip_on_xfail_test(testdir):
    testdir.makepyfile("""
        import pytest
        @pytest.mark.xfail
        def test_that_fails():
            assert 0
        @pytest.mark.skipif("True")
        def test_hello():
            pass
    """)
    testdir.makeconftest("""
        import pytest
        def pytest_runtest_setup(item):
            pytest.skip("abc")
    """)
    result = testdir.runpytest("-rsxX")
    result.stdout.fnmatch_lines_random("""
        *SKIP*abc*
        *SKIP*condition: True*
        *2 skipped*
    """)
def test_capturing_unicode(testdir, method):
    if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2):
        pytest.xfail("does not work on pypy < 2.2")
    if sys.version_info >= (3, 0):
        obj = "'b\u00f6y'"
    else:
        obj = "u'\u00f6y'"
    testdir.makepyfile("""
        # coding=utf8
        # taken from issue 227 from nosetests
        def test_unicode():
            import sys
            print (sys.stdout)
            print (%s)
    """ % obj)
    result = testdir.runpytest("--capture=%s" % method)
    result.stdout.fnmatch_lines([
        "*1 passed*"
    ])
def spawn(self, cmd, expect_timeout=10.0):
    """Run a command using pexpect.

    The pexpect child is returned.
    """
    pexpect = pytest.importorskip("pexpect", "3.0")
    if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
        pytest.skip("pypy-64 bit not supported")
    if sys.platform == "darwin":
        pytest.xfail("pexpect does not work reliably on darwin?!")
    if sys.platform.startswith("freebsd"):
        pytest.xfail("pexpect does not work reliably on freebsd")
    logfile = self.tmpdir.join("spawn.out").open("wb")
    child = pexpect.spawn(cmd, logfile=logfile)
    self.request.addfinalizer(logfile.close)
    child.timeout = expect_timeout
    return child
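# A test might drive the spawn() helper above roughly as follows. This is a
# minimal sketch, not from the source: it assumes a testdir-style fixture
# exposing spawn() and a pytest executable available on PATH.
def test_spawn_usage(testdir):
    script = testdir.makepyfile("""
        def test_fails():
            assert 0
    """)
    child = testdir.spawn("pytest --pdb %s" % script)  # uses spawn() shown above
    child.expect("(Pdb)")   # --pdb drops into the debugger on the failure
    child.sendline("c")     # continue, letting the spawned run finish
    child.wait()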
def pytest_configure(config):
    if config.option.runxfail:
        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = XFailed
        setattr(pytest, "xfail", nop)

    config.addinivalue_line("markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value. Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. See "
        "http://pytest.org/latest/skipping.html"
    )
    config.addinivalue_line("markers",
        "xfail(condition, reason=None, run=True, raises=None): mark the test function "
        "as an expected failure if eval(condition) has a True value. "
        "Optionally specify a reason for better reporting and run=False if "
        "you don't even want to execute the test function. If only specific "
        "exception(s) are expected, you can list them in raises, and if the test fails "
        "in other ways, it will be reported as a true failure. "
        "See http://pytest.org/latest/skipping.html"
    )
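# For reference, the markers documented above are used like this. The small
# test module below is illustrative only and not part of the source.
import sys
import pytest

@pytest.mark.skipif('sys.platform == "win32"')
def test_posix_only():
    assert not sys.platform.startswith("win")

@pytest.mark.xfail(raises=ZeroDivisionError, reason="integer division by zero")
def test_known_bug():
    1 / 0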
def test_status_update(self, path1):
    # not a mark because the global "pytestmark" will end up overwriting a mark here
    pytest.xfail("svn-1.7 has buggy 'status --xml' output")
    r = path1
    try:
        r.update(rev=1)
        s = r.status(updates=1, rec=1)
        # Comparing just the file names, because paths are unpredictable
        # on Windows (long vs. 8.3 paths).
        import pprint
        pprint.pprint(s.allpath())
        assert r.join('anotherfile').basename in [
            item.basename for item in s.update_available]
        # assert len(s.update_available) == 1
    finally:
        r.update()
def ctc_model(a_backend):
    """Returns a model which uses the CTC loss function."""
    if a_backend.get_name() == 'pytorch':
        pytest.xfail('Backend "{}" does not use a CTC loss function.'
                     .format(a_backend.get_name()))
    output_timesteps = 10
    vocab_size = 4
    return model_with_containers(
        backend=a_backend,
        containers=[
            {'input': {'shape': [output_timesteps, 2]}, 'name': 'TEST_input'},
            {'recurrent': {'size': vocab_size + 1, 'sequence': True}},
            {'activation': 'softmax', 'name': 'TEST_output'}
        ]
    )
###############################################################################
def test_expect_default_handler_unknown(loop, test_client):
    """Test the default Expect handler for an unknown Expect value.

    A server that does not understand or is unable to comply with any of
    the expectation values in the Expect field of a request MUST respond
    with appropriate error status. The server MUST respond with a 417
    (Expectation Failed) status if any of the expectations cannot be met
    or, if there are other problems with the request, some other 4xx
    status.

    http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
    """
    @asyncio.coroutine
    def handler(request):
        yield from request.post()
        pytest.xfail('Handler should not proceed to this point in case of '
                     'unknown Expect header')

    app = web.Application()
    app.router.add_post('/', handler)
    client = yield from test_client(app)

    resp = yield from client.post('/', headers={'Expect': 'SPAM'})
    assert 417 == resp.status
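# For contrast, a recognised expectation lets the request reach the handler.
# The companion check below is a sketch written in the same style, assuming the
# same loop/test_client fixtures; it is not part of the source.
def test_expect_100_continue_is_handled(loop, test_client):
    @asyncio.coroutine
    def handler(request):
        yield from request.post()
        return web.Response()

    app = web.Application()
    app.router.add_post('/', handler)
    client = yield from test_client(app)

    resp = yield from client.post('/', data={'spam': 'eggs'},
                                  expect100=True)  # sends "Expect: 100-continue"
    assert 200 == resp.status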
def test_item(girder_client, private_folder):
    item = None
    try:
        item = girder_client.createItem(private_folder['_id'], 'test')
        yield item
    finally:
        if item is not None:
            girder_client.delete('item/%s' % item['_id'])
# pytest hooks for ordering test items after they have been collected
# and ensuring tests marked with sanitycheck run first.
# pytest_runtest_makereport and pytest_runtest_setup are used to xfail
# all tests if any of the sanitychecks fail.
def pytest_runtest_setup(item):
    session = item.parent.parent
    sanitycheckfailed = getattr(session, '_sanitycheckfailed', None)
    if sanitycheckfailed is not None:
        pytest.xfail('previous test failed (%s)' % sanitycheckfailed.name)
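# The pytest_runtest_makereport side mentioned in the comment above is not
# shown on this page. A minimal sketch of what it could look like, assuming
# the sanity checks carry a 'sanitycheck' marker and the same
# session._sanitycheckfailed attribute:
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    report = outcome.get_result()
    # Record the first failing sanitycheck so pytest_runtest_setup() above
    # can xfail every test that runs after it.
    if report.failed and item.get_marker('sanitycheck') is not None:
        session = item.parent.parent
        if getattr(session, '_sanitycheckfailed', None) is None:
            session._sanitycheckfailed = item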
def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
try:
pytest.xfail(str(reason))
except pytest.xfail.Exception:
self._addexcinfo(sys.exc_info())
def pytest_addoption(parser):
    group = parser.getgroup("general")
    group.addoption('--runxfail',
                    action="store_true", dest="runxfail", default=False,
                    help="run tests even if they are marked xfail")

    parser.addini("xfail_strict", "default for the strict parameter of xfail "
                                  "markers when not given explicitly (default: "
                                  "False)",
                  default=False,
                  type="bool")
def pytest_configure(config):
    if config.option.runxfail:
        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = XFailed
        setattr(pytest, "xfail", nop)

    config.addinivalue_line("markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        "Example: skip(reason=\"no way of currently testing this\") skips the "
        "test."
    )
    config.addinivalue_line("markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value. Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. See "
        "http://pytest.org/latest/skipping.html"
    )
    config.addinivalue_line("markers",
        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
        "mark the test function as an expected failure if eval(condition) "
        "has a True value. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See http://pytest.org/latest/skipping.html"
    )
def pytest_namespace():
    return dict(xfail=xfail)
def check_xfail_no_run(item):
    """Check xfail(run=False)."""
    if not item.config.option.runxfail:
        evalxfail = item._evalxfail
        if evalxfail.istrue():
            if not evalxfail.get('run', True):
                pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
def check_strict_xfail(pyfuncitem):
    """Check xfail(strict=True) for the given PASSING test."""
    evalxfail = pyfuncitem._evalxfail
    if evalxfail.istrue():
        strict_default = pyfuncitem.config.getini('xfail_strict')
        is_strict_xfail = evalxfail.get('strict', strict_default)
        if is_strict_xfail:
            del pyfuncitem._evalxfail
            explanation = evalxfail.getexplanation()
            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
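# The corresponding marker usage: with strict=True, an unexpected pass is
# turned by check_strict_xfail() into a hard failure instead of an XPASS
# (illustrative example, not from the source):
@pytest.mark.xfail(strict=True, reason="known bug, not fixed yet")
def test_fixed_by_accident():
    # If this passes, the run fails with "[XPASS(strict)] known bug, not fixed yet".
    assert 1 + 1 == 2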
def pytest_report_teststatus(report):
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "xfail"
        elif report.passed:
            return "xpassed", "X", ("XPASS", {'yellow': True})
# called by the terminalreporter instance/plugin
def test_dsdf_exact(eval_buffer):
    """Following the direction ends up on the surface in a single step.

    This test conditionally xfails if the DSDF of this shape is not exact;
    `test_dsdf_approximate` ensures that the approximation is correct."""
    for v in eval_buffer.array:
        if v[1]["w"] != pytest.approx(0, abs=1e-5):
            pytest.xfail()
def test_dsdf_approximate(eval_buffer):
    """Following the direction must get closer to the surface with each step."""
    for v in eval_buffer.array:
        if v[0]["w"] == pytest.approx(0, abs=1e-5) or abs(v[1]["w"]) < abs(v[0]["w"]):
            pass
        else:
            pytest.xfail()