def test_feature_union_fit_failure():
    X, y = make_classification(n_samples=100, n_features=10, random_state=0)
    pipe = Pipeline([('union', FeatureUnion([('good', MockClassifier()),
                                             ('bad', FailingClassifier())],
                                            transformer_weights={'bad': 0.5})),
                     ('clf', MockClassifier())])
    grid = {'union__bad__parameter': [0, 1, 2]}
    gs = dcv.GridSearchCV(pipe, grid, refit=False, scoring=None)

    # Check that failure raises if error_score is `'raise'`
    with pytest.raises(ValueError):
        gs.fit(X, y)

    # Check that grid scores were set to error_score on failure
    gs.error_score = float('nan')
    with pytest.warns(FitFailedWarning):
        gs.fit(X, y)
    check_scores_all_nan(gs, 'union__bad__parameter')
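The check_scores_all_nan helper used by these grid-search examples is defined elsewhere in the test suite and not shown on this page. A minimal sketch of what it presumably asserts, assuming scikit-learn-style cv_results_ keys (the name and signature mirror the calls above, but the body is a hypothetical reconstruction, not the project's actual helper):

import math

def check_scores_all_nan(gs, bad_param, score_key='score'):
    # Hypothetical helper: every candidate varies only ``bad_param``, and every
    # mean test score should be NaN because error_score was applied after each
    # fit failure.
    assert all(bad_param in params for params in gs.cv_results_['params'])
    assert all(math.isnan(s) for s in gs.cv_results_['mean_test_' + score_key])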
def test_syntax_warning():
    # exceed the 4 extra lines which are normally checked
    with pytest.warns(SyntaxWarning) as warning_checker:
        v = debug.format(
            abs(
                abs(
                    abs(
                        abs(
                            -1
                        )
                    )
                )
            )
        )
    assert len(warning_checker) == 1
    warning = warning_checker.list[0]
    print(warning.message)
    assert 'Error: unexpected EOF while parsing (test_expr_render.py' in str(warning.message)

    # check only the original code is included in the warning
    assert '-1\n"' in str(warning.message)
    s = re.sub(r':\d{2,}', ':<line no>', str(v))
    assert (
        'tests/test_expr_render.py:<line no> test_syntax_warning\n 1 (int)'
    ) == s
def test_no_syntax_warning():
    # exceed the 4 extra lines which are normally checked
    debug_ = Debug(warnings=False)
    with pytest.warns(None) as warning_checker:
        v = debug_.format(
            abs(
                abs(
                    abs(
                        abs(
                            -1
                        )
                    )
                )
            )
        )
    assert 'test_no_syntax_warning\n 1 (int)' in str(v)
    assert len(warning_checker) == 0
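Note that pytest.warns(None), used above to assert that no warning is raised, was deprecated in pytest 7.0 and removed in later releases. On newer pytest versions an equivalent "no warnings" check can be written with the standard library alone; a small sketch (not part of the original test suite) might be:

import warnings

def assert_no_warnings(func, *args, **kwargs):
    # Fail if calling ``func`` emits any warning at all; otherwise return its result.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        result = func(*args, **kwargs)
    assert not caught, 'unexpected warnings: %r' % [str(w.message) for w in caught]
    return result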
def test_filter_data_zero_cols(temp_dir):
    """Tests that columns are correctly removed when they are empty"""
    tsd = TimeSeriesData(temp_dir + "/filter_cols.h5")
    data = [1, 3, 5, 1, 1, 2, 5, 6, 5, 6, 2, 8]
    indptr = [0, 3, 4, 8, 12]
    indices = [1, 3, 4, 2, 0, 1, 3, 4, 0, 1, 3, 4]
    sequences = ["qqq", "abc", "zzz", "hi"]
    tsd.resize_data(len(indptr) - 1, len(indptr), len(data))
    tsd.add_timeseries_data(data, indices, indptr, sequences)
    tsd.insert_array_by_chunks("samples/names", ["a", "b", "c", "d", "e"])
    tsd.insert_array_by_chunks("samples/time", [0, 10, 15, 30, 35],
                               transform_func=int)
    tsd.insert_array_by_chunks("samples/mask", ["1", "1", "1", "1", "1"])
    outfile = temp_dir + "/filtered_zeros.h5"
    with pytest.warns(UserWarning):
        tsd.filter_data(outfile, "abundance", 2)
    filtered_tsd = TimeSeriesData(temp_dir + "/filtered_zeros.h5")
    assert (filtered_tsd.h5_table["samples/names"][:] ==
            np.array([b'a', b'b', b'd', b'e'])).all()
    assert (filtered_tsd.h5_table["samples/mask"][:] == [b'1']*4).all()
def test_stat_bin():
    x = [1, 2, 3]
    y = [1, 2, 3]
    df = pd.DataFrame({'x': x, 'y': y})

    # About the default bins
    gg = ggplot(aes(x='x'), df) + stat_bin()
    if not six.PY2:
        # Test fails on PY2 when all the tests are run,
        # but not when only this test module is run
        with pytest.warns(None) as record:
            gg.draw_test()
        res = ('bins' in str(item.message).lower() for item in record)
        assert any(res)

    # About ignoring the y aesthetic
    gg = ggplot(aes(x='x', y='y'), df) + stat_bin()
    with pytest.raises(PlotnineError):
        gg.draw_test()
def test_removes_infinite_values():
    df = mtcars.copy()
    df.loc[[0, 5], 'wt'] = [np.inf, -np.inf]
    p = ggplot(df, aes(x='wt')) + geom_bar()

    with pytest.warns(UserWarning) as record:
        p._build()

    def removed_2_row_with_infinites(record):
        for item in record:
            msg = str(item.message).lower()
            if '2 rows' in msg and 'non-finite' in msg:
                return True
        return False

    assert removed_2_row_with_infinites(record)
def test_warning_api_with_different_selectors(version, route):
    other_version = Version(
        name="2.0",
        selector=version_selector("application/vnd.test.v2+json"),
        routes=(
            route,
        ),
    )
    api = API(
        name="Test API",
        versions=(
            version,
            other_version,
        )
    )
    with pytest.warns(SpecificationWarning):
        api.validate()
def test_overwrite_warning():
    class FooLang(Lang):
        pass

    assert by_name('foolang') is FooLang
    assert by_name('foolang').name == 'FooLang'

    with pytest.warns(UserWarning) as record:
        class BarLang(Lang, name='Foolang'):
            pass

    assert len(record) == 1
    message = str(record[0].message)
    assert "foolang" in message
    assert FooLang.__name__ in message
    assert "overwrites" in message
    assert BarLang.__name__ in message
    assert by_name('foolang') is BarLang
    assert by_name('foolang').name == 'Foolang'
def test_simple_filter(self):
    input_annotations_group = [
        np.array([
            [  0,   0, 10, 10],
            [150, 150, 50, 50]
        ]),
    ]
    input_image = np.zeros((500, 500, 3))
    expected_annotations_group = [
        np.array([
            [0, 0, 10, 10],
        ]),
    ]

    simple_generator = SimpleGenerator(input_annotations_group)
    annotations_group = simple_generator.load_annotations_group(simple_generator.groups[0])

    # expect a UserWarning
    with pytest.warns(UserWarning):
        image_group, annotations_group = simple_generator.filter_annotations([input_image], annotations_group, simple_generator.groups[0])

    np.testing.assert_equal(expected_annotations_group, annotations_group)
def test_complete(self):
    input_annotations_group = [
        np.array([
            [  0,   0, 50, 50, 0],  # one object of class 0
            [150, 150, 50, 50, 1],  # one object of class 1 with an invalid box
        ], dtype=keras.backend.floatx()),
    ]
    input_image = np.zeros((500, 500, 3), dtype=np.uint8)
    expected_annotations_group = [
        np.array([
            [0, 0, 10, 10],
        ]),
    ]

    simple_generator = SimpleGenerator(input_annotations_group, image=input_image, num_classes=2)

    # expect a UserWarning
    with pytest.warns(UserWarning):
        _, [_, labels_batch] = simple_generator.next()

    # test that only object with class 0 is present in labels_batch
    labels = np.unique(np.argmax(labels_batch == 1, axis=2))
    assert (len(labels) == 1 and labels[0] == 0), 'Expected only class 0 to be present, but got classes {}'.format(labels)
def test_load_pkcs12_text_passphrase(self):
    """
    A PKCS12 string generated using the openssl command line can be loaded
    with `load_pkcs12` and its components extracted and examined.
    Using text as passphrase instead of bytes. DeprecationWarning expected.
    """
    pem = client_key_pem + client_cert_pem
    passwd = b"whatever"
    p12_str = _runopenssl(pem, b"pkcs12", b"-export", b"-clcerts",
                          b"-passout", b"pass:" + passwd)
    with pytest.warns(DeprecationWarning) as w:
        simplefilter("always")
        p12 = load_pkcs12(p12_str, passphrase=b"whatever".decode("ascii"))
        assert (
            "{0} for passphrase is no longer accepted, use bytes".format(
                WARNING_TYPE_EXPECTED
            ) == str(w[-1].message))
    self.verify_pkcs12_container(p12)
def test_export_without_bytes(self):
    """
    Test `PKCS12.export` with text not bytes as passphrase
    """
    p12 = self.gen_pkcs12(server_cert_pem, server_key_pem, root_cert_pem)

    with pytest.warns(DeprecationWarning) as w:
        simplefilter("always")
        dumped_p12 = p12.export(passphrase=b"randomtext".decode("ascii"))
        assert (
            "{0} for passphrase is no longer accepted, use bytes".format(
                WARNING_TYPE_EXPECTED
            ) == str(w[-1].message))
    self.check_recovery(
        dumped_p12,
        key=server_key_pem,
        cert=server_cert_pem,
        passwd=b"randomtext"
    )
def test_does_authorize_unsupported_authorizer(self, demo_app_auth,
                                               lambda_context_args,
                                               create_event):
    authorizer = LocalGatewayAuthorizer(demo_app_auth)
    path = '/iam'
    event = create_event(path, 'GET', {})
    context = LambdaContext(*lambda_context_args)
    with pytest.warns(None) as recorded_warnings:
        new_event, new_context = authorizer.authorize(path, event, context)
    assert event == new_event
    assert context == new_context
    assert len(recorded_warnings) == 1
    warning = recorded_warnings[0]
    assert issubclass(warning.category, UserWarning)
    assert ('IAMAuthorizer is not a supported in local '
            'mode. All requests made against a route will be authorized'
            ' to allow local testing.') in str(warning.message)
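On pytest versions where pytest.warns(None) is no longer available, the same behaviour can be asserted directly with pytest.warns(UserWarning). A rough equivalent of the test above, assuming the same fixtures and that exactly this warning text is emitted (the test name below is hypothetical):

def test_does_authorize_unsupported_authorizer_pytest7(self, demo_app_auth,
                                                       lambda_context_args,
                                                       create_event):
    # Let pytest.warns both capture and assert the expected UserWarning.
    authorizer = LocalGatewayAuthorizer(demo_app_auth)
    path = '/iam'
    event = create_event(path, 'GET', {})
    context = LambdaContext(*lambda_context_args)
    with pytest.warns(UserWarning, match='IAMAuthorizer is not a supported'):
        new_event, new_context = authorizer.authorize(path, event, context)
    assert event == new_event
    assert context == new_context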
def test_update_equation_function(switch_fixture):
    sys = SwitchedSystem(
        dim_output=1,
        event_variable_equation_function=event_variable_equation_function,
        event_bounds=switch_fixture[0],
        state_equations_functions=switch_fixture[1],
        output_equations_functions=switch_fixture[2],
    )
    assert not hasattr(sys, 'condition_idx')
    sys.prepare_to_integrate()
    assert sys.condition_idx is None

    sys.update_equation_function(np.random.rand(1), bounds_min)
    assert sys.condition_idx == 0
    for cnd_idx, zero in enumerate(switch_fixture[0]):
        sys.update_equation_function(np.random.rand(1), zero)
        assert sys.condition_idx == cnd_idx + 1
    if len(switch_fixture[0]) > 1:
        with pytest.warns(UserWarning):
            sys.update_equation_function(np.random.rand(1), bounds_min)
def test_feature_union_fit_failure_multiple_metrics():
    scoring = {"score_1": _passthrough_scorer, "score_2": _passthrough_scorer}
    X, y = make_classification(n_samples=100, n_features=10, random_state=0)
    pipe = Pipeline([('union', FeatureUnion([('good', MockClassifier()),
                                             ('bad', FailingClassifier())],
                                            transformer_weights={'bad': 0.5})),
                     ('clf', MockClassifier())])
    grid = {'union__bad__parameter': [0, 1, 2]}
    gs = dcv.GridSearchCV(pipe, grid, refit=False, scoring=scoring)

    # Check that failure raises if error_score is `'raise'`
    with pytest.raises(ValueError):
        gs.fit(X, y)

    # Check that grid scores were set to error_score on failure
    gs.error_score = float('nan')
    with pytest.warns(FitFailedWarning):
        gs.fit(X, y)
    for key in scoring:
        check_scores_all_nan(gs, 'union__bad__parameter', score_key=key)
def test_pipeline_fit_failure():
    X, y = make_classification(n_samples=100, n_features=10, random_state=0)
    pipe = Pipeline([('bad', FailingClassifier()),
                     ('good1', MockClassifier()),
                     ('good2', MockClassifier())])
    grid = {'bad__parameter': [0, 1, 2]}
    gs = dcv.GridSearchCV(pipe, grid, refit=False)

    # Check that failure raises if error_score is `'raise'`
    with pytest.raises(ValueError):
        gs.fit(X, y)

    # Check that grid scores were set to error_score on failure
    gs.error_score = float('nan')
    with pytest.warns(FitFailedWarning):
        gs.fit(X, y)
    check_scores_all_nan(gs, 'bad__parameter')
def test_spa_vocab():
    # create a model without a vocab and check that it is empty
    model = spa.Network()
    assert len(model.vocabs) == 0

    # create a model with a vocab and check that it's filled
    va = spa.Vocabulary(16)
    va.populate("PANTS")
    vb = spa.Vocabulary(32)
    vb.populate("SHOES")
    model = spa.Network(vocabs=VocabularyMap([va, vb]))
    assert list(model.vocabs[16].keys()) == ["PANTS"]
    assert list(model.vocabs[32].keys()) == ["SHOES"]

    # warning on vocabs with duplicate dimensions
    vc = spa.Vocabulary(16)
    vc.populate("SOCKS")
    with pytest.warns(UserWarning):
        model = spa.Network(vocabs=VocabularyMap([va, vb, vc]))
    assert list(model.vocabs[16].keys()) == ["SOCKS"]
    assert list(model.vocabs[32].keys()) == ["SHOES"]
def test_scalar_instead_of_list_triggers_specificationwarning():
    with pytest.warns(oparl.SpecificationWarning) as record:
        obj = oparl.from_json('''{
            "id": "object-with-scalar-instead-of-list",
            "type": "https://schema.oparl.org/1.0/Person",
            "membership": {
                "id": "does-not-exist",
                "type": "https://schema.oparl.org/1.0/Membership"
            }
        }''')
    assert len(record) == 1
    assert 'non-list value' in str(record[0].message)
    membership = obj['membership']
    assert isinstance(membership, list)
    assert len(membership) == 1
    assert membership[0]['id'] == 'does-not-exist'
def test_object_instead_of_reference_in_list_triggers_specificationwarning():
    with pytest.warns(oparl.SpecificationWarning) as record:
        obj = oparl.from_json('''{
            "id": "object-with-object-instead-of-reference-in-list",
            "type": "https://schema.oparl.org/1.0/System",
            "otherOparlVersions": [{
                "id": "does-not-exist",
                "type": "https://schema.oparl.org/1.0/System"
            }]
        }''')
    assert len(record) == 1
    assert 'must contain references' in str(record[0].message)
    others = obj['otherOparlVersions']
    assert isinstance(others, list)
    assert len(others) == 1
    assert isinstance(others[0], oparl.objects.System)
    assert others[0]['id'] == 'does-not-exist'
def test_step_blocks(Simulator, seed):
    with nengo.Network(seed=seed) as net:
        inp = nengo.Node(np.sin)
        ens = nengo.Ensemble(10, 1)
        nengo.Connection(inp, ens)
        p = nengo.Probe(ens)

    with Simulator(net, unroll_simulation=25) as sim1:
        sim1.run_steps(50)
    with Simulator(net, unroll_simulation=10) as sim2:
        sim2.run_steps(50)
    assert np.allclose(sim1.data[p], sim2.data[p])

    with pytest.warns(RuntimeWarning):
        with Simulator(net, unroll_simulation=5) as sim:
            sim.run_steps(2)
def test_group_indices():
    df = pd.DataFrame({'x': [1, 5, 2, 2, 4, 0, 4],
                       'y': [1, 2, 3, 4, 5, 6, 5]})
    results = df >> group_by('x') >> group_indices()
    assert all(results == [1, 4, 2, 2, 3, 0, 3])
    results = df >> group_indices('y % 2')
    assert all(results == [1, 0, 1, 0, 1, 0, 1])
    results = df >> group_indices()
    assert all(results == [1, 1, 1, 1, 1, 1, 1])

    # Branches
    with pytest.warns(UserWarning):
        df >> group_by('x') >> group_indices('y')
def test_savemoviefiles():
    # This function is deprecated
    if os.name != 'nt':
        # If not on Windows, this should break
        with pytest.raises(OSError):
            files.savemoviefiles('invalid.avi', np.zeros(10), path='./')
    else:
        # Trigger an import error
        with mock.patch.dict("sys.modules", {"PIL": {}}):
            with pytest.raises(ImportError):
                files.savemoviefiles('invalid.avi', np.zeros(10), path='./')
        # smoke test
        with pytest.warns(UserWarning):
            files.savemoviefiles('invalid.avi', np.zeros(10), path='./')
def test_conversion_to_respecth(self, filename_ck):
    """Test proper conversion to ReSpecTh XML.
    """
    file_path = os.path.join(filename_ck)
    filename = pkg_resources.resource_filename(__name__, file_path)
    c_true = ChemKED(filename)

    with TemporaryDirectory() as temp_dir:
        newfile = os.path.join(temp_dir, 'test.xml')
        c_true.convert_to_ReSpecTh(newfile)

        with pytest.warns(UserWarning) as record:
            c = ChemKED.from_respecth(newfile)
        m = str(record.pop(UserWarning).message)
        assert m == 'Using DOI to obtain reference information, rather than preferredKey.'

        assert c.file_authors[0]['name'] == c_true.file_authors[0]['name']
        assert c.reference.detail == 'Converted from ReSpecTh XML file {}'.format(os.path.split(newfile)[1])
        assert c.apparatus.kind == c_true.apparatus.kind
        assert c.experiment_type == c_true.experiment_type
        assert c.reference.doi == c_true.reference.doi
        assert len(c.datapoints) == len(c_true.datapoints)
def test_absolute_asym_comp_uncertainty(self):
    properties = self.load_properties('testfile_uncertainty.yaml')

    with pytest.warns(UserWarning) as record:
        d = DataPoint(properties[0])
    m = str(record.pop(UserWarning).message)
    assert m == ('Asymmetric uncertainties are not supported. The maximum of lower-uncertainty '
                 'and upper-uncertainty has been used as the symmetric uncertainty.')
    assert np.isclose(d.composition[2]['amount'].value, Q_(99.0))
    assert np.isclose(d.composition[2]['amount'].error, Q_(1.0))

    with pytest.warns(UserWarning) as record:
        d = DataPoint(properties[1])
    m = str(record.pop(UserWarning).message)
    assert m == ('Asymmetric uncertainties are not supported. The maximum of lower-uncertainty '
                 'and upper-uncertainty has been used as the symmetric uncertainty.')
    assert np.isclose(d.composition[2]['amount'].value, Q_(99.0))
    assert np.isclose(d.composition[2]['amount'].error, Q_(1.0))
def test_missing_doi_period_at_end(self):
    """Ensure can handle missing DOI with period at end of reference.
    """
    root = etree.Element('experiment')
    ref = etree.SubElement(root, 'bibliographyLink')
    ref.set('preferredKey', 'Chaumeix, N., Pichon, S., Lafosse, F., Paillard, C.-E., '
            'International Journal of Hydrogen Energy, 2007, (32) 2216-2226, '
            'Fig. 12., right, open diamond.'
            )
    with pytest.warns(UserWarning) as record:
        ref = get_reference(root)
    m = str(record.pop(UserWarning).message)
    assert m == ('Missing doi attribute in bibliographyLink. Setting "detail" key as a '
                 'fallback; please update to the appropriate fields.')
    assert ref['detail'] == ('Chaumeix, N., Pichon, S., Lafosse, F., Paillard, C.-E., '
                             'International Journal of Hydrogen Energy, 2007, (32) 2216-2226, '
                             'Fig. 12., right, open diamond.'
                             )
def test_incorrect_doi(self, capfd):
    """Ensure can handle invalid DOI.
    """
    root = etree.Element('experiment')
    ref = etree.SubElement(root, 'bibliographyLink')
    ref.set('doi', '10.1000/invalid.doi')
    ref.set('preferredKey', 'Chaumeix, N., Pichon, S., Lafosse, F., Paillard, C.-E., '
            'International Journal of Hydrogen Energy, 2007, (32) 2216-2226, '
            'Fig. 12., right, open diamond'
            )
    with pytest.warns(UserWarning) as record:
        ref = get_reference(root)
    m = str(record.pop(UserWarning).message)
    assert m == ('Missing doi attribute in bibliographyLink or lookup failed. Setting "detail" '
                 'key as a fallback; please update to the appropriate fields.')
    assert ref['detail'] == (
        'Chaumeix, N., Pichon, S., Lafosse, F., Paillard, C.-E., '
        'International Journal of Hydrogen Energy, 2007, (32) 2216-2226, '
        'Fig. 12., right, open diamond.'
    )
def test_incorrect_doi_period_at_end(self, capfd):
    """Ensure can handle invalid DOI with period at end of reference.
    """
    root = etree.Element('experiment')
    ref = etree.SubElement(root, 'bibliographyLink')
    ref.set('doi', '10.1000/invalid.doi')
    ref.set('preferredKey', 'Chaumeix, N., Pichon, S., Lafosse, F., Paillard, C.-E., '
            'International Journal of Hydrogen Energy, 2007, (32) 2216-2226, '
            'Fig. 12., right, open diamond.'
            )
    with pytest.warns(UserWarning) as record:
        ref = get_reference(root)
    m = str(record.pop(UserWarning).message)
    assert m == ('Missing doi attribute in bibliographyLink or lookup failed. Setting "detail" '
                 'key as a fallback; please update to the appropriate fields.')
    assert ref['detail'] == (
        'Chaumeix, N., Pichon, S., Lafosse, F., Paillard, C.-E., '
        'International Journal of Hydrogen Energy, 2007, (32) 2216-2226, '
        'Fig. 12., right, open diamond.'
    )
def test_doi_missing_internet(self, disable_socket):
    """Ensure that DOI validation fails gracefully with no Internet.
    """
    root = etree.Element('experiment')
    ref = etree.SubElement(root, 'bibliographyLink')
    ref.set('doi', '10.1016/j.ijhydene.2007.04.008')
    ref.set('preferredKey', 'Chaumeix, N., Pichon, S., Lafosse, F., Paillard, C.-E., '
            'International Journal of Hydrogen Energy, 2007, (32) 2216-2226, '
            'Fig. 12., right, open diamond'
            )
    with pytest.warns(UserWarning) as record:
        ref = get_reference(root)
    m = str(record.pop(UserWarning).message)
    assert m == ('Missing doi attribute in bibliographyLink or lookup failed. Setting "detail" '
                 'key as a fallback; please update to the appropriate fields.')
    assert ref['detail'] == ('Chaumeix, N., Pichon, S., Lafosse, F., Paillard, C.-E., '
                             'International Journal of Hydrogen Energy, 2007, (32) 2216-2226, '
                             'Fig. 12., right, open diamond.'
                             )
def test_species_missing_inchi(self, capfd):
    """Check for warning when species missing InChI.
    """
    root = etree.Element('experiment')
    properties = etree.SubElement(root, 'commonProperties')
    initial_composition = etree.SubElement(properties, 'property')
    initial_composition.set('name', 'initial composition')
    component = etree.SubElement(initial_composition, 'component')
    species = etree.SubElement(component, 'speciesLink')
    species.set('preferredKey', 'H2')
    amount = etree.SubElement(component, 'amount')
    amount.set('units', 'mole fraction')
    amount.text = '1.0'

    with pytest.warns(UserWarning) as record:
        get_common_properties(root)
    m = str(record.pop(UserWarning).message)
    assert m == 'Missing InChI for species H2'
def test_conversion_respth2ck_default_output(self):
    """Test respth2ck converter when used via command-line arguments.
    """
    file_path = os.path.join('testfile_st.xml')
    filename = pkg_resources.resource_filename(__name__, file_path)

    with TemporaryDirectory() as temp_dir:
        xml_file = copy(filename, temp_dir)
        with pytest.warns(UserWarning) as record:
            respth2ck(['-i', xml_file])
        newfile = os.path.join(os.path.splitext(xml_file)[0] + '.yaml')
        assert os.path.exists(newfile)
        m = str(record.pop(UserWarning).message)
        assert m == 'Using DOI to obtain reference information, rather than preferredKey.'