def test_spec_debugged_via_cmdline_target_exists_export_cancel(self):
stub_item_attr_value(
self, mocks, 'dummy',
runtime.ToolchainRuntime(toolchain.NullToolchain()),
)
working_set = mocks.WorkingSet({
'calmjs.runtime': [
'tool = calmjs.testing.mocks:dummy',
],
})
tmpdir = mkdtemp(self)
target = join(tmpdir, 'target')
open(target, 'w').close()
rt = runtime.Runtime(working_set=working_set, prog='calmjs')
stub_stdouts(self)
stub_stdin(self, u'n\n')
stub_check_interactive(self, True)
result = rt(['tool', '--export-target', target, '-dd', '-vv'])
self.assertEqual(result['debug'], 2)
# This is an integration test of sorts for the debug advice output
self.assertIn("advise 'cleanup' invoked by", sys.stderr.getvalue())
self.assertIn("toolchain.py", sys.stderr.getvalue())
self.assertIn(
'advise(AFTER_PREPARE, self.check_export_target_exists, spec)',
sys.stderr.getvalue(),
)
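# A note on the pattern above: mocks.WorkingSet is a test double for a
# pkg_resources working set. What follows is a minimal sketch of the
# contract it is assumed to satisfy (the real implementation lives in
# calmjs.testing.mocks): a mapping of entry point group names to entry
# point definition strings, iterable as parsed EntryPoint objects.
import pkg_resources

class MiniWorkingSet(object):
    """Illustrative stand-in only; not part of calmjs."""

    def __init__(self, items):
        self.items = {
            group: [pkg_resources.EntryPoint.parse(s) for s in specs]
            for group, specs in items.items()}

    def iter_entry_points(self, group):
        # yield the parsed entry points registered under the group
        for ep in self.items.get(group, ()):
            yield ep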
def test_root_runtime_errors_ignored(self):
stub_stdouts(self)
working_set = mocks.WorkingSet({'calmjs.runtime': [
'foo = calmjs.nosuchmodule:no.where',
'bar = calmjs.npm:npm',
'npm = calmjs.npm:npm.runtime',
]})
rt = runtime.Runtime(working_set=working_set)
with self.assertRaises(SystemExit):
rt(['-h'])
out = sys.stdout.getvalue()
self.assertNotIn('foo', out)
self.assertIn('npm', out)
def test_npm_description(self):
stub_stdouts(self)
working_set = mocks.WorkingSet({'calmjs.runtime': [
'npm = calmjs.npm:npm.runtime',
]})
rt = runtime.Runtime(working_set=working_set)
with self.assertRaises(SystemExit):
rt(['npm', '-h'])
out = sys.stdout.getvalue()
self.assertIn('npm support for the calmjs framework', out)
def test_root_runtime_bad_names(self):
working_set = mocks.WorkingSet({'calmjs.runtime': [
'bad name = calmjs.npm:npm.runtime',
'bad.name = calmjs.npm:npm.runtime',
'badname:likethis = calmjs.npm:npm.runtime',
]})
stderr = mocks.StringIO()
with pretty_logging(
logger='calmjs.runtime', level=DEBUG, stream=stderr):
rt = runtime.Runtime(working_set=working_set)
rt.argparser  # property access triggers entry point registration
err = stderr.getvalue()
self.assertIn("bad 'calmjs.runtime' entry point", err)
stub_stdouts(self)
with self.assertRaises(SystemExit):
rt(['-h'])
out = sys.stdout.getvalue()
# this results in an unnatural argument parsing situation
self.assertNotIn('bad name', out)
# reserved for disambiguation
self.assertNotIn('bad.name', out)
self.assertNotIn('badname:likethis', out)
# the command listing is naturally not available.
self.assertNotIn('npm', out)
def setup_dupe_runtime(self):
from calmjs.testing import utils
from calmjs.npm import npm
utils.foo_runtime = runtime.PackageManagerRuntime(npm.cli_driver)
utils.runtime_foo = runtime.PackageManagerRuntime(npm.cli_driver)
def cleanup():
del utils.foo_runtime
del utils.runtime_foo
self.addCleanup(cleanup)
make_dummy_dist(self, ((
'entry_points.txt',
'[calmjs.runtime]\n'
'bar = calmjs.testing.utils:foo_runtime\n'
),), 'example1.foo', '1.0')
make_dummy_dist(self, ((
'entry_points.txt',
'[calmjs.runtime]\n'
'bar = calmjs.testing.utils:foo_runtime\n'
),), 'example2.foo', '1.0')
make_dummy_dist(self, ((
'entry_points.txt',
'[calmjs.runtime]\n'
'bar = calmjs.testing.utils:runtime_foo\n'
'baz = calmjs.testing.utils:runtime_foo\n'
),), 'example3.foo', '1.0')
make_dummy_dist(self, ((
'entry_points.txt',
'[calmjs.runtime]\n'
'bar = calmjs.testing.utils:runtime_foo\n'
'baz = calmjs.testing.utils:runtime_foo\n'
),), 'example4.foo', '1.0')
return pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
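# make_dummy_dist (from calmjs.testing.utils) is assumed to write the
# given metadata files into a NAME-VERSION.egg-info directory under the
# testing tmpdir, which is why a real pkg_resources.WorkingSet rooted
# there can discover the distributions. A hedged, standalone
# approximation of that setup:
import os
import tempfile
import pkg_resources

def write_dummy_egg_info(root, name, version, files):
    # pkg_resources scans path entries for *.egg-info subdirectories
    egg_info = os.path.join(root, '%s-%s.egg-info' % (name, version))
    os.makedirs(egg_info)
    for filename, content in files:
        with open(os.path.join(egg_info, filename), 'w') as fd:
            fd.write(content)

root = tempfile.mkdtemp()
write_dummy_egg_info(root, 'example1.foo', '1.0', [(
    'entry_points.txt',
    '[calmjs.runtime]\nbar = calmjs.testing.utils:foo_runtime\n',
)])
ws = pkg_resources.WorkingSet([root])
# lists the single entry point registered above
print([str(ep) for ep in ws.iter_entry_points('calmjs.runtime')])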
def test_duplication_and_runtime_not_recursion(self):
"""
Make sure it explodes normally on a standard runtime error.
"""
from calmjs.testing import utils
class BadAtInit(runtime.DriverRuntime):
def init_argparser(self, argparser):
if argparser is not self.argparser:
raise RuntimeError('A fake explosion')
def cleanup():
del utils.badatinit
self.addCleanup(cleanup)
stub_stdouts(self)
# create a dummy dist
make_dummy_dist(self, ((
'entry_points.txt',
'[calmjs.runtime]\n'
'badatinit = calmjs.testing.utils:badatinit\n'
),), 'example.badsimple', '1.0')
working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
utils.badatinit = BadAtInit(None)
# and here lies the crimson magician, all out of hp.
with pretty_logging(
logger='calmjs.runtime', stream=mocks.StringIO()) as s:
runtime.Runtime(working_set=working_set).argparser
self.assertIn(
"cannot register entry_point "
"'badatinit = calmjs.testing.utils:badatinit' from "
"'example.badsimple 1.0' ", s.getvalue()
)
def test_runtime_recursion_that_is_totally_our_fault(self):
"""
If stuff does blow up, don't blame the wrong party if we can
help it.
"""
from calmjs.testing import utils
stub_stdouts(self)
# We kind of have to punt this, so punt it with a stupid
# override using an EntryPoint that explodes.
class TrulyBadAtInit(runtime.Runtime):
def init_argparser(self, argparser):
raise RuntimeError('maximum recursion depth exceeded')
def cleanup():
del utils.trulybad
self.addCleanup(cleanup)
make_dummy_dist(self, ((
'entry_points.txt',
'[calmjs.runtime]\n'
'trulybad = calmjs.testing.utils:trulybad\n'
),), 'example.badsimple', '1.0')
working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
utils.trulybad = TrulyBadAtInit(None)
with pretty_logging(
logger='calmjs.runtime', stream=mocks.StringIO()) as s:
runtime.Runtime(working_set=working_set).argparser
self.assertIn("maximum recursion depth exceeded", s.getvalue())
def setup_runtime(self):
# create a working set with our custom runtime entry point
# TODO should really improve the test case to provide custom
# runtime instances separate from actual data.
working_set = mocks.WorkingSet({
'calmjs.runtime': [
'cmd = calmjs.npm:npm.runtime',
],
})
return runtime.Runtime(working_set=working_set, prog='calmjs')
# for the test, we use the -u flag for the unknown tests, as it is
# unknown to both the bootstrap and the target parser. The next two
# tests use a known flag, placed before and then after.
def setup_runtime(self):
stub_stdouts(self)
remember_cwd(self)
cwd = mkdtemp(self)
os.chdir(cwd)
make_dummy_dist(self, (
('requirements.json', json.dumps({
'name': 'calmpy.pip',
'require': {
'setuptools': '25.1.6',
},
})),
), 'calmpy.pip', '2.0')
make_dummy_dist(self, (
('requires.txt', '[dev]\ncalmpy.pip'),
), 'site', '1.0')
working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
# Stub out the underlying data needed by the cli, so that the tests
# run against our custom data for reproducibility.
stub_item_attr_value(self, dist, 'default_working_set', working_set)
stub_check_interactive(self, True)
driver = cli.PackageManagerDriver(
pkg_manager_bin='mgr', pkgdef_filename='requirements.json',
dep_keys=('require',),
)
return cwd, runtime.PackageManagerRuntime(driver)
# do note: the runtime is not registered to the root runtime
# directly, but this is a good enough emulation of how this would
# behave under real circumstances, as each of these runtimes can and
# should be able to operate as an independent entity.
def test_calmjs_main_console_version_broken(self):
stub_stdouts(self)
working_set = pkg_resources.WorkingSet([mkdtemp(self)])
stub_item_attr_value(self, runtime, 'default_working_set', working_set)
stub_item_attr_value(
self, calmjs_argparse, 'default_working_set', working_set)
# make sure the bad case doesn't just blow up...
with self.assertRaises(SystemExit) as e:
runtime.main(['-V'])
self.assertEqual(e.exception.args[0], 0)
self.assertIn('? ? from ?', sys.stdout.getvalue())
def test_get_package_advices(self):
make_dummy_dist(self, ((
'entry_points.txt',
'[calmjs.toolchain.advice]\n'
'calmjs.toolchain:Toolchain = calmjs.tests.test_toolchain:dummy\n'
'calmjs.toolchain:Alt = calmjs.tests.test_toolchain:dummy\n'
),), 'example.package', '1.0')
working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
reg = AdviceRegistry(CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
self.assertEqual(sorted(reg.get('example.package').keys()), [
'calmjs.toolchain:Alt',
'calmjs.toolchain:Toolchain',
])
def test_not_toolchain_process(self):
working_set = pkg_resources.WorkingSet([])
reg = AdviceRegistry(CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
with pretty_logging(stream=StringIO()) as s:
self.assertIsNone(
reg.process_toolchain_spec_package(object(), Spec(), 'calmjs'))
self.assertIn(
"must call process_toolchain_spec_package with a toolchain",
s.getvalue(),
)
def test_standard_toolchain_process_nothing(self):
working_set = pkg_resources.WorkingSet([])
reg = AdviceRegistry(CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
toolchain = Toolchain()
spec = Spec()
with pretty_logging(stream=StringIO()) as s:
reg.process_toolchain_spec_package(toolchain, spec, 'calmjs')
self.assertIn(
"no advice setup steps registered for package/requirement "
"'calmjs'", s.getvalue(),
)
def test_standard_toolchain_failure_process(self):
make_dummy_dist(self, ((
'entry_points.txt',
'[calmjs.toolchain.advice]\n'
'calmjs.toolchain:Toolchain = calmjs.tests.test_toolchain:bad\n'
'calmjs.toolchain:NullToolchain = '
'calmjs.tests.test_toolchain:dummy\n'
),), 'example.package', '1.0')
working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
reg = AdviceRegistry(CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
toolchain = NullToolchain()
spec = Spec()
with pretty_logging(stream=StringIO()) as s:
reg.process_toolchain_spec_package(
toolchain, spec, 'example.package')
err = s.getvalue()
# inheritance applies.
self.assertIn(
"found advice setup steps registered for package/requirement "
"'example.package' for toolchain 'calmjs.toolchain:NullToolchain'",
err,
)
self.assertIn("ERROR", err)
self.assertIn(
"failure encountered while setting up advices through entry_point",
err)
# partial execution will have occurred, so test for its side effects.
self.assertEqual(spec, {'dummy': ['dummy', 'bad']})
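# The dummy and bad callables referenced by the entry points are
# defined in calmjs.tests.test_toolchain; their exact signatures are
# an assumption here, but the assertions above imply behaviour roughly
# like the following sketch:
def dummy(spec, extras):  # hypothetical reconstruction
    # record that this setup step ran; see spec['dummy'] above
    spec.setdefault('dummy', []).append('dummy')

def bad(spec, extras):  # hypothetical reconstruction
    # partially executes before failing, which is why 'bad' still
    # appears in the spec despite the ERROR being logged
    spec.setdefault('dummy', []).append('bad')
    raise RuntimeError('simulated advice setup failure')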
def test_toolchain_compile_loaderplugin_entry_registered(self):
"""
A rough standalone test for handling of loader plugins.
"""
reg = LoaderPluginRegistry('simple', _working_set=WorkingSet({
'simple': [
'foo = calmjs.tests.test_toolchain:MockLPHandler',
'bar = calmjs.tests.test_toolchain:MockLPHandler',
],
}))
src_dir = mkdtemp(self)
src = join(src_dir, 'target.txt')
spec = Spec(calmjs_loaderplugin_registry=reg)
with pretty_logging(stream=StringIO()) as s:
bar_results = self.toolchain.compile_loaderplugin_entry(spec, (
'bar!target.txt', src, 'bar!target.txt', 'bar!target.txt'))
foo_results = self.toolchain.compile_loaderplugin_entry(spec, (
'foo!target.txt', src, 'foo!target.txt', 'foo!target.txt'))
self.assertEqual('', s.getvalue())
self.assertEqual((
{'foo!target.txt': 'foo!target.txt'},
{'foo!target.txt': 'foo!target.txt'},
['foo!target.txt'],
), foo_results)
self.assertEqual((
{'bar!target.txt': 'bar!target.txt'},
{'bar!target.txt': 'bar!target.txt'},
['bar!target.txt'],
), bar_results)
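# MockLPHandler is defined in calmjs.tests.test_toolchain; the
# following is a hedged reconstruction from the assertions above,
# assuming the standard LoaderPluginHandler call signature:
from calmjs.loaderplugin import LoaderPluginHandler

class MockLPHandler(LoaderPluginHandler):  # hypothetical reconstruction
    def __call__(self, toolchain, spec, modname, source, target, modpath):
        # echo identity mappings, producing the (modpaths, targets,
        # module_names) triple each assertEqual above expects
        return {modname: target}, {modname: modpath}, [modname]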
# recursive lookups are generally not needed if the target supplied
# _is_ the target.
def test_toolchain_spec_prepare_loaderplugins_missing(self):
reg = LoaderPluginRegistry('simple', _working_set=WorkingSet({
'simple': [
'foo = calmjs.tests.test_toolchain:MockLPHandler',
'bar = calmjs.tests.test_toolchain:MockLPHandler',
],
}))
spec = Spec(
calmjs_loaderplugin_registry=reg,
loaderplugin_sourcepath_maps={
'foo': {'foo!thing': 'thing'},
'missing': {'missing!thing': 'thing'},
'bar': {'bar!thing': 'thing'},
},
)
with pretty_logging(stream=StringIO()) as s:
toolchain_spec_prepare_loaderplugins(
self.toolchain, spec, 'loaderplugin', 'loaders')
self.assertEqual({
'foo!thing': 'thing',
'bar!thing': 'thing',
}, spec['loaderplugin_sourcepath'])
self.assertEqual({
'foo': 'foo',
'bar': 'bar',
}, spec['loaders'])
self.assertIn(
"loaderplugin handler for 'missing' not found in loaderplugin "
"registry 'simple'", s.getvalue())
self.assertIn(
"will not be compiled into the build target: ['missing!thing']",
s.getvalue())
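# toolchain_spec_prepare_loaderplugins is part of calmjs.toolchain; a
# hedged sketch of the filtering the assertions above imply: plugin
# names resolvable through the registry have their mappings merged
# into the combined sourcepath, with a handler entry recorded under
# the given loaders key; unresolvable names are logged and dropped.
def prepare_loaderplugins_sketch(spec, registry):  # illustrative only
    sourcepath = {}
    loaders = {}
    for name, mapping in spec.get(
            'loaderplugin_sourcepath_maps', {}).items():
        if registry.get(name) is None:
            # the real helper also logs the dropped build targets
            continue
        # with the mock handlers above this degenerates to identity
        loaders[name] = name
        sourcepath.update(mapping)
    spec['loaderplugin_sourcepath'] = sourcepath
    spec['loaders'] = loaders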
def test_not_defined(self):
working_set = pkg_resources.WorkingSet([
self.ds_egg_root,
])
stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
p = indexer.resource_filename_mod_entry_point('dummyns', None)
self.assertEqual(normcase(p), normcase(self.dummyns_path))
def test_mismatched(self):
# the mismatch case involves a package that doesn't actually have
# the directory created
d_egg_root = join(mkdtemp(self), 'dummyns')
make_dummy_dist(self, ((
'namespace_packages.txt',
'dummyns\n',
), (
'entry_points.txt',
'[dummyns]\n'
'dummyns = dummyns:attr\n',
),), 'dummyns', '1.0', working_dir=d_egg_root)
working_set = pkg_resources.WorkingSet([
d_egg_root,
self.ds_egg_root,
])
stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
dummyns_ep = next(working_set.iter_entry_points('dummyns'))
with pretty_logging(stream=StringIO()) as fd:
p = indexer.resource_filename_mod_entry_point(
'dummyns', dummyns_ep)
self.assertIn(
"'dummyns' resolved by entry_point 'dummyns = dummyns:attr' leads "
"to no path", fd.getvalue()
)
self.assertEqual(normcase(p), normcase(self.dummyns_path))
def test_standard(self):
d_egg_root = join(mkdtemp(self), 'dummyns')
make_dummy_dist(self, ((
'namespace_packages.txt',
'dummyns\n',
), (
'entry_points.txt',
'[dummyns]\n'
'dummyns = dummyns:attr\n',
),), 'dummyns', '1.0', working_dir=d_egg_root)
working_set = pkg_resources.WorkingSet([
d_egg_root,
self.ds_egg_root,
])
stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
moddir = join(d_egg_root, 'dummyns')
os.makedirs(moddir)
# make this also a proper thing
with open(join(moddir, '__init__.py'), 'w') as fd:
fd.write('')
dummyns_ep = next(working_set.iter_entry_points('dummyns'))
p = indexer.resource_filename_mod_entry_point('dummyns', dummyns_ep)
# finally, this should work.
self.assertEqual(normcase(p), normcase(moddir))
def test_yarn_install_package_json_overwrite_interactive(self):
# Testing the implied init call
stub_mod_call(self, cli)
stub_stdin(self, 'y\n')
stub_stdouts(self)
tmpdir = mkdtemp(self)
os.chdir(tmpdir)
# All the pre-made setup.
app = make_dummy_dist(self, (
('requires.txt', '\n'.join([])),
('package.json', json.dumps({
'dependencies': {'jquery': '~1.11.0'},
})),
), 'foo', '1.9.0')
working_set = WorkingSet()
working_set.add(app, self._calmjs_testing_tmpdir)
stub_item_attr_value(self, dist, 'default_working_set', working_set)
# We are going to have a fake package.json
with open(join(tmpdir, 'package.json'), 'w') as fd:
json.dump({}, fd)
# This call is faked via stub_mod_call above.
yarn.yarn_install('foo', overwrite=True)
with open(join(tmpdir, 'package.json')) as fd:
config = json.load(fd)
# Overwritten
self.assertEqual(config, {
'dependencies': {'jquery': '~1.11.0'},
'devDependencies': {},
'name': 'foo',
})
# No log level set.
self.assertEqual(sys.stdout.getvalue(), '')
self.assertEqual(sys.stderr.getvalue(), '')
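# A hedged sketch of the overwrite behaviour the assertions above
# imply: with overwrite=True, the package.json on disk is replaced by
# one derived from the package metadata, normalized so the package
# name and both dependency keys are always present.
def merged_package_json(pkg_name, metadata_json):  # illustrative only
    result = {
        'name': pkg_name,
        'dependencies': {},
        'devDependencies': {},
    }
    result.update(metadata_json)
    return result

# merged_package_json('foo', {'dependencies': {'jquery': '~1.11.0'}})
# matches the config asserted above.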