def run_all_tests(args=None):
    global STATUS_COUNTER
    env = parse_env_vars()
    if args is None:
        parser = build_cli_parser()
        args = parser.parse_args()
    # Fall back to environment-variable defaults when CLI options are not given.
    args.config_dir = None
    if not args.dask_scheduler:
        args.dask_scheduler = env.get('DASK_SCHEDULER', '10.0.0.10:8786')
    if not args.dask_clients or 'ALL' in args.dask_clients:
        args.dask_clients = [c for c in DASK_CLIENTS if c != 'ALL']
    logger.info('Running run_all_tests with args: {}'.format(args))
    assert os.path.exists(args.repo_dir)
    # Run the unit tests, example scripts, and example configs once per
    # requested dask client, passing the scheduler, client, and example
    # data path through environment variables.
    for client in args.dask_clients:
        eedp = os.path.join(args.elm_examples_path, 'example_data')
        if not os.path.exists(eedp):
            eedp = os.environ.get('ELM_EXAMPLE_DATA_PATH')
        new_env = {'DASK_SCHEDULER': args.dask_scheduler or '',
                   'DASK_CLIENT': client,
                   'ELM_EXAMPLE_DATA_PATH': eedp}
        if not args.skip_pytest:
            run_all_unit_tests(args.repo_dir, new_env,
                               pytest_mark=args.pytest_mark)
        if not args.skip_scripts:
            run_all_example_scripts(new_env,
                                    path=os.path.join(args.elm_examples_path, 'scripts'),
                                    glob_pattern=args.glob_pattern)
        if not args.skip_configs:
            run_all_example_configs(new_env,
                                    path=os.path.join(args.elm_examples_path, 'configs'),
                                    large_test_mode=args.add_large_test_settings,
                                    glob_pattern=args.glob_pattern)
    failed_unit_tests = STATUS_COUNTER.get('unit_tests') != 'ok' and not args.skip_pytest
    if STATUS_COUNTER.get('fail') or failed_unit_tests:
        raise ValueError('Tests failed {}'.format(STATUS_COUNTER))
    print('ETIMES', ETIMES)
    # Summarize speed-up of each dask client relative to the SERIAL runs:
    # (serial_time - client_time) / serial_time for every timed file.
    speed_up_fracs = {k: [] for k in args.dask_clients if k != 'SERIAL'}
    for fname in ETIMES:
        if fname == 'unit_tests':
            continue
        if ETIMES[fname].get('SERIAL'):
            base = ETIMES[fname]['SERIAL']
            for k, v in ETIMES[fname].items():
                if k == 'SERIAL':
                    continue
                speed_up_fracs[k].append((base - v) / base)
    speed_up_fracs_summary = {k: describe(np.array(v))
                              for k, v in speed_up_fracs.items()}
    print('speed_up_fracs {}'.format(speed_up_fracs))
    print('Speed up summary {}'.format(speed_up_fracs_summary))
    print('STATUS', STATUS_COUNTER)
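# The function above relies on module-level names defined elsewhere in the
# same file (STATUS_COUNTER, ETIMES, DASK_CLIENTS, logger, parse_env_vars,
# build_cli_parser, run_all_unit_tests, run_all_example_scripts,
# run_all_example_configs) and on imports such as os, numpy (np), and
# scipy.stats.describe, none of which are shown in this excerpt.
# A minimal sketch of invoking the full sweep when the module is executed
# directly (an assumption for illustration; the original entry point is
# not shown here):
if __name__ == '__main__':
    run_all_tests()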