def get_job_groups(browser, root_url, args):
    """Return a SortedDict mapping job group labels to their overview URLs.

    If ``args.job_group_urls`` is set (comma-separated), those URLs are used
    directly, keyed by their position index. Otherwise all job groups are
    fetched from the openQA API and keyed by "<parent> / <name>" (or just
    "<name>" for top-level groups). The result can then be narrowed by the
    ``args.job_groups`` / ``args.exclude_job_groups`` comma-separated regex
    term lists.

    :param browser: HTTP helper exposing ``get_json(url)``.
    :param root_url: Base URL of the openQA instance.
    :param args: Parsed CLI arguments (``job_group_urls``, ``job_groups``,
                 ``exclude_job_groups``, ``no_progress``).
    :return: SortedDict of label -> job group overview URL.
    """
    if args.job_group_urls:
        job_group_urls = args.job_group_urls.split(',')
        # Lazy %-args: formatting is skipped when INFO logging is disabled.
        log.info("Acting on specified job group URL(s): %s", ', '.join(job_group_urls))
        job_groups = {i: url for i, url in enumerate(job_group_urls)}
    else:
        parent_groups = get_parent_job_groups(browser, root_url, args)
        # Hoist the API URL so both branches issue the identical request.
        job_groups_url = urljoin(root_url, 'api/v1/job_groups')
        if args.no_progress or not humanfriendly_available:
            results = browser.get_json(job_groups_url)
        else:
            # Show a spinner only when humanfriendly is available and
            # progress output was not explicitly disabled.
            with AutomaticSpinner(label='Retrieving job groups'):
                results = browser.get_json(job_groups_url)

        def _pgroup_prefix(group):
            # Prefix with the parent group's name when one exists; groups
            # whose parent_id is absent from parent_groups (e.g. top-level
            # groups) keep their plain name.
            try:
                return '%s / %s' % (parent_groups[group['parent_id']], group['name'])
            except KeyError:
                return group['name']

        job_groups = {
            _pgroup_prefix(job_group): urljoin(root_url, '/group_overview/%i' % job_group['id'])
            for job_group in results
        }
    if args.job_groups:
        # Terms are joined into one alternation; they are treated as regexes.
        job_pattern = re.compile('(%s)' % '|'.join(args.job_groups.split(',')))
        job_groups = {k: v for k, v in iteritems(job_groups) if job_pattern.search(k)}
        log.info("Job group URL for %s: %s", args.job_groups, job_groups)
    if args.exclude_job_groups:
        job_pattern = re.compile('(%s)' % '|'.join(args.exclude_job_groups.split(',')))
        job_groups = {k: v for k, v in iteritems(job_groups) if not job_pattern.search(k)}
        log.info("Job group URL excluding %s: %s", args.exclude_job_groups, job_groups)
    return SortedDict(job_groups)