def load_data_from_server(term, dry_run=False):
    """Fetch, normalize, and (optionally) persist the course data for *term*.

    Tries the pre-built static XML file first; if that file is invalid,
    falls back to the live database-query endpoint, retrying up to three
    times before giving up.

    Parameters:
        term: the term identifier used to build URLs and tag each course.
        dry_run: when True, skip writing the fetched data to disk.

    Returns:
        The parsed data dict on success, or None when the server returned
        no usable data (empty results, or three failed fetch attempts).
    """
    try:
        parsed_data = request_data(build_static_term_url(term), term)
    except BadDataException:
        print(f'{term}: static file is invalid xml; attempting database query')
        url = build_term_url(term)
        # Retry the database endpoint up to three times; each failure is
        # reported individually, matching the attempt number.
        for attempt in range(1, 4):
            try:
                parsed_data = request_data(url, term)
                break
            except BadDataException:
                print(f'{term}: fetching attempt #{attempt} failed')
        else:
            # All three database attempts raised BadDataException.
            print(f'{term}: no xml returned after three tries')
            return None

    if not parsed_data['searchresults']:
        logging.info(f'No data returned for {term}')
        return None

    # We sort the courses here, before we save it to disk, so that we don't
    # need to re-sort every time we load from disk.
    parsed_data['searchresults']['course'].sort(key=lambda c: c['clbid'])

    # Embed the term into each course individually
    for course in parsed_data['searchresults']['course']:
        course['term'] = term

    if not dry_run:
        destination = make_xml_term_path(term)
        serialized_data = xmltodict.unparse(parsed_data, pretty=True)
        save_data(serialized_data, destination)
        logging.debug(f'Fetched {destination}')

    return parsed_data