def download_and_unzip_result(url, job_hash):
    """Download a zipped job result from *url*, extract it into the current
    directory, and collect the job's outputs.

    Args:
        url: HTTP(S) location of the result archive (streamed with requests).
        job_hash: Name of the directory inside the archive holding the job's
            output; also the stem of the result pickle (``<job_hash>.pkl``).

    Returns:
        tuple: ``(result, new_files)`` where ``result`` is the unpickled
        script output (or ``None`` if no pickle was found) and ``new_files``
        is the list of file names moved out of the job directory (or ``None``
        if the directory was absent).

    Side effects: writes and then removes ``download.zip``, extracts the
    archive into the current working directory, moves the job's files up one
    level, and removes the ``job_hash`` directory.
    """
    r = requests.get(url, stream=True)
    status_check(r)
    total_size = int(r.headers.get('content-length', 0))
    chunk_size = 1024 * 32  # 32 KiB per streamed chunk
    # Context managers guarantee the file and the progress bar are closed
    # even if the download raises midway.
    with open('download.zip', 'wb') as f, \
            tqdm(total=total_size, unit='B', unit_scale=True) as pbar:
        for data in r.iter_content(chunk_size):
            f.write(data)
            # Fix: advance by the bytes actually received — the final chunk
            # is usually shorter than chunk_size, so updating by chunk_size
            # made the bar overshoot its total.
            pbar.update(len(data))

    # Fix: the original leaked an unclosed file handle and read the whole
    # archive into memory just to wrap it in BytesIO; ZipFile reads the
    # on-disk file directly.
    with ZipFile('download.zip') as z:
        z.extractall()
    remove('download.zip')

    result = None      # unpickled output of the script, if a pickle exists
    new_files = None   # names of files the script created, if any
    pickle_path = path.abspath(path.join(job_hash, job_hash + '.pkl'))
    if path.isfile(pickle_path):
        with open(pickle_path, 'rb') as f:
            # HACK: workaround for dill's pickling problem
            # import_all()
            # SECURITY: dill.load executes arbitrary code from the
            # downloaded archive — only use with a trusted server.
            result = dill.load(f)
            # unimport_all()
        remove(pickle_path)
    if path.isdir(job_hash):
        new_files = listdir(job_hash)
        # Move the extracted outputs up into the working directory, then
        # drop the now-empty job directory.
        for name in new_files:
            rename(path.join(job_hash, name), name)
        rmtree(job_hash)
    return result, new_files
# NOTE(review): stray scraped-page text below was not Python and broke the
# file; preserved here as comments.
# 评论列表  (comment list)
# 文章目录  (table of contents)