def handle(self, *args, **options):
    """Export the latest completed AnalysisJob results as a CSV uploaded to S3.

    Builds one CSV row per job by concatenating the (columns, values) pairs
    produced by each exporter in EXPORTS, writes the rows to a temporary
    file, then uploads it to the configured bucket under a timestamped key.
    """
    # Local import: module-level imports are outside this view and may not
    # include `timezone`; needed for the aware-datetime fix below.
    from datetime import timezone

    # TemporaryDirectory cleans up automatically, replacing the previous
    # mkdtemp + try/finally + shutil.rmtree dance.
    with tempfile.TemporaryDirectory() as tmpdir:
        queryset = AnalysisJob.objects.all().filter(status=AnalysisJob.Status.COMPLETE)
        filter_set = AnalysisJobFilterSet()
        queryset = filter_set.filter_latest(queryset, 'latest', True)

        tmp_csv_filename = os.path.join(tmpdir, 'results.csv')
        # newline='' is required by the csv module; without it the writer
        # emits blank rows on Windows.
        with open(tmp_csv_filename, 'w', newline='') as csv_file:
            writer = None
            fieldnames = []
            for job in queryset:
                row_data = {}
                for export in EXPORTS:
                    columns, values = export(job)
                    # Field names are collected from the first job only;
                    # assumes every job yields the same columns — TODO confirm.
                    if writer is None:
                        fieldnames = fieldnames + columns
                    for column, value in zip(columns, values):
                        row_data[column] = value
                if writer is None:
                    writer = csv.DictWriter(csv_file,
                                            fieldnames=fieldnames,
                                            dialect=csv.excel,
                                            quoting=csv.QUOTE_MINIMAL)
                    writer.writeheader()
                writer.writerow(row_data)

        s3_client = boto3.client('s3')
        # Timezone-aware replacement for the deprecated datetime.utcnow();
        # strftime output is identical for UTC.
        now = datetime.now(timezone.utc)
        s3_key = 'analysis-spreadsheets/results-{}.csv'.format(now.strftime('%Y-%m-%dT%H%M'))
        s3_client.upload_file(tmp_csv_filename, settings.AWS_STORAGE_BUCKET_NAME, s3_key)
        logger.info('File uploaded to: s3://{}/{}'
                    .format(settings.AWS_STORAGE_BUCKET_NAME, s3_key))
generate_analysis_csv.py — source file
python
Reads: 42
Favorites: 0
Likes: 0
Comments: 0
Comment list
Article contents