Python source code examples using AWS_STORAGE_BUCKET_NAME

amazons3service.py (project: itaplay, author: lhalam)
def save_on_amazon_with_boto(clipfile):
    """Function that uploads clip on amazon

        Returns :
            str : url
    """
    if clipfile.size > MAX_CLIP_SIZE:
        raise ValidationError("Your file is too large. Please enter valid file")
    else:
        conn = S3Connection(local_settings.AWS_ACCESS_KEY_ID,
                            local_settings.AWS_SECRET_ACCESS_KEY)
        bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
        k = boto.s3.key.Key(bucket)
        k.key = settings.MEDIAFILES_LOCATION + clipfile.name
        # save on S3
        k.set_contents_from_file(clipfile)
        # make public
        k.set_acl('public-read')
        # generate the URL that will be saved in the database
        url = k.generate_url(expires_in=0, query_auth=False)
        return url
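
The helper above expects a Django uploaded file; a minimal usage sketch from a view (the view name and form field are hypothetical, not part of the itaplay project) might look like this:

from django.http import JsonResponse
from django.views.decorators.http import require_POST

@require_POST
def upload_clip(request):
    clipfile = request.FILES['clip']  # 'clip' is an assumed form field name
    try:
        url = save_on_amazon_with_boto(clipfile)
    except ValidationError as error:
        # Oversized files are rejected by the helper before any S3 call
        return JsonResponse({'error': str(error)}, status=400)
    return JsonResponse({'url': url})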

models.py (project: pfb-network-connectivity, author: azavea)
def generate_tiles(self):
        environment = self.base_environment()
        environment.update({
            'PFB_JOB_ID': str(self.uuid),
            'AWS_STORAGE_BUCKET_NAME': settings.AWS_STORAGE_BUCKET_NAME,
            'PFB_S3_RESULTS_PATH': self.s3_results_path,
            'PFB_S3_TILES_PATH': self.s3_tiles_path
        })

        # Workaround for not being able to run development jobs on the actual batch cluster:
        # bail out with a helpful message
        if settings.DJANGO_ENV == 'development':
            logger.warn("Can't actually run development tiling jobs on AWS. Try this:"
                        "\nAWS_STORAGE_BUCKET_NAME='{AWS_STORAGE_BUCKET_NAME}' "
                        "PFB_JOB_ID='{PFB_JOB_ID}' "
                        "PFB_S3_RESULTS_PATH='{PFB_S3_RESULTS_PATH}' "
                        "PFB_S3_TILES_PATH='{PFB_S3_TILES_PATH}' "
                        "docker-compose run tilemaker".format(**environment))
            return

        job_params = {
            'jobName': self.tilemaker_job_name,
            'jobDefinition': self.tilemaker_job_definition,
            'jobQueue': settings.PFB_AWS_BATCH_TILEMAKER_JOB_QUEUE_NAME,
            'dependsOn': [{'jobId': self.batch_job_id}],
            'containerOverrides': {
                'environment': create_environment(**environment),
            }
        }
        client = boto3.client('batch')
        try:
            response = client.submit_job(**job_params)
            logger.info('Exporting tiles for AnalysisJob {}, job {}'.format(self.uuid,
                                                                            response['jobId']))
        except Exception:
            logger.exception('Error starting tile export for AnalysisJob {}'.format(self.uuid))
            raise
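
create_environment is a project helper that is not shown in this excerpt; a plausible minimal implementation (an assumption, based on the list-of-dicts format AWS Batch expects for containerOverrides environment entries) would be:

def create_environment(**kwargs):
    # AWS Batch expects environment overrides as [{'name': ..., 'value': ...}, ...]
    return [{'name': key, 'value': str(value)} for key, value in kwargs.items()]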

models.py (project: pfb-network-connectivity, author: azavea)
def _s3_url_for_result_resource(self, filename):
        return 'https://s3.amazonaws.com/{bucket}/{path}/{filename}'.format(
            bucket=settings.AWS_STORAGE_BUCKET_NAME,
            path=self.s3_results_path,
            filename=filename,
        )

generate_analysis_csv.py (project: pfb-network-connectivity, author: azavea)
def handle(self, *args, **options):

        tmpdir = tempfile.mkdtemp()

        try:
            queryset = AnalysisJob.objects.all().filter(status=AnalysisJob.Status.COMPLETE)
            filter_set = AnalysisJobFilterSet()
            queryset = filter_set.filter_latest(queryset, 'latest', True)

            tmp_csv_filename = os.path.join(tmpdir, 'results.csv')
            with open(tmp_csv_filename, 'w') as csv_file:
                writer = None
                fieldnames = []

                for job in queryset:
                    row_data = {}
                    for export in EXPORTS:
                        columns, values = export(job)
                        if writer is None:
                            fieldnames = fieldnames + columns
                        for column, value in zip(columns, values):
                            row_data[column] = value
                    if writer is None:
                        writer = csv.DictWriter(csv_file,
                                                fieldnames=fieldnames,
                                                dialect=csv.excel,
                                                quoting=csv.QUOTE_MINIMAL)
                        writer.writeheader()
                    writer.writerow(row_data)

            s3_client = boto3.client('s3')
            now = datetime.utcnow()
            s3_key = 'analysis-spreadsheets/results-{}.csv'.format(now.strftime('%Y-%m-%dT%H%M'))
            s3_client.upload_file(tmp_csv_filename, settings.AWS_STORAGE_BUCKET_NAME, s3_key)
            logger.info('File uploaded to: s3://{}/{}'
                        .format(settings.AWS_STORAGE_BUCKET_NAME, s3_key))
        finally:
            shutil.rmtree(tmpdir)

s3.py (project: EnglishDiary, author: jupiny)
def delete_file_from_s3(filename):
    conn = S3Connection(
        settings.AWS_ACCESS_KEY_ID,
        settings.AWS_SECRET_ACCESS_KEY,
    )
    b = Bucket(
        conn,
        settings.AWS_STORAGE_BUCKET_NAME,
    )
    k = Key(b)
    k.key = filename
    b.delete_key(k)
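
The same deletion can be written against boto3 instead of the legacy boto connection objects; this is an illustrative sketch, not code from the EnglishDiary project:

import boto3
from django.conf import settings

def delete_file_from_s3_boto3(filename):
    s3 = boto3.client(
        's3',
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
    )
    # delete_object succeeds silently even if the key does not exist
    s3.delete_object(Bucket=settings.AWS_STORAGE_BUCKET_NAME, Key=filename)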

backup_project.py (project: django-green-grove, author: dreipol)
def back_up_bucket(self):
        logger.info('Start backing up the bucket data.')

        boto_connection = boto.connect_s3(
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
            host=settings.AWS_S3_HOST,
        )
        source_bucket = boto_connection.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
        destination_bucket = boto_connection.get_bucket(settings.BACKUP_BUCKET_BUCKET_NAME)
        destination_sub_directory = '{location}/{timestamp}'.format(location=settings.BACKUP_BUCKET_LOCATION,
                                                                    timestamp=self.timestamp)

        try:
            key_list = [source_key.key for source_key in source_bucket.list() if source_key.size]
        except ValueError:
            raise ValueError('The backup task was aborted because of some bucket keys with no size. Set '
                             '`DJANGO_GREEN_GROVE_EMPTY_S3_KEYS` in your settings to get a list of the keys.')

        if hasattr(settings, 'DJANGO_GREEN_GROVE_EMPTY_S3_KEYS'):
            error_message = 'Some bucket keys were ignored during the backup task because they have no size'
            try:
                empty_keys = [source_key.key for source_key in source_bucket.list() if not source_key.size]
                error_message += ': %s' % ', '.join(empty_keys)
            except:
                error_message += '.'

            logger.error(error_message)

        for key in key_list:
            new_key_name = '{sub_directory}/{name}'.format(sub_directory=destination_sub_directory, name=key)
            destination_bucket.copy_key(
                new_key_name=new_key_name,
                src_bucket_name=source_bucket.name,
                src_key_name=key
            )

        logger.info('Bucket data successfully copied to the target storage backend.')

test_assets.py (project: asset-manager, author: emfoundation)
def get_bucket_contents(self):
        contents = s3_utils.s3.list_objects(Bucket=settings.AWS_STORAGE_BUCKET_NAME)
        bucket_contents = []
        if 'Contents' in contents:
            for obj in contents['Contents']:
                bucket_contents.append(obj['Key'])
        return bucket_contents
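
Note that list_objects returns at most 1000 keys per call; for larger buckets a paginator-based variant would be safer. The sketch below assumes s3_utils.s3 is a boto3 client, as the call style suggests, and is not part of the original test suite:

def get_all_bucket_keys(bucket_name):
    # Paginate so buckets with more than 1000 objects are fully listed
    paginator = s3_utils.s3.get_paginator('list_objects')
    keys = []
    for page in paginator.paginate(Bucket=bucket_name):
        keys.extend(obj['Key'] for obj in page.get('Contents', []))
    return keys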

test_assets.py (project: asset-manager, author: emfoundation)
def clear_bucket(self):
        """
        Empty S3 Bucket to ensure no cross-contamination between tests
        """
        logging.info('Clearing bucket...')
        contents = s3_utils.s3.list_objects(Bucket=settings.AWS_STORAGE_BUCKET_NAME)
        if 'Contents' in contents:
            for obj in contents['Contents']:
                s3_utils.delete_s3_object(obj['Key'])

views.py (project: aws-for-dummies, author: tramwaj29)
def get_signed_upload_url():
    bucket = settings.AWS_STORAGE_BUCKET_NAME
    image = Image.objects.create()
    filename = '{}_{}.jpg'.format(image.id, uuid.uuid4())
    signed_upload = s3_connection.generate_url(5*60, 'PUT', bucket, filename, headers={'Content-Type': 'image/jpeg'})
    return signed_upload
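
The returned URL is meant to be consumed by an HTTP PUT from the client. A hypothetical caller using the requests library (not part of the original project) could upload like this:

import requests

signed_url = get_signed_upload_url()
with open('photo.jpg', 'rb') as image_file:
    # The Content-Type header must match the one baked into the signature
    response = requests.put(signed_url, data=image_file,
                            headers={'Content-Type': 'image/jpeg'})
response.raise_for_status()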

amazons3service.py (project: itaplay, author: lhalam)
def delete_from_amazon_with_boto(url):
    """Function that delete clip from amazon

        Returns : True
    """
    conn = S3Connection(local_settings.AWS_ACCESS_KEY_ID,
                        local_settings.AWS_SECRET_ACCESS_KEY)
    bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
    k = boto.s3.key.Key(bucket)
    filename_from_url = url.split('/')[-1]
    k.key = settings.MEDIAFILES_LOCATION + filename_from_url
    bucket.delete_key(k)
    return True

models.py (project: perdiem-django, author: RevolutionTech)
def get_temporary_url(self, ttl=60):
        if hasattr(settings, 'AWS_STORAGE_BUCKET_NAME'):
            s3 = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, is_secure=True)
            key = "{media}/{filename}".format(media=settings.MEDIAFILES_LOCATION, filename=self.file.name)
            return s3.generate_url(ttl, 'GET', bucket=settings.AWS_STORAGE_BUCKET_NAME, key=key)
        return self.file.url
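
For newer codebases the same temporary link can be produced with boto3's generate_presigned_url. This is a sketch under the assumption that default AWS credentials are configured; it is not code from the perdiem-django project:

import boto3

def get_temporary_url_boto3(bucket, key, ttl=60):
    # boto3 picks up credentials from the environment or ~/.aws/credentials
    s3 = boto3.client('s3')
    return s3.generate_presigned_url(
        'get_object',
        Params={'Bucket': bucket, 'Key': key},
        ExpiresIn=ttl,
    )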

models.py (project: pfb-network-connectivity, author: azavea)
def run(self):
        """ Run the analysis job, configuring ENV appropriately """
        if self.status != self.Status.CREATED:
            logger.warn('Attempt to re-run job: {}. Skipping.'.format(self.uuid))
            return

        # Provide the base environment to enable running Django commands in the container
        environment = self.base_environment()
        # Job-specific settings
        environment.update({
            'NB_TEMPDIR': os.path.join('/tmp', str(self.uuid)),
            'PGDATA': os.path.join('/pgdata', str(self.uuid)),
            'PFB_SHPFILE_URL': self.neighborhood.boundary_file.url,
            'PFB_STATE': self.neighborhood.state_abbrev,
            'PFB_STATE_FIPS': self.neighborhood.state.fips,
            'PFB_JOB_ID': str(self.uuid),
            'AWS_STORAGE_BUCKET_NAME': settings.AWS_STORAGE_BUCKET_NAME,
            'PFB_S3_RESULTS_PATH': self.s3_results_path
        })
        if self.osm_extract_url:
            environment['PFB_OSM_FILE_URL'] = self.osm_extract_url

        # Workaround for not being able to run development jobs on the actual batch cluster:
        # bail out with a helpful message
        if settings.DJANGO_ENV == 'development':
            logger.warn("Can't actually run development analysis jobs on AWS. Try this:"
                        "\nPFB_JOB_ID='{PFB_JOB_ID}' PFB_S3_RESULTS_PATH='{PFB_S3_RESULTS_PATH}' "
                        "./scripts/run-local-analysis "
                        "'{PFB_SHPFILE_URL}' {PFB_STATE} {PFB_STATE_FIPS}".format(**environment))
            self.generate_tiles()
            return

        client = boto3.client('batch')
        container_overrides = {
            'environment': create_environment(**environment),
        }
        try:
            response = client.submit_job(
                jobName=self.analysis_job_name,
                jobDefinition=self.analysis_job_definition,
                jobQueue=settings.PFB_AWS_BATCH_ANALYSIS_JOB_QUEUE_NAME,
                containerOverrides=container_overrides)
            self.batch_job_id = response['jobId']
            self.save()
            self.update_status(self.Status.QUEUED)
        except (botocore.exceptions.BotoCoreError, KeyError):
            logger.exception('Error starting AnalysisJob {}'.format(self.uuid))
        else:
            self.generate_tiles()

renderer.py (project: arxiv-vanity, author: arxiv-vanity)
def render_paper(source, output_path, webhook_url=None):
    """
    Render a source directory using Engrafo.
    """
    try:
        os.makedirs(output_path)
    except FileExistsError:
        pass
    client = create_client()

    labels = {}
    environment = {}
    volumes = {}
    network = None

    # Production
    if settings.MEDIA_USE_S3:
        source = f"s3://{settings.AWS_STORAGE_BUCKET_NAME}/{source}"
        output_path = f"s3://{settings.AWS_STORAGE_BUCKET_NAME}/{output_path}"
        environment['AWS_ACCESS_KEY_ID'] = settings.AWS_ACCESS_KEY_ID
        environment['AWS_SECRET_ACCESS_KEY'] = settings.AWS_SECRET_ACCESS_KEY
        environment['AWS_S3_REGION_NAME'] = settings.AWS_S3_REGION_NAME
    # Development
    else:
        # HACK(bfirsh): MEDIA_ROOT is an absolute path to something on
        # the host machine. We need to make this relative to a mount inside the
        # Docker container.
        docker_media_root = os.path.join(
            '/mnt',
            os.path.basename(settings.MEDIA_ROOT)
        )
        source = os.path.join(docker_media_root, source)
        output_path = os.path.join(docker_media_root, output_path)
        # HOST_PWD is set in docker-compose.yml
        volumes[os.environ['HOST_PWD']] = {'bind': '/mnt', 'mode': 'rw'}

        network = 'arxivvanity_default'

    if settings.ENGRAFO_USE_HYPER_SH:
        labels['sh_hyper_instancetype'] = settings.HYPER_INSTANCE_TYPE

    container = client.containers.run(
        settings.ENGRAFO_IMAGE,
        'sh -c ' + shlex.quote('; '.join(make_command(source, output_path, webhook_url))),
        volumes=volumes,
        environment=environment,
        labels=labels,
        network=network,
        detach=True,
    )
    return container.id

download_data.py (project: intake, author: codeforamerica)
def handle(self, *args, **kwargs):
        """Downloads a single full-database fixture into db and syncs s3
        by ./manage.py download_data

        1. sync replica from origin
        2. pull fixture from bucket to local tempfile
        3. drops all tables in the public schema of the existing database
        4. load local fixture tempfile

        Relevant settings:
            ORIGIN_MEDIA_BUCKET_FOR_SYNC - bucket to pull from for sync
            AWS_STORAGE_BUCKET_NAME - bucket to overwrite with new files
            SYNC_BUCKET - bucket to pull fixture from
            SYNC_FIXTURE_LOCATION - filename used for fixture

        Assumes that a db fixture has already been dumped to SYNC_BUCKET
        by ./manage.py upload_data
        Relevant settings:
        """
        if not settings.ORIGIN_MEDIA_BUCKET_FOR_SYNC:
            raise Exception(
                "Warning: ORIGIN_MEDIA_BUCKET_FOR_SYNC not set."
                "Its likely this is production. This Error has protected you.")
        sync_s3 = [
            settings.AWS_CLI_LOCATION,
            's3', 'sync',
            's3://%s' % settings.ORIGIN_MEDIA_BUCKET_FOR_SYNC,  # sync from
            's3://%s' % settings.AWS_STORAGE_BUCKET_NAME,  # sync to
        ]  # syncs replica from origin
        aws_open(sync_s3)

        download_s3 = [
            settings.AWS_CLI_LOCATION,
            's3', 'mv',
            's3://%s/%s' % (
                settings.SYNC_BUCKET,  # bucket to pull from
                ntpath.basename(settings.SYNC_FIXTURE_LOCATION),  # filename
            ),
            settings.SYNC_FIXTURE_LOCATION,  # local temp filename
        ]  # command to pull down fixture to local file, with aws env vars
        aws_open(download_s3)
        table_names = run_sql(
            "select tablename from pg_tables where schemaname = 'public'")
        for table_name in table_names:
            print(table_name)
            drop_table(table_name[0])
        pg_load(settings.SYNC_FIXTURE_LOCATION)
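
aws_open, run_sql, drop_table and pg_load are project helpers that are not part of this excerpt. aws_open presumably shells out to the AWS CLI; a hypothetical minimal sketch of that behavior would be:

import subprocess

def aws_open(command_args):
    # Presumed behavior: run the assembled AWS CLI command and fail loudly on error
    subprocess.check_call(command_args)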

