def run(self):
"""Run command."""
self.run_command('ldist')
ldist_cmd = self.get_finalized_command('ldist')
dist_path = getattr(ldist_cmd, 'dist_path', None)
dist_name = getattr(ldist_cmd, 'dist_name', None)
if dist_path is None or dist_name is None:
            raise DistutilsArgError("'ldist' missing attributes")
        dist_name = self.s3_prefix + dist_name
        s3 = boto3.client(
            's3',
            aws_access_key_id=self.access_key,
            aws_secret_access_key=self.secret_access_key,
            config=Config(signature_version='s3v4')
        )
        log.info('uploading {} to {} using kms key {}'.format(
            dist_name,
            self.s3_bucket,
            self.kms_key_id
        ))
        with open(dist_path, 'rb') as dist:
            if self.kms_key_id:
                response = s3.put_object(
                    Body=dist,
                    Bucket=self.s3_bucket,
                    Key=dist_name,
                    ServerSideEncryption='aws:kms',
                    SSEKMSKeyId=self.kms_key_id
                )
            else:
                response = s3.put_object(
                    Body=dist,
                    Bucket=self.s3_bucket,
                    Key=dist_name,
                    ServerSideEncryption='AES256'
                )
log.info('upload complete:\n{}'.format(
json.dumps(response, sort_keys=True, indent=4, separators=(',', ': ')))
)
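
# Not part of the original command -- a small sketch for sanity-checking
# which server-side encryption S3 actually applied to the uploaded object.
# head_object returns the same ServerSideEncryption field put_object reports.
def check_encryption(s3_client, bucket, key):
    head = s3_client.head_object(Bucket=bucket, Key=key)
    # expect 'aws:kms' when SSEKMSKeyId was supplied, 'AES256' otherwise
    return head.get('ServerSideEncryption')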
def setup_s3_client(self):
"""Creates an authenticated s3 client.
:return: S3 client instance.
:rtype: botocore.client.BaseClient
"""
session = boto3.Session(aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key)
s3_config = Config(connect_timeout=S3_CONNECT_TIMEOUT,
read_timeout=S3_READ_TIMEOUT)
client = session.client('s3', region_name=self.default_region,
config=s3_config)
return client
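
# S3_CONNECT_TIMEOUT and S3_READ_TIMEOUT are used above but not defined in
# this excerpt. A plausible definition (values in seconds, illustrative --
# the real module may choose differently):
S3_CONNECT_TIMEOUT = 60
S3_READ_TIMEOUT = 600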
def _upload_object(self, file_obj, object_key):
"""Upload objects to S3 in streaming fashion.
:param file file_obj: A file like object to upload. At a minimum, it
must implement the read method, and must return bytes.
:param str object_key: The destination key where to upload the object.
:raise S3DestinationError: if failed to upload object.
"""
remote_name = "s3://{bucket}/{name}".format(
bucket=self.bucket,
name=object_key
)
LOG.debug("Generating S3 transfer config")
s3_transfer_config = self.get_transfer_config()
LOG.debug("Starting to stream to %s", remote_name)
try:
self.s3_client.upload_fileobj(file_obj,
self.bucket,
object_key,
Config=s3_transfer_config)
LOG.debug("Successfully streamed to %s", remote_name)
except ClientError as err:
raise S3DestinationError(err)
return self._validate_upload(object_key)
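
# get_transfer_config() is called above but not shown in this excerpt.
# A minimal sketch of what it might return, built on boto3's TransferConfig
# (the thresholds here are illustrative, not the project's actual values):
from boto3.s3.transfer import TransferConfig

def get_transfer_config(self):
    return TransferConfig(
        multipart_threshold=8 * 1024 * 1024,  # switch to multipart above 8 MB
        multipart_chunksize=8 * 1024 * 1024,  # 8 MB parts
        max_concurrency=4                     # parallel part uploads
    )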
def __init__(self, config=None):
        self.config = dict(DEFAULT_SETTINGS)  # copy, so the shared default isn't mutated
self.config.update(config or settings.DJAMAZING)
self.cloud_front_base_url = self.config['CLOUDFRONT_URL']
self.bucket = boto3.resource(
's3',
aws_access_key_id=self.config['S3_KEY_ID'],
aws_secret_access_key=self.config['S3_SECRET_KEY'],
config=Config(signature_version='s3v4')
).Bucket(self.config['S3_BUCKET'])
self._init_protected_mode(self.config)
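
# A sketch of the settings dict this storage expects; the keys are taken
# from the lookups above, the values are placeholders:
DJAMAZING = {
    'CLOUDFRONT_URL': 'https://d111111abcdef8.cloudfront.net/',
    'S3_KEY_ID': 'AKIA...',
    'S3_SECRET_KEY': '...',
    'S3_BUCKET': 'my-static-bucket',
}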
def get_s3():
url, key, secret = map(os.environ.get, ["S3_ENDPOINT_URL", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"])
s3 = False
if url and key and secret:
with utils.temp_loglevel():
            s3 = boto3.client('s3', endpoint_url=url,
                              aws_access_key_id=key,
                              aws_secret_access_key=secret,
                              config=Config(signature_version='s3v4'),
                              region_name='us-east-1')
return s3
def initialize(cls):
"""
        Called the first time a toolbox of this type is created. This is meant
        for installing libraries if they are needed. For example, if you need
        boto3 for an Amazon Handler, you would call
        FloatingTools.installPackage('boto3', 'boto') here. It is also meant
        for any other setup, such as getting login data.

        .. note::
            This is only called once, during the first call to create a
            toolbox of this type.

        .. code-block:: python
            :linenos:

@classmethod
def initialize(cls):
# install the aws api lib through pip
FloatingTools.installPackage('boto3', 'boto')
import boto3
from botocore.client import Config
# set log in data for AWS
os.environ['AWS_ACCESS_KEY_ID'] = cls.userData()['access key']
os.environ['AWS_SECRET_ACCESS_KEY'] = cls.userData()['secret key']
cls.CONNECTION = boto3.resource('s3', config=Config(signature_version='s3v4'))
"""
pass
def initiator(globalBaseUrl):
"""take a url and set up s3 auth. Then call the driver"""
global s3
    # An alternate way to authenticate is shown in the else branch;
    # use whichever you prefer.
if True:
access_key = os.environ.get('AWS_ACCESS_KEY_ID')
secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
if access_key is None or secret_key is None:
            print(printWarning("""No access credentials available.
            Please export your AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
            Details: http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html
            """))
            sys.exit(1)
s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
else:
# If you prefer to supply the credentials here,
# make sure you flip the if condition to False
        # and substitute the necessary data :)
s3 = boto3.resource('s3',
aws_access_key_id=ACCESS_ID,
aws_secret_access_key=ACCESS_KEY,
config=Config(signature_version='s3v4')
)
print printScreen("[>]Initiating...", "blue")
print printScreen("[>]Press Ctrl+C to terminate script", "blue")
scanner(globalBaseUrl)
driver(globalBaseUrl)
# From transit-vpc-push-juniper-config.py (project: aws-transitVPC-jnpr, author: serrollc)
def pushConfig(ssh, config):
#log.info("Starting to push config")
#ssh.send('term len 0\n')
#prompt(ssh)
#CISCO --ssh.send('config t\n')
log.info("Config received for push %s", config)
ssh.send('edit\n')
log.debug("%s", prompt(ssh))
stime = time.time()
for line in config[0].split("\n"):
if line == "WAIT":
log.debug("Waiting 30 seconds...")
time.sleep(30)
else:
ssh.send(line+'\n')
log.info("%s", prompt(ssh))
log.info("Saving config!")
ssh.send('save /var/log/AWS_config.txt\n\n\n\n\n')
log.info("Saved config!")
time.sleep(15)
#log.info("%s", prompt(ssh))
log.info("Committing---")
ssh.send('commit\n')
time.sleep(30)
ssh.send('exit\n')
#log.info("%s", prompt(ssh))
log.debug(" --- %s seconds ---", (time.time() - stime))
##ssh.send('copy run start\n\n\n\n\n')
ssh.send('exit\n')
#log.info("%s", prompt(ssh))
log.info("Update complete!")
#Logic to determine the bucket prefix from the S3 key name that was provided
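# That code is not included in this excerpt; a hypothetical sketch of the
# idea, assuming keys shaped like 'some/prefix/config_file.txt':
def getBucketPrefix(bucket_key):
    # everything up to and including the last '/' is the prefix
    return bucket_key[:bucket_key.rfind('/') + 1] if '/' in bucket_key else ''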
def getTransitConfig(bucket_name, bucket_prefix, s3_url, config_file):
s3 = boto3.client('s3', endpoint_url=s3_url,
config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
log.info("Downloading config file: %s/%s/%s%s", s3_url, bucket_name, bucket_prefix,config_file)
return ast.literal_eval(s3.get_object(Bucket=bucket_name,Key=bucket_prefix+config_file)['Body'].read())
#Logic to upload a new/updated transit VPC configuration file to S3 (not currently used)
def putTransitConfig(bucket_name, bucket_prefix, s3_url, config_file, config):
    s3 = boto3.client('s3', endpoint_url=s3_url,
                      config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
    log.info("Uploading new config file: %s/%s/%s%s", s3_url, bucket_name, bucket_prefix, config_file)
    s3.put_object(Bucket=bucket_name, Key=bucket_prefix + config_file, Body=str(config))
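
# putTransitConfig stores the dict with str(config) and getTransitConfig
# reads it back with ast.literal_eval, a parser that only accepts Python
# literals. A self-contained illustration of that round trip (example values):
import ast

config = {'vpn_status': 'active', 'eip1': '203.0.113.10'}
assert ast.literal_eval(str(config)) == config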
#Logic to download the SSH private key from S3 to be used for SSH public key authentication
def downloadPrivateKey(bucket_name, bucket_prefix, s3_url, prikey):
if os.path.exists('/tmp/'+prikey):
os.remove('/tmp/'+prikey)
    s3 = boto3.client('s3', endpoint_url=s3_url,
                      config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
    log.info("Downloading private key: %s/%s/%s%s", s3_url, bucket_name, bucket_prefix, prikey)
    s3.download_file(bucket_name, bucket_prefix + prikey, '/tmp/' + prikey)
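
# How the downloaded key is typically used next -- a sketch of opening the
# SSH session that pushConfig() drives. Paramiko is an assumption here (the
# ssh.send()/prompt() helpers above suggest it, but that code isn't shown):
import paramiko

def openSSHSession(host, user, prikey):
    key = paramiko.RSAKey.from_private_key_file('/tmp/' + prikey)
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=host, username=user, pkey=key)
    return client.invoke_shell()  # interactive channel with .send()/.recv()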
#Logic to create the appropriate Cisco configuration
def handle_BucketVersioningConfiguration(self, resource, item_value):
# Config defaults versioning to 'Off' for a null value
if item_value['status'] not in ('Enabled', 'Suspended'):
return
resource['Versioning'] = {'Status': item_value['status']}
if item_value['isMfaDeleteEnabled']:
resource['Versioning']['MFADelete'] = item_value[
'isMfaDeleteEnabled'].title()
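
# Shape of the Config item this handler consumes, inferred from the keys
# referenced above (a sketch, not captured output):
#   item_value = {'status': 'Enabled', 'isMfaDeleteEnabled': 'true'}
#   resource = {}
#   handle_BucketVersioningConfiguration(resource, item_value)
#   # resource -> {'Versioning': {'Status': 'Enabled', 'MFADelete': 'True'}}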
def bucket_client(session, b, kms=False):
region = get_region(b)
if kms:
# Need v4 signature for aws:kms crypto, else let the sdk decide
# based on region support.
config = Config(
signature_version='s3v4',
read_timeout=200, connect_timeout=120)
else:
config = Config(read_timeout=200, connect_timeout=120)
return session.client('s3', region_name=region, config=config)
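
# Usage sketch (session is a boto3 Session; 'b' is whatever bucket record
# get_region() above understands):
#   s3 = bucket_client(session, b, kms=True)   # force SigV4 for aws:kms
#   s3 = bucket_client(session, b)             # let the SDK pick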
def get_build_artifact_id(build_id):
"""Get artifact (build.json) from the build project . We are making this as an additional call to get the build.json
which already contains the new built repository ECR path. We could have consolidated this script and executed in the build
phase, but as codebuild accepts the input from one source only (scripts and application code are in different sources), thats
why an additional call to retrieve build.json from a different build project.
Args:
build_id - Build ID for codebuild (build phase)
Returns:
build.json
Raises:
Exception: Any exception thrown by handler
"""
codebuild_client = boto3.client('codebuild')
response = codebuild_client.batch_get_builds(
ids=[
str(build_id),
]
)
for build in response['builds']:
s3_location = build['artifacts']['location']
bucketkey = s3_location.split(":")[5]
bucket = bucketkey.split("/")[0]
key = bucketkey[bucketkey.find("/") + 1:]
s3_client = boto3.client('s3', config=Config(signature_version='s3v4'))
s3_client.download_file(bucket, key, 'downloaded_object')
zip_ref = zipfile.ZipFile('downloaded_object', 'r')
zip_ref.extractall('downloaded_folder')
zip_ref.close()
with open('downloaded_folder/build.json') as data_file:
objbuild = json.load(data_file)
print(objbuild['tag'])
return objbuild['tag']
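
# How the artifact location is unpacked above, as a worked example --
# CodeBuild reports the artifact location as an S3 ARN (bucket name here
# is illustrative):
location = 'arn:aws:s3:::my-artifact-bucket/builds/build.json'
bucketkey = location.split(':')[5]           # 'my-artifact-bucket/builds/build.json'
bucket = bucketkey.split('/')[0]             # 'my-artifact-bucket'
key = bucketkey[bucketkey.find('/') + 1:]    # 'builds/build.json'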
def get_aws_client(client_type, config=None):
if not config:
config = Config(signature_version='s3v4')
aws_access_key = settings.CONFIGURATION.lookup('aws:access_key')
aws_secret_key = settings.CONFIGURATION.lookup('aws:secret_key')
if aws_access_key and aws_secret_key:
c = client(client_type, config=config, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
else:
c = client(client_type, config=config)
return c
def create_client(stage_info, use_accelerate_endpoint=False):
"""
Creates a client object with a stage credential
:param stage_credentials: a stage credential
:param use_accelerate_endpoint: is accelerate endpoint?
:return: client
"""
logger = getLogger(__name__)
stage_credentials = stage_info[u'creds']
security_token = stage_credentials.get(u'AWS_TOKEN', None)
logger.debug(u"AWS_ID: %s", stage_credentials[u'AWS_ID'])
config = Config(
signature_version=u's3v4',
s3={
'use_accelerate_endpoint': use_accelerate_endpoint,
})
client = boto3.resource(
u's3',
region_name=stage_info['region'],
aws_access_key_id=stage_credentials[u'AWS_ID'],
aws_secret_access_key=stage_credentials[u'AWS_KEY'],
aws_session_token=security_token,
config=config,
)
return client
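
# Shape of the stage_info argument, inferred from the lookups above
# (values are placeholders):
#   stage_info = {
#       'region': 'us-west-2',
#       'creds': {'AWS_ID': 'AKIA...', 'AWS_KEY': '...', 'AWS_TOKEN': None},
#   }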
def getCert(self, payload):
payload['kmsauth_token'] = self.kmsauth_token
payload_json = json.dumps(payload)
lambdabotoconfig = Config(
connect_timeout=self.config['timeoutconfig']['connect'],
read_timeout=self.config['timeoutconfig']['read']
)
try:
mfa_lambda_client = boto3.client(
'lambda',
region_name=self.region,
aws_access_key_id=self.creds['AccessKeyId'],
aws_secret_access_key=self.creds['SecretAccessKey'],
aws_session_token=self.creds['SessionToken'],
config=lambdabotoconfig
)
response = mfa_lambda_client.invoke(
FunctionName=self.config['functionname'],
InvocationType='RequestResponse',
LogType='Tail',
Payload=payload_json,
Qualifier=self.config['functionversion']
)
if response['StatusCode'] != 200:
raise LambdaInvocationException('Error creating cert.')
except ConnectTimeout:
raise LambdaInvocationException('Timeout connecting to Lambda')
except ReadTimeout:
raise LambdaInvocationException('Timeout reading cert from Lambda')
except SSLError:
raise LambdaInvocationException('SSL error connecting to Lambda')
except ValueError:
# On a 404, boto tries to decode any body as json
raise LambdaInvocationException('Invalid message format in Lambda response')
payload = json.loads(response['Payload'].read())
if 'certificate' not in payload:
raise LambdaInvocationException('No certificate in response.')
return payload['certificate']
def __init__(self, endpoint_url, bucket_prefix):
self.s3 = boto3.resource('s3', endpoint_url=endpoint_url,
aws_access_key_id=os.environ['S3_KEY'],
aws_secret_access_key=os.environ['S3_SECRET'],
region_name='us-east-1',
config=Config(signature_version='s3v4'))
self.bucket_prefix = bucket_prefix
def connect(self, refresh=False):
"""
Establish S3 connection object.
Parameters
----------
        refresh : bool (False)
            If True, discard any cached connection and open a fresh one
"""
anon, key, secret, kwargs, ckwargs, token, ssl = (
self.anon, self.key, self.secret, self.kwargs,
self.client_kwargs, self.token, self.use_ssl)
# Include the current PID in the connection key so that different
# SSL connections are made for each process.
tok = tokenize(anon, key, secret, kwargs, ckwargs, token,
ssl, os.getpid())
if refresh:
self._conn.pop(tok, None)
if tok not in self._conn:
logger.debug("Open S3 connection. Anonymous: %s", self.anon)
if self.anon:
from botocore import UNSIGNED
conf = Config(connect_timeout=self.connect_timeout,
read_timeout=self.read_timeout,
signature_version=UNSIGNED, **self.config_kwargs)
self.session = boto3.Session(**self.kwargs)
else:
conf = Config(connect_timeout=self.connect_timeout,
read_timeout=self.read_timeout,
**self.config_kwargs)
self.session = boto3.Session(self.key, self.secret, self.token,
**self.kwargs)
s3 = self.session.client('s3', config=conf, use_ssl=ssl,
**self.client_kwargs)
self._conn[tok] = (s3, self.session)
else:
s3, session = self._conn[tok]
self.session = session
return s3
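
# The cache above is keyed by the credentials *and* os.getpid(), so every
# process gets its own client. Usage sketch (fs is an instance of this
# filesystem class):
#   s3 = fs.connect()                  # opens and caches the connection
#   s3 is fs.connect()                 # True -- served from self._conn
#   s3 is fs.connect(refresh=True)     # False -- cached entry was dropped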
def get_s3_client():
return boto3.client(
's3', 'us-east-1', config=Config(
s3={'addressing_style': 'path'}
)
)