def make_session(identity_profile):
    session = botocore.session.Session(profile=identity_profile)
    try:
        session3 = boto3.session.Session(botocore_session=session)
    except botocore.exceptions.ProfileNotFound as err:
        print(str(err), file=sys.stderr)
        if session.available_profiles:
            print("Available profiles: %s" %
                  ", ".join(sorted(session.available_profiles)), file=sys.stderr)
            print("You can specify a profile by passing it with the -i "
                  "command line flag.", file=sys.stderr)
        else:
            print("You have no AWS profiles configured. Please run 'aws "
                  "configure --profile identity' to get started.", file=sys.stderr)
        return None, None, USER_RECOVERABLE_ERROR
    return session, session3, None
def file_exists(self, remote_path):
    """
    Check if the file we are trying to upload already exists in S3

    :param remote_path:
    :return: True, if file exists. False, otherwise
    """
    try:
        # Find the object that matches this ETag
        self.s3.head_object(
            Bucket=self.bucket_name, Key=remote_path)
        return True
    except botocore.exceptions.ClientError:
        # Either the file does not exist or we are unable to get
        # this information.
        return False
def retry(fn, max_attempts=10, delay=0.200):
    attempts = 0
    while True:
        try:
            fn()
            break
        except botocore.exceptions.ClientError as error:
            # Only retry on boto's ClientError/NoSuchEntity error
            if error.response["Error"]["Code"] == "NoSuchEntity":
                LOG.warning(
                    "error while attaching role to policy: {}.".format(error))
                attempts += 1
                if attempts < max_attempts:
                    LOG.warning("Retry in {}s...".format(delay))
                    time.sleep(delay)
                else:
                    raise
            else:
                raise
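# Usage sketch (not from the original project): `retry` expects a
# zero-argument callable, so a boto3 call with keyword arguments is wrapped
# with functools.partial first. The role name and policy ARN below are
# hypothetical placeholders, and `iam` is assumed to be a boto3 IAM client.
import functools

import boto3

iam = boto3.client("iam")
retry(functools.partial(
    iam.attach_role_policy,
    RoleName="my-function-role",                             # hypothetical
    PolicyArn="arn:aws:iam::123456789012:policy/my-policy",  # hypothetical
))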
def test_create_role_raises_error_on_failure(self, stubbed_session):
    arn = 'good_arn' * 3
    role_id = 'abcd' * 4
    today = datetime.datetime.today()
    stubbed_session.stub('iam').create_role(
        RoleName='role_name',
        AssumeRolePolicyDocument=json.dumps({'trust': 'policy'})
    ).returns({'Role': {
        'RoleName': 'No', 'Arn': arn, 'Path': '/',
        'RoleId': role_id, 'CreateDate': today}}
    )
    stubbed_session.stub('iam').put_role_policy(
        RoleName='role_name',
        PolicyName='role_name',
        PolicyDocument={'policy': 'document'}
    ).raises_error(
        error_code='MalformedPolicyDocumentException',
        message='MalformedPolicyDocument'
    )
    stubbed_session.activate_stubs()
    awsclient = TypedAWSClient(stubbed_session)
    with pytest.raises(botocore.exceptions.ClientError):
        awsclient.create_role(
            'role_name', {'trust': 'policy'}, {'policy': 'document'})
    stubbed_session.verify_stubs()
def test_create_function_fails_after_max_retries(self, stubbed_session):
    kwargs = {
        'FunctionName': 'name',
        'Runtime': 'python2.7',
        'Code': {'ZipFile': b'foo'},
        'Handler': 'app.app',
        'Role': 'myarn',
    }
    for _ in range(TypedAWSClient.LAMBDA_CREATE_ATTEMPTS):
        stubbed_session.stub('lambda').create_function(
            **kwargs).raises_error(
                error_code='InvalidParameterValueException',
                message=('The role defined for the function cannot '
                         'be assumed by Lambda.')
            )
    stubbed_session.activate_stubs()
    awsclient = TypedAWSClient(stubbed_session, mock.Mock(spec=time.sleep))
    with pytest.raises(LambdaClientError) as excinfo:
        awsclient.create_function('name', 'myarn', b'foo', 'python2.7',
                                  'app.app')
    assert isinstance(
        excinfo.value.original_error, botocore.exceptions.ClientError)
    stubbed_session.verify_stubs()
def test_create_function_propagates_unknown_error(self, stubbed_session):
    kwargs = {
        'FunctionName': 'name',
        'Runtime': 'python2.7',
        'Code': {'ZipFile': b'foo'},
        'Handler': 'app.app',
        'Role': 'myarn',
    }
    stubbed_session.stub('lambda').create_function(
        **kwargs).raises_error(
            error_code='UnknownException', message='')
    stubbed_session.activate_stubs()
    awsclient = TypedAWSClient(stubbed_session, mock.Mock(spec=time.sleep))
    with pytest.raises(LambdaClientError) as excinfo:
        awsclient.create_function('name', 'myarn', b'foo', 'python2.7',
                                  'app.app')
    assert isinstance(
        excinfo.value.original_error, botocore.exceptions.ClientError)
    stubbed_session.verify_stubs()
def test_update_function_fails_after_max_retries(self, stubbed_session):
    stubbed_session.stub('lambda').update_function_code(
        FunctionName='name', ZipFile=b'foo').returns(
            {'FunctionArn': 'arn'})
    update_config_kwargs = {
        'FunctionName': 'name',
        'Role': 'role-arn'
    }
    for _ in range(TypedAWSClient.LAMBDA_CREATE_ATTEMPTS):
        stubbed_session.stub('lambda').update_function_configuration(
            **update_config_kwargs).raises_error(
                error_code='InvalidParameterValueException',
                message=('The role defined for the function cannot '
                         'be assumed by Lambda.'))
    stubbed_session.activate_stubs()
    awsclient = TypedAWSClient(stubbed_session, mock.Mock(spec=time.sleep))
    with pytest.raises(botocore.exceptions.ClientError):
        awsclient.update_function('name', b'foo', role_arn='role-arn')
    stubbed_session.verify_stubs()
def boto3_exception_handler(f):
    """Capture and pretty print exceptions"""
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except (botocore.exceptions.ClientError,
                botocore.exceptions.WaiterError,
                botocore.exceptions.ParamValidationError,
                ConfigError) as e:
            click.secho(str(e), fg='red')
        except KeyboardInterrupt:
            click.secho('Aborted.', fg='red')
    return wrapper
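# Usage sketch (the command below is hypothetical, not part of the original
# project): the decorator is meant to wrap click commands so boto errors are
# echoed in red instead of surfacing as tracebacks.
import boto3
import click

@click.command()
@boto3_exception_handler
def describe_stack():
    # Any ClientError/WaiterError/ParamValidationError raised here is caught
    # by the wrapper and printed in red.
    cfn = boto3.client('cloudformation')
    click.echo(cfn.describe_stacks(StackName='my-stack'))  # hypothetical stack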
weatherstation_data_populate.py (project: aCloudGuru-DynamoDB, author: acantril)
def c_table(c):  # create dynamo DB tables
    try:
        print "INFO :: Creating %s Table....." % c['TableName']
        db_r.create_table(**c)
        print "INFO :: Waiting for completion..."
        db_r.Table(c['TableName']).wait_until_exists()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "ResourceInUseException":
            print "INFO :: WeatherstationInc %s Table exists, deleting ...." % c['TableName']
            db_r.Table(c['TableName']).delete()
            print "INFO :: Waiting for delete.."
            db_r.Table(c['TableName']).wait_until_not_exists()
            c_table(c)
        else:
            print "Unknown Error"
#------------------------------------------------------------------------------
def c_table(Table, t_config):  # create dynamo DB tables
    """
    Try to create the table; if creation fails because the table already
    exists, drop it and rerun the function to create it again.
    """
    try:
        print "INFO :: Creating %s Table....." % Table
        db_r.create_table(
            AttributeDefinitions=t_config[Table]['AttributeDefinitions'],
            TableName=Table,
            KeySchema=t_config[Table]['KeySchema'],
            ProvisionedThroughput=t_config[Table]['ProvisionedThroughput']
        )
        print "INFO :: Waiting for completion..."
        db_r.Table(Table).wait_until_exists()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "ResourceInUseException":
            print "INFO :: Learning Online %s Table exists, deleting ...." % Table
            db_r.Table(Table).delete()
            print "INFO :: Waiting for delete.."
            db_r.Table(Table).wait_until_not_exists()
            c_table(Table, t_config)
        else:
            print "Unknown Error"
#------------------------------------------------------------------------------
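# The shape of `t_config` and the `db_r` resource are not shown on this page;
# the sketch below illustrates what the function assumes. The table name, key
# attribute, region and capacities are hypothetical.
import boto3

db_r = boto3.resource('dynamodb', region_name='us-east-1')  # assumed resource

t_config = {
    'Weather': {
        'AttributeDefinitions': [
            {'AttributeName': 'StationId', 'AttributeType': 'S'},
        ],
        'KeySchema': [
            {'AttributeName': 'StationId', 'KeyType': 'HASH'},
        ],
        'ProvisionedThroughput': {
            'ReadCapacityUnits': 1,
            'WriteCapacityUnits': 1,
        },
    },
}

c_table('Weather', t_config)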
def get_object_md5_checksum(bucket, key):
    """Return the MD5 checksum (ETag) for the remote file.

    If the file was uploaded as a single-part file, the MD5 checksum is
    the checksum of the file content.
    However, if the file was uploaded as a multi-part file,
    AWS calculates the MD5 the following way (based on AWS documentation):
    1. Calculate the MD5 hash for each uploaded part of the file.
    2. Concatenate the hashes into a single binary string.
    3. Calculate the MD5 hash of that result.
    4. Concatenate the resulting MD5 hash with a dash
       and the number of file parts.

    :param bucket: The name of the bucket.
    :param key: The full path to the remote file.
    :return: The MD5 checksum for the remote file.
    """
    try:
        md5_checksum = s3_client.head_object(
            Bucket=bucket,
            Key=key
        )['ETag'][1:-1]
    except botocore.exceptions.ClientError:
        md5_checksum = ''
    return md5_checksum
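# The multipart rule in the docstring can be reproduced locally to compare a
# file against the remote ETag. A minimal sketch, assuming the upload used a
# fixed part size (boto3's 8 MB transfer default is assumed here); the helper
# name is not part of the original code.
import hashlib

def local_etag(path, part_size=8 * 1024 * 1024):
    """Compute an S3-style ETag for a local file (illustrative sketch)."""
    md5s = []
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(part_size), b''):
            md5s.append(hashlib.md5(chunk))
    if not md5s:
        return hashlib.md5(b'').hexdigest()
    if len(md5s) == 1:
        # Single-part upload: the ETag is just the MD5 of the content.
        return md5s[0].hexdigest()
    # Multi-part upload: MD5 of the concatenated binary part digests,
    # followed by a dash and the number of parts.
    digests = b''.join(m.digest() for m in md5s)
    return '{}-{}'.format(hashlib.md5(digests).hexdigest(), len(md5s))

# local_etag(path) only matches get_object_md5_checksum(bucket, key) when the
# part size matches the one used for the upload and the object is not
# server-side encrypted with a customer key.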
def delete_bucket(bucket_name: str, region: str, key: str, secret: str):
    print("Deleting bucket {}".format(bucket_name))
    s3_client = boto3.client(
        service_name='s3',
        region_name=region,
        aws_access_key_id=key,
        aws_secret_access_key=secret
    )
    try:
        contents = s3_client.list_objects(Bucket=bucket_name).get('Contents')
        while contents is not None:
            delete_keys = [{'Key': o.get('Key')} for o in contents]
            s3_client.delete_objects(Bucket=bucket_name, Delete={
                'Objects': delete_keys
            })
            contents = s3_client.list_objects(Bucket=bucket_name).get('Contents')
        s3_client.delete_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:
        error = e.response.get('Error')
        if not error or error.get('Code') != 'NoSuchBucket':
            raise e
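# The loop above re-lists the bucket because `list_objects` returns at most
# 1,000 keys per call. An alternative sketch (a swapped-in approach, not from
# the original snippet) that makes the paging explicit with boto3's paginator:
def empty_bucket(s3_client, bucket_name):
    """Delete every object in a bucket, page by page (illustrative sketch)."""
    paginator = s3_client.get_paginator('list_objects_v2')
    for page in paginator.paginate(Bucket=bucket_name):
        objects = [{'Key': obj['Key']} for obj in page.get('Contents', [])]
        if objects:
            s3_client.delete_objects(Bucket=bucket_name,
                                     Delete={'Objects': objects})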
def get_subnets(self):
    try:
        client = boto3.client('ec2',
                              aws_access_key_id=self.access_key_id,
                              aws_secret_access_key=self.secret_access_key,
                              region_name=self.region_name)
    except Exception as e:
        raise AWSException("There was an error connecting to EC2: %s" % e)
    # Search EC2 for the VPC subnets
    try:
        return client.describe_subnets()
    except botocore.exceptions.ClientError as e:
        raise AWSException("There was an error describing the VPC Subnets: %s" %
                           e.response["Error"]["Message"])
    except botocore.exceptions.ParamValidationError as e:
        raise AWSException("There was an error describing the VPC Subnets: %s" % e)
def list_clusters(self):
    try:
        client = boto3.client('emr',
                              aws_access_key_id=self.access_key_id,
                              aws_secret_access_key=self.secret_access_key,
                              region_name=self.region_name)
    except Exception as e:
        raise AWSException("There was an error connecting to EMR: %s" % e)
    try:
        cluster_list = client.list_clusters()
    except botocore.exceptions.ClientError as e:
        if e.response["Error"]["Code"] == "AuthFailure":
            raise AWSException("Invalid AWS access key id or aws secret access key")
        else:
            raise AWSException("There was an error listing the EMR clusters: %s" %
                               e.response["Error"]["Message"])
    except Exception as e:
        raise AWSException("Unknown Error: %s" % e)
    return cluster_list
def describe_cluster(self, cluster_id):
    try:
        client = boto3.client('emr',
                              aws_access_key_id=self.access_key_id,
                              aws_secret_access_key=self.secret_access_key,
                              region_name=self.region_name)
    except Exception as e:
        raise AWSException("There was an error connecting to EMR: %s" % e)
    try:
        return client.describe_cluster(ClusterId=cluster_id)
    except botocore.exceptions.ClientError as e:
        if e.response["Error"]["Code"] == "AuthFailure":
            raise AWSException("Invalid AWS access key id or aws secret access key")
        else:
            raise AWSException("There was an error describing the EMR cluster: %s" %
                               e.response["Error"]["Message"])
    except Exception as e:
        raise AWSException("Unknown Error: %s" % e)
def list_bootstrap_actions(self, cluster_id):
    try:
        client = boto3.client('emr',
                              aws_access_key_id=self.access_key_id,
                              aws_secret_access_key=self.secret_access_key,
                              region_name=self.region_name)
    except Exception as e:
        raise AWSException("There was an error connecting to EMR: %s" % e)
    try:
        return client.list_bootstrap_actions(ClusterId=cluster_id)
    except botocore.exceptions.ClientError as e:
        if e.response["Error"]["Code"] == "AuthFailure":
            raise AWSException("Invalid AWS access key id or aws secret access key")
        else:
            raise AWSException("There was an error listing the cluster bootstrap actions: %s" %
                               e.response["Error"]["Message"])
    except Exception as e:
        raise AWSException("Unknown Error: %s" % e)
def terminate_cluster(self, cluster_id):
    try:
        client = boto3.client('emr',
                              aws_access_key_id=self.access_key_id,
                              aws_secret_access_key=self.secret_access_key,
                              region_name=self.region_name)
    except Exception as e:
        raise AWSException("There was an error connecting to EMR: %s" % e)
    try:
        client.terminate_job_flows(JobFlowIds=[cluster_id])
    except botocore.exceptions.ClientError as e:
        if e.response["Error"]["Code"] == "AuthFailure":
            raise AWSException("Invalid AWS access key id or aws secret access key")
        else:
            raise AWSException("There was an error terminating the EMR cluster: %s" %
                               e.response["Error"]["Message"])
    except Exception as e:
        raise AWSException("Unknown Error: %s" % e)
def get_security_group_port_open(self, security_group_id, port):
    try:
        client = boto3.client('ec2',
                              aws_access_key_id=self.access_key_id,
                              aws_secret_access_key=self.secret_access_key,
                              region_name=self.region_name)
    except Exception as e:
        raise AWSException("There was an error connecting to EC2: %s" % e)
    try:
        response = client.describe_security_groups(GroupIds=[security_group_id])
        # Loop through all of the security group permissions and return True
        # if any rule opens exactly this port
        for ip_permission in response["SecurityGroups"][0]["IpPermissions"]:
            if ip_permission["FromPort"] == port and ip_permission["ToPort"] == port:
                return True
        return False
    except botocore.exceptions.ClientError as e:
        raise AWSException("There was an error describing the security group: %s" %
                           e.response["Error"]["Message"])
def one_mfa(args, credentials):
    session, session3, err = make_session(args.identity_profile)
    if err:
        return err
    if "AWSMFA_TESTING_MODE" in os.environ:
        use_testing_credentials(args, credentials)
        return OK
    mfa_args = {}
    if args.token_code != 'skip':
        serial_number, token_code, err = acquire_code(args, session, session3)
        if err is not OK:
            return err
        mfa_args['SerialNumber'] = serial_number
        mfa_args['TokenCode'] = token_code
    sts = session3.client('sts')
    try:
        if args.role_to_assume:
            mfa_args.update(
                DurationSeconds=min(args.duration, 3600),
                RoleArn=args.role_to_assume,
                RoleSessionName=args.role_session_name)
            response = sts.assume_role(**mfa_args)
        else:
            mfa_args.update(DurationSeconds=args.duration)
            response = sts.get_session_token(**mfa_args)
    except botocore.exceptions.ClientError as err:
        if err.response["Error"]["Code"] == "AccessDenied":
            print(str(err), file=sys.stderr)
            return USER_RECOVERABLE_ERROR
        else:
            raise
    print_expiration_time(response['Credentials']['Expiration'])
    update_credentials_file(args.aws_credentials,
                            args.target_profile,
                            args.identity_profile,
                            credentials,
                            response['Credentials'])
    return OK
def wait(stack, show_events=False, last_event=None):
    """wait for stack action to complete"""
    global REGION
    stack_obj = boto3.resource('cloudformation', region_name=REGION).Stack(stack)
    while True:
        try:
            stack_obj.reload()
            # display new events
            if show_events:
                last_event = stack_events(stack, last_event=last_event)
            # exit condition
            if stack_obj.stack_status[-8:] == 'COMPLETE':
                break
            if stack_obj.stack_status == 'DELETE_FAILED':
                break
        except botocore.exceptions.ClientError:
            break
        # limit requests to API
        sleep(5)
# command functions
def validate(args):
    """validate local stack(s)"""
    stacks = args.stack
    # validate all stacks
    if args.all:
        stacks = local_stacks()
    # filter for existing stacks
    elif stacks:
        stacks = [stack for stack in stacks if stack in local_stacks()]
    # bail if no stack to validate
    if not stacks:
        LOG.warning(
            'this command needs a list of local stacks, or the --all flag to validate all stacks')
        sys.exit(1)
    # action
    cfn = get_cfn()
    retval = 0
    for stack in stacks:
        tpl_body = load_template(stack, True)
        try:
            cfn.validate_template(TemplateBody=tpl_body)
            res = 'ok'
        except botocore.exceptions.ClientError as err:
            res = 'not ok: %s' % str(err)
            retval = 1
        print('%s:%s %s' % (stack, ''.rjust(max([len(s) for s in stacks]) - len(stack)), res))
    sys.exit(retval)
def check_for_200_error(response, **kwargs):
    # From: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
    # There are two opportunities for a copy request to return an error. One
    # can occur when Amazon S3 receives the copy request and the other can
    # occur while Amazon S3 is copying the files. If the error occurs before
    # the copy operation starts, you receive a standard Amazon S3 error. If the
    # error occurs during the copy operation, the error response is embedded in
    # the 200 OK response. This means that a 200 OK response can contain either
    # a success or an error. Make sure to design your application to parse the
    # contents of the response and handle it appropriately.
    #
    # So this handler checks for this case. Even though the server sends a
    # 200 response, conceptually this should be handled exactly like a
    # 500 response (with respect to raising exceptions, retries, etc.)
    # We're connected *before* all the other retry logic handlers, so as long
    # as we switch the error code to 500, we'll retry the error as expected.
    if response is None:
        # A None response can happen if an exception is raised while
        # trying to retrieve the response. See Endpoint._get_response().
        return
    http_response, parsed = response
    if _looks_like_special_case_error(http_response):
        logger.debug("Error found for response with 200 status code, "
                     "errors: %s, changing status code to "
                     "500.", parsed)
        http_response.status_code = 500
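# `_looks_like_special_case_error` is referenced above but not shown here;
# conceptually it only asks whether the 200 body is really an S3 <Error>
# document. A minimal sketch of that check (an illustration, not botocore's
# exact implementation):
import xml.etree.ElementTree as ET

def _looks_like_special_case_error(http_response):
    """Return True if a 200 response body is actually an S3 <Error> document."""
    if http_response.status_code != 200:
        return False
    try:
        root = ET.fromstring(http_response.content)
    except ET.ParseError:
        # Not XML at all, so it cannot be the embedded-error case.
        return False
    return root.tag == 'Error'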
def _botocore_exception_maybe():
    """
    Allow for boto3 not being installed when using these utils by wrapping
    botocore.exceptions instead of assigning from it directly.
    """
    if HAS_BOTO3:
        return botocore.exceptions.ClientError
    return type(None)
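# Usage sketch (the function and AWS call below are hypothetical): because the
# except target is evaluated at call time rather than import time, a module
# using this helper can still be imported when boto3/botocore is missing.
def describe_stack_safely(client, name):
    try:
        return client.describe_stacks(StackName=name)
    except _botocore_exception_maybe() as error:
        # Only reachable when botocore is installed and raised a ClientError.
        raise RuntimeError("AWS call failed: %s" % error)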
def detach_bucket_policies(event, context):
    """
    Detaches the team bucket IAM policies from the user's IAM role

    event = {
        "user": {"username": "alice"},
        "team": {"slug": "justice-league"}
    }
    """
    username = event["user"]["username"]
    team_slug = event["team"]["slug"]
    client = boto3.client("iam")
    errors = []
    for policy_type in [POLICY_READ_WRITE, POLICY_READ_ONLY]:
        # Be sure we detach all policies without stopping early
        try:
            client.detach_role_policy(
                RoleName=naming.role_name(username),
                PolicyArn=policy_arn(team_slug, policy_type),
            )
        except botocore.exceptions.ClientError as error:
            # Ignore this error, raised when detaching a policy that is not attached
            if error.response["Error"]["Code"] != "NoSuchEntity":
                errors.append(error)
        except Exception as error:
            # Other exceptions are saved and raised after the loop
            errors.append(error)
    if errors:
        message = "One or more errors occurred while detaching policies from role: {}".format(
            errors)
        LOG.error(message)
        raise Exception(message)
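# Invocation sketch mirroring the event shape documented in the docstring.
# POLICY_READ_WRITE, POLICY_READ_ONLY, naming.role_name and policy_arn are
# project helpers not shown on this page; in production the handler would be
# invoked by AWS Lambda rather than called directly.
event = {
    "user": {"username": "alice"},
    "team": {"slug": "justice-league"},
}
detach_bucket_policies(event, context=None)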