def get_credential_report(iam_client):
    """Fetch the account's IAM credential report as a list of row dicts.

    Triggers report generation and polls (via recursion after a short
    sleep) until AWS marks the report COMPLETE, then parses the CSV body.

    :param iam_client: boto3 IAM client.
    :return: list of dicts, one per credential-report row, or None if the
        report could not be retrieved.
    """
    resp1 = iam_client.generate_credential_report()
    if resp1['State'] == 'COMPLETE':
        try:
            response = iam_client.get_credential_report()
            credential_report_csv = response['Content']
            # boto3 returns the report body as bytes under Python 3;
            # DictReader needs text.
            if isinstance(credential_report_csv, bytes):
                credential_report_csv = credential_report_csv.decode('utf-8')
            reader = csv.DictReader(credential_report_csv.splitlines())
            return list(reader)
        except ClientError as e:
            # ClientError has no .message attribute in Python 3; use str(e).
            print("Unknown error getting Report: " + str(e))
    else:
        # Report is still being generated; wait briefly and retry.
        sleep(2)
        return get_credential_report(iam_client)
# Query the account's password policy for the password age. Return that number of days
# Python example sources using instances of the ClientError() class
def remote_get_lambda(self, **kwargs):
    """Fetch a Lambda function's configuration from AWS.

    :param kwargs: must contain "FunctionName".
    :return: the get_function response, with any Tags mirrored into
        Configuration["Tags"], or False when the function does not exist.
    :raises ClientError: for any AWS error other than
        ResourceNotFoundException.
    """
    try:
        response = self.awslambda.get_function(
            FunctionName=kwargs["FunctionName"],
        )
    except ClientError as e:
        if e.response['Error']['Code'] == "ResourceNotFoundException":
            return False
        # Surface unexpected AWS errors instead of silently returning
        # False (the previous TODO/pass swallowed them).
        raise
    tags = response.get("Tags", False)
    if tags:
        # Mirror tags into the configuration for convenient access.
        response["Configuration"]["Tags"] = tags
    return response
def remote_update_alias(self, **kwargs):
    """Update a Lambda alias, creating it if it does not exist yet.

    :param kwargs: update_alias/create_alias arguments; must contain
        "Name", "FunctionName" and "FunctionVersion".
    :return: the update_alias or create_alias response.
    :raises ClientError: for any AWS error other than
        ResourceNotFoundException.
    """
    conf = kwargs
    try:
        logger.info("Update alias {} for function {}"
                    " with version {}".format(conf["Name"],
                                              conf["FunctionName"],
                                              conf["FunctionVersion"]))
        response = self.awslambda.update_alias(**conf)
    except ClientError as e:
        if e.response['Error']['Code'] == "ResourceNotFoundException":
            logger.info("Alias {} not exist for function {}. "
                        "Creating new one with version {}".format(conf["Name"],
                                                                  conf["FunctionName"],
                                                                  conf["FunctionVersion"]))
            response = self.awslambda.create_alias(**conf)
        else:
            # Previously this fell through to `return response` with the
            # name unbound (UnboundLocalError); re-raise unexpected errors.
            raise
    return response
def _create_lambda(arn, func_name, func_desc, lambda_handler, lambda_main,
                   runtime):
    """Create a new Lambda function from the zipped deployment package.

    :param arn: IAM role ARN the function executes under.
    :param func_name: Lambda function name.
    :param func_desc: human-readable function description.
    :param lambda_handler: handler function name inside the main module.
    :param lambda_main: module containing the handler.
    :param runtime: Lambda runtime identifier (e.g. "python3.9").
    :return: the create_function response, or None on a handled error.
    """
    func = dict()
    lamb = boto3.client('lambda')
    # Open in binary mode: the ZipFile payload is raw bytes and text-mode
    # reads fail (or corrupt the archive) under Python 3.
    with open(temp_deploy_zip, 'rb') as deploy:
        func['ZipFile'] = deploy.read()
    try:
        resp = lamb.create_function(
            FunctionName=func_name, Runtime=runtime, Publish=True,
            Description=func_desc,
            Role=arn, Code=func, Handler='{0}.{1}'.format(
                lambda_main, lambda_handler
            ))
        logging.info("Create Lambda Function resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True))
        )
        return resp
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating function '{1}'.".format(
                ce, func_name))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
def _create_function_alias(func_alias, func_name, func_version):
    """Create a Lambda alias pointing at a specific published version.

    :param func_alias: alias name to create.
    :param func_name: Lambda function the alias belongs to.
    :param func_version: version the alias should resolve to.
    :return: the create_alias response, or None on a handled error.
    """
    client = boto3.client('lambda')
    try:
        resp = client.create_alias(
            FunctionName=func_name,
            Name=func_alias,
            FunctionVersion=func_version
        )
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating alias '{1}'.".format(
                ce, func_alias))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
    else:
        logging.info("Create Lambda Alias resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True))
        )
        return resp
def _update_lambda_function(zip_file, func_name):
    """Upload new code for an existing Lambda function and publish it.

    :param zip_file: open binary file object holding the deployment zip.
    :param func_name: Lambda function to update.
    :return: the newly published version string, or None on a handled error.
    """
    client = boto3.client('lambda')
    try:
        result = client.update_function_code(
            FunctionName=func_name,
            ZipFile=zip_file.read(),
            Publish=True
        )
    except ClientError as ce:
        code = ce.response['Error']['Code']
        if code == 'ValidationException':
            logging.warning(
                "Validation Error {0} updating function '{1}'.".format(
                    ce, func_name))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
    else:
        return result['Version']
def _update_lambda_alias(func_alias, func_name, func_version):
    """Point an existing Lambda alias at a different published version.

    :param func_alias: alias name to update.
    :param func_name: Lambda function the alias belongs to.
    :param func_version: version the alias should now resolve to.
    :return: the updated alias ARN, or None on a handled error.
    """
    lamb = boto3.client('lambda')
    try:
        resp = lamb.update_alias(
            Name=func_alias,
            FunctionName=func_name,
            FunctionVersion=func_version
        )
        return resp['AliasArn']
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            # Log the alias, not the function name, to match
            # _create_function_alias (previously logged func_name here).
            logging.warning(
                "Validation Error {0} updating alias '{1}'.".format(
                    ce, func_alias))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
def get_role_arn(role_params):
    """Resolve an IAM role ARN from a name plus optional account id.

    Maps a friendly account name to an id via AWS_ACCOUNT_MAP, falls back
    to DEFAULT_ACCOUNT_ID, and as a last resort (backwards compatibility)
    looks the role up via the IAM API.

    :param role_params: dict with 'name' and 'account_id' keys; mutated
        in place when the account id is mapped or defaulted.
    :return: the role's ARN string.
    :raises GetRoleError: when the IAM lookup fails.
    """
    if role_params['account_id']:
        # Try to map the name to an account ID. If it isn't found, assume
        # an ID was passed in and use it as-is.
        role_params['account_id'] = app.config['AWS_ACCOUNT_MAP'].get(
            role_params['account_id'],
            role_params['account_id']
        )
    else:
        if app.config['DEFAULT_ACCOUNT_ID']:
            role_params['account_id'] = app.config['DEFAULT_ACCOUNT_ID']
        # No default account id defined. Get the ARN by looking up the role
        # name. This is a backwards compat use-case for when we didn't
        # require the default account id.
        else:
            iam = iam_client()
            try:
                with PrintingBlockTimer('iam.get_role'):
                    role = iam.get_role(RoleName=role_params['name'])
                    return role['Role']['Arn']
            except ClientError as e:
                response = e.response['ResponseMetadata']
                # ClientError has no .message in Python 3; use str(e).
                raise GetRoleError((response['HTTPStatusCode'], str(e)))
    # Return a generated ARN
    return 'arn:aws:iam::{account_id}:role/{name}'.format(**role_params)
def test__init__():
    """StreamWriter must tolerate an existing stream and propagate other errors."""
    mock_client = Mock()
    with patch.object(boto3, 'client', return_value=mock_client):
        # Existing stream: ResourceInUseException is swallowed and we wait.
        in_use = ClientError({'Error': {'Code': 'ResourceInUseException'}},
                             'create_stream')
        mock_client.create_stream = Mock(side_effect=in_use)
        StreamWriter('blah')
        assert mock_client.create_stream.called
        assert call.get_waiter('stream_exists') in mock_client.method_calls, "We handled stream existence"

        # Any other ClientError must bubble up before the waiter is reached.
        other = ClientError({'Error': {'Code': 'Something else'}},
                            'create_stream')
        mock_client.create_stream = Mock(side_effect=other)
        mock_client.reset_mock()
        with pytest.raises(ClientError):
            StreamWriter('blah')
        assert mock_client.create_stream.called
        assert call.get_waiter('stream_exists') not in mock_client.method_calls, "never reached"
def __init__(self, stream_name, back_off_limit=60, send_window=13):
    """Set up a Kinesis writer, creating the stream when necessary.

    :param stream_name: Kinesis stream to write to (created if absent).
    :param back_off_limit: maximum back-off, in seconds.
    :param send_window: send-window size used by the writer.
    """
    self.stream_name = stream_name
    self.back_off_limit = back_off_limit
    self.send_window = send_window if False else send_window  # keep arg order explicit
    self._send_window = send_window
    self.last_send = 0
    self._sequence_number_for_ordering = '0'
    self._record_agg = aws_kinesis_agg.aggregator.RecordAggregator()
    self._kinesis = boto3.client('kinesis')
    try:
        self._kinesis.create_stream(StreamName=stream_name, ShardCount=1)
    except ClientError as err:
        # ResourceInUseException just means the stream already exists.
        if err.response['Error']['Code'] != 'ResourceInUseException':
            logger.error(err)
            raise
    # Waits up to 180 seconds for the stream to exist.
    self._kinesis.get_waiter('stream_exists').wait(StreamName=self.stream_name)
def get_lifecycle(bucket_name, **conn):
    """Return the bucket's lifecycle rules with dates as ISO 8601 strings.

    :param bucket_name: S3 bucket to inspect.
    :param conn: extra arguments forwarded to the lifecycle call.
    :return: list of lifecycle rules; [] when no lifecycle is configured.
    :raises ClientError: for any error other than a missing configuration.
    """
    try:
        result = get_bucket_lifecycle_configuration(Bucket=bucket_name, **conn)
    except ClientError as e:
        # A bucket with no lifecycle configuration is not an error.
        if 'NoSuchLifecycleConfiguration' not in str(e):
            # Bare raise keeps the original traceback intact.
            raise
        return []
    for rule in result['Rules']:
        # Save all dates as a proper ISO 8601 string (UTC "Z" suffix):
        for transition in rule.get('Transitions', []):
            if 'Date' in transition:
                transition['Date'] = transition["Date"].replace(
                    tzinfo=None, microsecond=0).isoformat() + "Z"
        expiration = rule.get("Expiration")
        if expiration and 'Date' in expiration:
            expiration["Date"] = expiration["Date"].replace(
                tzinfo=None, microsecond=0).isoformat() + "Z"
    return result['Rules']
def get_website(bucket_name, **conn):
    """Return the bucket's static-website configuration, if any.

    :param bucket_name: S3 bucket to inspect.
    :param conn: extra arguments forwarded to the website call.
    :return: dict with the populated website fields, or None when the
        bucket has no website configuration.
    :raises ClientError: for any error other than a missing configuration.
    """
    try:
        result = get_bucket_website(Bucket=bucket_name, **conn)
    except ClientError as e:
        if "NoSuchWebsiteConfiguration" not in str(e):
            # Bare raise keeps the original traceback intact.
            raise
        return None
    # Copy only the fields that are actually set.
    website = {}
    for field in ("IndexDocument", "RoutingRules",
                  "RedirectAllRequestsTo", "ErrorDocument"):
        if result.get(field):
            website[field] = result[field]
    return website
def get_cors(bucket_name, **conn):
    """Return the bucket's CORS rules as plain dicts.

    :param bucket_name: S3 bucket to inspect.
    :param conn: extra arguments forwarded to the CORS call.
    :return: list of CORS rule dicts; [] when no CORS is configured.
    :raises ClientError: for any error other than a missing configuration.
    """
    try:
        result = get_bucket_cors(Bucket=bucket_name, **conn)
    except ClientError as e:
        if "NoSuchCORSConfiguration" not in str(e):
            # Bare raise keeps the original traceback intact.
            raise
        return []
    cors = []
    for rule in result["CORSRules"]:
        # Copy only the populated fields so empty values are dropped.
        cors.append({field: rule[field] for field in (
            "AllowedHeaders", "AllowedMethods", "AllowedOrigins",
            "ExposeHeaders", "MaxAgeSeconds") if rule.get(field)})
    return cors
def get_bucket_region(**kwargs):
    """Return the bucket's region name, or None when access is denied.

    Some S3 buckets do not allow viewing of details. We do not want to
    throw an error in this case because we still want to see that the
    bucket exists.

    :param kwargs: arguments forwarded to get_bucket_location.
    :return: region string, or None on AccessDenied.
    :raises ClientError: for any error other than AccessDenied.
    """
    try:
        result = get_bucket_location(**kwargs)
        location = result['LocationConstraint']
    except ClientError as e:
        if 'AccessDenied' not in str(e):
            # Bare raise keeps the original traceback intact.
            raise
        return None
    if not location:
        # An empty LocationConstraint means the classic us-east-1 region.
        return 'us-east-1'
    return S3_REGION_MAPPING.get(location, location)
def create(self):
    """
    Create a new lambda function or update
    an existing function.
    :return:
    """
    try:
        PrintMsg.out(self.get())
        PrintMsg.updating('{} in region {}'.format(
            self.function_name, self.region))
        self.__update__()
    except ClientError as e:
        # Raw string: '\s' in a plain string is an invalid escape
        # (DeprecationWarning/SyntaxWarning on modern Python).
        # Group 1 is the region, group 2 the account id in the ARN.
        m = re.search(r'.*\sarn:aws:lambda:(.*):(.*):function:.*', str(e))
        if m:
            account_id = m.group(2)
        else:
            account_id = None
        PrintMsg.out(e)
        PrintMsg.creating(self.function_name)
        self.__create__(account_id)
def commit(self, preserve_cache=False):
    """Flush the accumulated record-set change batch to Route53.

    No-op when the batch is empty. On InvalidInput the failing batch is
    logged before the error propagates; the batch is only reset after a
    successful call.

    :param preserve_cache: when False, clear the cached records first.
    """
    if not preserve_cache:
        self._clear_cache()
    if not self._change_batch:
        return
    batch = {'Changes': self._change_batch}
    try:
        self._client.change_resource_record_sets(
            HostedZoneId=self.id,
            ChangeBatch=batch
        )
    except ClientError as err:
        if err.response['Error']['Code'] == 'InvalidInput':
            logging.exception("failed to process batch %r", self._change_batch)
        raise
    self._reset_change_batch()
def _cache_aws_records(self):
    """Populate the record cache from Route53, once, when the zone has an id.

    On NoSuchHostedZone the cache is cleared instead (the zone vanished
    on AWS); any other ClientError propagates.
    """
    if self._aws_records is not None or not self.id:
        return
    paginator = self._client.get_paginator('list_resource_record_sets')
    collected = []
    try:
        for page in paginator.paginate(HostedZoneId=self.id):
            collected.extend(page['ResourceRecordSets'])
    except ClientError as err:
        if err.response['Error']['Code'] != 'NoSuchHostedZone':
            raise
        self._clear_cache()
    else:
        self._aws_records = collected
        self._exists = True
def _reconcile_zone(self):
    """
    Handles zone creation/deletion.

    Deleted DB zones are removed from AWS; zones without a Route53 id or
    not present in AWS are (re)created, recovering from a stale
    caller_reference when the zone was deleted manually in AWS.
    """
    if self.db_zone.deleted:
        self.delete()
        return
    if self.db_zone.route53_id is None:
        self.create()
        return
    if self.exists:
        return
    try:
        self.create()
    except ClientError as err:
        if err.response['Error']['Code'] != 'HostedZoneAlreadyExists':
            raise
        # This can happen if a zone was manually deleted from AWS.
        # Create will fail because we re-use the caller_reference.
        self.db_zone.caller_reference = None
        self.db_zone.save()
        self.create()
def reconcile(self):
    """Bring the AWS health check in line with the IP's desired state.

    Deleted IPs tear down the check; existing checks are recreated when
    the desired config drifted; missing checks are created, recovering
    from a stale healthcheck_caller_reference when necessary.
    """
    if self.ip.deleted:
        self.delete()
        self.ip.delete()
        return
    if self.exists:
        # If the desired config is not a subset of the current config,
        # recreate the health check from scratch.
        if self.desired_config.items() <= self.config.items():
            logger.info("%-15s nothing to do", self.ip.ip)
        else:
            self.delete()
            self.create()
        return
    try:
        self.create()
    except ClientError as err:
        if err.response['Error']['Code'] != 'HealthCheckAlreadyExists':
            raise
        self.ip.healthcheck_caller_reference = None
        self.ip.save()
        self.create()
def check_bucket(bucket):
    """Ensure the named S3 bucket exists, creating it when HEAD fails.

    :param bucket: bucket name to check/create in AWS_DEFAULT_REGION.
    """
    s3 = boto3.client('s3', region_name=AWS_DEFAULT_REGION)
    print('Checking bucket: ' + bucket)
    try:
        s3.head_bucket(Bucket=bucket)
        return
    except ClientError:
        pass
    print('Creating bucket: ' + bucket)
    create_args = {
        'Bucket': bucket
    }
    # us-east-1 is the default location and must not be passed explicitly.
    if AWS_DEFAULT_REGION != 'us-east-1':
        create_args['CreateBucketConfiguration'] = {
            'LocationConstraint': AWS_DEFAULT_REGION
        }
    s3.create_bucket(**create_args)
    s3.get_waiter('bucket_exists').wait(Bucket=bucket)
def get_timestamp_from_s3_object(bucket, key):
    """Return the object's LastModified time as seconds since the Unix epoch.

    Objects that cannot be fetched count as the epoch itself (0.0).

    :param bucket: S3 bucket name.
    :param key: object key.
    :return: float seconds since 1970-01-01 (naive, assumed UTC).
    """
    s3 = boto3.client('s3', region_name=AWS_DEFAULT_REGION)
    epoch = datetime(1970, 1, 1, tzinfo=None)
    try:
        obj = s3.get_object(
            Bucket=bucket,
            Key=key
        )
        timestamp = obj['LastModified']  # We assume this is UTC.
    except ClientError:
        timestamp = epoch
    return (timestamp.replace(tzinfo=None) - epoch).total_seconds()
# IAM
# Source file: delete_orphaned_keys.py
# Project: sync-buckets-state-machine
# Author: awslabs
# (site metadata: project source listing — 18 reads, 0 stars, 0 likes, 0 comments)
def run(self):
    """Drain the job queue, deleting destination keys missing from source.

    Each queued key is HEAD-checked against the source bucket; a 404
    means the key is orphaned in the destination and gets deleted.

    :raises ClientError: for any HEAD failure other than a 404.
    """
    while not self.job_queue.empty():
        try:
            key = self.job_queue.get(True, 1)
        except Empty:
            return
        try:
            self.s3.head_object(Bucket=self.source, Key=key)
            logger.info('Key: ' + key + ' is present in source bucket, nothing to do.')
        except ClientError as e:
            # Compare the code as a string: int() would raise ValueError
            # on non-numeric codes such as 'AccessDenied', masking the
            # real error.
            if e.response['Error']['Code'] == '404':  # The key was not found.
                logger.info('Key: ' + key + ' is not present in source bucket. Deleting orphaned key.')
                self.s3.delete_object(Bucket=self.destination, Key=key)
            else:
                raise
# Functions
def test_create_unable_to_create_vpc(self):
    """A VPC-creation error must propagate and no subnet may be created."""
    client = self.fake_driver.client
    vpc_error = ClientError(fake_error_code, 'operation_name')
    self.mock_object(client, 'create_vpc', mock.Mock(side_effect=vpc_error))
    self.mock_object(client, 'create_subnet', mock.Mock())

    self.assertRaises(ClientError, self.fake_driver.create,
                      'fake_name', '10.10.10.0/24')

    client.create_vpc.assert_called_once_with(
        CidrBlock='10.10.10.0/24',
        InstanceTenancy='default'
    )
    self.assertFalse(client.create_subnet.called)
def test_create_unable_to_create_subnet(self):
    """A subnet-creation error must propagate after the VPC was created."""
    client = self.fake_driver.client
    subnet_error = ClientError(fake_error_code, 'operation_name')
    self.mock_object(client, 'create_vpc', mock.Mock(return_value=fake_vpc_out))
    self.mock_object(client, 'create_subnet',
                     mock.Mock(side_effect=subnet_error))

    self.assertRaises(ClientError, self.fake_driver.create,
                      'fake_name', '10.10.10.0/24')

    client.create_vpc.assert_called_once_with(
        CidrBlock='10.10.10.0/24',
        InstanceTenancy='default'
    )
    client.create_subnet.assert_called_once_with(
        VpcId='vpc-5eed72c5',
        CidrBlock='10.10.10.0/24'
    )
def test_delete_unable_to_describe_subnets(self):
    """A describe-subnets error must propagate before anything is deleted."""
    client = self.fake_driver.client
    describe_error = ClientError(fake_error_code, 'operation_name')
    self.mock_object(client, 'describe_subnets',
                     mock.Mock(side_effect=describe_error))
    self.mock_object(client, 'delete_subnet', mock.Mock())
    self.mock_object(client, 'delete_vpc', mock.Mock())

    self.assertRaises(ClientError, self.fake_driver.delete,
                      'subnet-9dcb6b38')

    client.describe_subnets.assert_called_once_with(
        SubnetIds=['subnet-9dcb6b38'])
    self.assertFalse(client.delete_subnet.called)
    self.assertFalse(client.delete_vpc.called)
def test_delete_unable_to_detete_vpc(self):
    """A VPC-deletion error must propagate after the subnet was removed.

    NOTE(review): the 'detete' typo is kept — renaming would change the
    public test name.
    """
    client = self.fake_driver.client
    vpc_error = ClientError(fake_error_code, 'operation_name')
    self.mock_object(client, 'describe_subnets',
                     mock.Mock(return_value=fake_describe_subnets))
    self.mock_object(client, 'delete_subnet',
                     mock.Mock(return_value=fake_delete_subnet_out))
    self.mock_object(client, 'delete_vpc', mock.Mock(side_effect=vpc_error))

    self.assertRaises(ClientError, self.fake_driver.delete,
                      'subnet-9dcb6b38')

    client.describe_subnets.assert_called_once_with(
        SubnetIds=['subnet-9dcb6b38'])
    client.delete_subnet.assert_called_once_with(SubnetId='subnet-9dcb6b38')
    client.delete_vpc.assert_called_once_with(VpcId='vpc-5eed72c5')
def test_upload_object_failed(self):
    """upload_object must propagate put_object errors; verify the exact call."""
    client = self.fake_driver.client
    error = ClientError(fake_error_code_resp, 'UploadObject')
    self.mock_object(client, 'put_object', mock.Mock(side_effect=error))

    self.assertRaises(ClientError,
                      self.fake_driver.upload_object,
                      'fake-container', 'fake-obj',
                      'fake-content', metadata={'newkey': 'newvalue'},
                      content_length=None)

    client.put_object.assert_called_once_with(
        Bucket='fake-container',
        Key='fake-obj',
        Body='fake-content',
        ContentLength=None,
        Metadata={'x-amz-newkey': 'newvalue'},
    )
def test_download_object_failed(self):
    """download_object must propagate get_object errors; verify the exact call."""
    client = self.fake_driver.client
    error = ClientError(fake_error_code_resp, 'DownloadObject')
    self.mock_object(client, 'get_object', mock.Mock(side_effect=error))

    self.assertRaises(ClientError,
                      self.fake_driver.download_object,
                      'fake-container', 'fake-obj')

    client.get_object.assert_called_once_with(
        Bucket='fake-container',
        Key='fake-obj',
    )
def test_stat_object_failed(self):
    """stat_object must propagate head_object errors; verify the exact call."""
    client = self.fake_driver.client
    error = ClientError(fake_error_code_resp, 'HeadObject')
    self.mock_object(client, 'head_object', mock.Mock(side_effect=error))

    self.assertRaises(ClientError,
                      self.fake_driver.stat_object,
                      'fake-container', 'fake-obj')

    client.head_object.assert_called_once_with(
        Bucket='fake-container',
        Key='fake-obj',
    )
def test_delete_object_failed(self):
    """delete_object must propagate delete errors; verify the exact call."""
    client = self.fake_driver.client
    error = ClientError(fake_error_code_resp, 'DeleteObject')
    self.mock_object(client, 'delete_object', mock.Mock(side_effect=error))

    self.assertRaises(ClientError,
                      self.fake_driver.delete_object,
                      'fake-container', 'fake-obj')

    client.delete_object.assert_called_once_with(
        Bucket='fake-container',
        Key='fake-obj',
    )