def role():
    """Ensure the 'gimel' IAM role exists with an up-to-date policy and return its ARN."""
    new_role = False
    try:
        logger.info('finding role')
        iam('get_role', RoleName='gimel')
    except ClientError:
        logger.info('role not found. creating')
        iam('create_role', RoleName='gimel',
            AssumeRolePolicyDocument=ASSUMED_ROLE_POLICY)
        new_role = True
    role_arn = iam('get_role', RoleName='gimel', query='Role.Arn')
    logger.debug('role_arn={}'.format(role_arn))
    logger.info('updating role policy')
    iam('put_role_policy', RoleName='gimel', PolicyName='gimel',
        PolicyDocument=POLICY)
    if new_role:
        from time import sleep
        logger.info('waiting for role policy propagation')
        sleep(5)
    return role_arn
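
# Several snippets on this page call helpers such as iam(...), aws_lambda(...) and
# apigateway(...) that are not shown here. A minimal sketch of what such a helper
# might look like (an assumption, not the original implementation): it invokes the
# named boto3 client method and optionally filters the response with a JMESPath
# expression passed as `query`. The per-service helpers would then be this function
# bound to a single service. The name aws_client_call is hypothetical.
import boto3
import jmespath


def aws_client_call(service, method, query=None, **kwargs):
    """Call a boto3 client method and optionally apply a JMESPath query to the result."""
    client = boto3.client(service)
    response = getattr(client, method)(**kwargs)
    if query:
        return jmespath.search(query, response)
    return response

# Example (hypothetical): aws_client_call('iam', 'get_role', RoleName='gimel', query='Role.Arn')
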
def _function_alias(name, version, alias=LIVE):
    try:
        logger.info('creating function alias {0} for {1}:{2}'.format(
            alias, name, version))
        arn = aws_lambda('create_alias',
                         FunctionName=name,
                         FunctionVersion=version,
                         Name=alias,
                         query='AliasArn')
    except ClientError:
        logger.info('alias {0} exists. updating {0} -> {1}:{2}'.format(
            alias, name, version))
        arn = aws_lambda('update_alias',
                         FunctionName=name,
                         FunctionVersion=version,
                         Name=alias,
                         query='AliasArn')
    return arn
def _put_bucket_tagging(self):
    """Add new Tags without overwriting old Tags.

    A plain put_bucket_tagging sets TagSet, which overwrites any existing
    tags. The logic below keeps the old tags in place as well.
    """
    try:
        # Get the current tag list; if the bucket has no tags, S3 raises an exception.
        result = self.s3client.get_bucket_tagging(Bucket=self.bucket)['TagSet']
    except ClientError as error:
        LOG.warning(error)
        result = []

    # Build a simplified dictionary of tags from the result
    all_tags = {}
    for tag in result:
        all_tags.update({tag.get('Key'): tag.get('Value')})
    all_tags.update({'app_group': self.group, 'app_name': self.app_name})

    tag_set = generate_s3_tags.generated_tag_data(all_tags)
    self.s3client.put_bucket_tagging(Bucket=self.bucket, Tagging={'TagSet': tag_set})
    LOG.info("Adding tagging %s for Bucket", tag_set)
def _fetch_images(self, ec2, image_ids):
    while True:
        try:
            return ec2.describe_images(ImageIds=list(image_ids))
        except ClientError as e:
            if e.response['Error']['Code'] == 'InvalidAMIID.NotFound':
                msg = e.response['Error']['Message']
                e_ami_ids = [
                    e_ami_id.strip() for e_ami_id
                    in msg[msg.find("'[") + 2:msg.rfind("]'")].split(',')]
                self.log.warning(
                    "asg:not-encrypted filter image not found %s",
                    e_ami_ids)
                for e_ami_id in e_ami_ids:
                    image_ids.remove(e_ami_id)
                continue
            raise
def process_asg(self, asg):
    force_delete = self.data.get('force', False)
    if force_delete:
        log.info('Forcing deletion of Auto Scaling group %s',
                 asg['AutoScalingGroupName'])
    session = local_session(self.manager.session_factory)
    asg_client = session.client('autoscaling')
    try:
        self.manager.retry(
            asg_client.delete_auto_scaling_group,
            AutoScalingGroupName=asg['AutoScalingGroupName'],
            ForceDelete=force_delete)
    except ClientError as e:
        if e.response['Error']['Code'] == 'ValidationError':
            log.warning("Error deleting asg %s %s",
                        asg['AutoScalingGroupName'], e)
            return
        raise
def get_resources(self, ids, cache=True):
    key = {'region': self.config.region,
           'resource': str(self.__class__.__name__),
           'q': None}
    if cache and self._cache.load():
        resources = self._cache.get(key)
        if resources is not None:
            self.log.debug("Using cached results for get_resources")
            m = self.get_model()
            id_set = set(ids)
            return [r for r in resources if r[m.id] in id_set]
    try:
        resources = self.augment(self.source.get_resources(ids))
        return resources
    except ClientError as e:
        self.log.warning("event ids not resolved: %s error:%s" % (ids, e))
        return []
def provision(self):
    # Avoiding runtime lambda dep, premature optimization?
    from c7n.mu import PolicyLambda, LambdaManager

    with self.policy.ctx:
        self.policy.log.info(
            "Provisioning policy lambda %s", self.policy.name)
        variables = {
            'account_id': self.policy.options.account_id,
            'policy': self.policy.data
        }
        self.policy.data = self.expand_variables(variables)
        try:
            manager = LambdaManager(self.policy.session_factory)
        except ClientError:
            # For cli usage by normal users, don't assume the role;
            # just use it for the lambda.
            manager = LambdaManager(
                lambda assume=False: self.policy.session_factory(assume))
        return manager.publish(
            PolicyLambda(self.policy), 'current',
            role=self.policy.options.assume_role)
def get_s3_bucket(bucket_name):
    s3 = boto3.resource('s3',
                        aws_access_key_id=config.access_id,
                        aws_secret_access_key=config.access_secret)
    try:
        bucket = s3.create_bucket(Bucket=bucket_name,
                                  CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
        return bucket
    except ClientError as e:
        # If we already own the bucket this error is raised; return the
        # existing bucket, otherwise return False.
        error_code = e.response['Error']['Code']
        if error_code == 'BucketAlreadyOwnedByYou':
            return s3.Bucket(bucket_name)
        else:
            return False


# should replace bucket name here with correct one
def paths(self):
    s3 = boto3.client('s3')
    pathattrib = [a.attrib['path'].strip() for a in self.dom.getroottree().iterfind('//*[@path]')]
    pathnodes = [a.text.strip() for a in self.dom.getroottree().iterfind('//Path')]
    errors = []
    for p in pathattrib + pathnodes:
        if re.match('^([A-Za-z]:|/)', p):
            errors.append("Invalid absolute path detected: '{}'".format(p))
        else:
            # Now make sure the files really exist
            filekey = os.path.join(self.projroot, p.replace("\\", "/"))
            try:
                s3.head_object(Bucket=self.bucket, Key=filekey)
            except ClientError:
                errors.append("File not found on repository: {}".format(filekey))
    if len(errors) > 0:
        self.validators.append(ValidatorResult("Path Checking", "FAIL", errors))
    else:
        self.validators.append(ValidatorResult("Path Checking", "PASS"))
def _clear_method(api_id, resource_id, http_method):
    try:
        method = apigateway('get_method', restApiId=api_id,
                            resourceId=resource_id,
                            httpMethod=http_method)
    except ClientError:
        method = None
    if method:
        apigateway('delete_method', restApiId=api_id, resourceId=resource_id,
                   httpMethod=http_method)
def create_update_lambda(role_arn, wiring):
    name, handler, memory, timeout = (wiring[k] for k in ('FunctionName',
                                                          'Handler',
                                                          'MemorySize',
                                                          'Timeout'))
    try:
        logger.info('finding lambda function')
        function_arn = aws_lambda('get_function',
                                  FunctionName=name,
                                  query='Configuration.FunctionArn')
    except ClientError:
        function_arn = None
    if not function_arn:
        logger.info('creating new lambda function {}'.format(name))
        with open('gimel.zip', 'rb') as zf:
            function_arn, version = aws_lambda('create_function',
                                               FunctionName=name,
                                               Runtime='python2.7',
                                               Role=role_arn,
                                               Handler=handler,
                                               MemorySize=memory,
                                               Timeout=timeout,
                                               Publish=True,
                                               Code={'ZipFile': zf.read()},
                                               query='[FunctionArn, Version]')
    else:
        logger.info('updating lambda function {}'.format(name))
        with open('gimel.zip', 'rb') as zf:
            function_arn, version = aws_lambda('update_function_code',
                                               FunctionName=name,
                                               Publish=True,
                                               ZipFile=zf.read(),
                                               query='[FunctionArn, Version]')
    function_arn = _function_alias(name, version)
    _cleanup_old_versions(name)
    logger.debug('function_arn={} ; version={}'.format(function_arn, version))
    return function_arn
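
# A hypothetical end-to-end call combining role() and create_update_lambda() above.
# The wiring values below are illustrative only (the keys are the ones the function
# reads), and a packaged gimel.zip is assumed to be present in the working directory.
WIRING = {
    'FunctionName': 'gimel-example',   # hypothetical function name
    'Handler': 'gimel.handler',        # hypothetical handler
    'MemorySize': 128,
    'Timeout': 10,
}

role_arn = role()
function_arn = create_update_lambda(role_arn, WIRING)
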
def _bucket_exists(self):
    """Check if the bucket exists."""
    try:
        self.s3client.get_bucket_location(Bucket=self.bucket)
        return True
    except ClientError as error:
        LOG.error(error)
        return False
def check_s3(self, bucket, element):  # pragma: no cover
    """This method is a helper for `cache_s3`.

    Read the `cache_s3` method for more information.

    :param bucket: name of the S3 bucket to check
    :param element: key of the object to look up in the bucket
    :return: True if the bucket and key exist, False otherwise
    """
    session = boto3.Session(profile_name=self.profile_name)
    s3 = session.resource('s3')
    try:
        s3.meta.client.head_bucket(Bucket=bucket)
    except ClientError:
        # If the bucket does not exist then simply use the original.
        # Fail silently, returning everything as it is in the url.
        return False
    try:
        # If the key does not exist do not return False, but try to
        # create a readonly user in order to not have problems in the
        # travis environment.
        s3.Object(bucket, element).load()
    except ClientError:
        return False
    else:
        return True
def _getAlarms(self, metricName, namespace, sleepForIfFailed):
    alarms = []
    if sleepForIfFailed > 60:
        raise RuntimeError("Too many client errors")
    try:
        alarms = self.cloudWatch.describe_alarms_for_metric(MetricName=metricName, Namespace=namespace)
    except ClientError:
        # Back off and retry with a doubled sleep, returning the retried
        # result instead of the empty list.
        time.sleep(sleepForIfFailed)
        return self._getAlarms(metricName, namespace, sleepForIfFailed * 2)
    return alarms
from contextlib import contextmanager


@contextmanager  # presumably present in the original source; required to use this with `with`
def swf_exception_wrapper():
    try:
        yield
    except ClientError as err:
        err_type = err.response['Error'].get('Code', 'SWFResponseError')
        err_msg = err.response['Error'].get(
            'Message', 'No error message provided...')
        raise _swf_fault_exception.get(err_type, SWFResponseError)(err_msg)
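
# Hypothetical usage of swf_exception_wrapper(): wrap SWF client calls so that
# botocore ClientErrors are re-raised as the matching SWF exception type.
# `swf_client` is an assumed boto3 SWF client, and the call arguments are
# illustrative only.
with swf_exception_wrapper():
    swf_client.start_workflow_execution(
        domain='example-domain',
        workflowId='example-workflow-1',
        workflowType={'name': 'demo', 'version': '1.0'})
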
def get_snapshots(self, ec2, snap_ids):
    """get snapshots corresponding to id, but tolerant of missing."""
    while True:
        try:
            result = ec2.describe_snapshots(SnapshotIds=snap_ids)
        except ClientError as e:
            if e.response['Error']['Code'] == 'InvalidSnapshot.NotFound':
                msg = e.response['Error']['Message']
                e_snap_id = msg[msg.find("'") + 1:msg.rfind("'")]
                self.log.warning("Snapshot not found %s" % e_snap_id)
                snap_ids.remove(e_snap_id)
                continue
            raise
        else:
            return result.get('Snapshots', ())
def process_asg(self, asg):
    """Multistep process to stop an asg a priori of setup

    - suspend processes
    - stop instances
    """
    session = local_session(self.manager.session_factory)
    asg_client = session.client('autoscaling')
    processes = list(self.ASG_PROCESSES.difference(
        self.data.get('exclude', ())))

    try:
        self.manager.retry(
            asg_client.suspend_processes,
            ScalingProcesses=processes,
            AutoScalingGroupName=asg['AutoScalingGroupName'])
    except ClientError as e:
        if e.response['Error']['Code'] == 'ValidationError':
            return
        raise

    ec2_client = session.client('ec2')
    try:
        instance_ids = [i['InstanceId'] for i in asg['Instances']]
        if not instance_ids:
            return
        retry = get_retry((
            'RequestLimitExceeded', 'Client.RequestLimitExceeded'))
        retry(ec2_client.stop_instances, InstanceIds=instance_ids)
    except ClientError as e:
        if e.response['Error']['Code'] in (
                'InvalidInstanceID.NotFound',
                'IncorrectInstanceState'):
            log.warning("Error stopping asg instances %s %s" % (
                asg['AutoScalingGroupName'], e))
            return
        raise
def __init__(self, table_name):
    self.table_name = table_name
    self.ddb_client = boto3.client("dynamodb")
    self.ddb_resource = boto3.resource("dynamodb", region_name='eu-west-1')
    try:
        self.ddb_client.describe_table(TableName=table_name)
        self.table_already_existed = True
        print('Table {} already exists'.format(table_name))
    except ClientError:
        print('Table {} does not appear to exist, creating...'.format(table_name))
        self.table_already_existed = False
        table = self.ddb_resource.create_table(
            TableName=table_name,
            KeySchema=[
                {
                    'AttributeName': 'customerId',
                    'KeyType': 'HASH'  # Partition key
                }
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'customerId',
                    'AttributeType': 'S'
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 5,
                'WriteCapacityUnits': 5
            }
        )
        table.meta.client.get_waiter('table_exists').wait(TableName=table_name)
        print('Table created {}'.format(str(table)))
def bucket_exists(awsclient, bucket):
    client_s3 = awsclient.get_client('s3')
    try:
        client_s3.head_bucket(Bucket=bucket)
        return True
    except ClientError:
        return False
def bucket_exists(self, bucket_name):
    """
    Returns ``True`` if a bucket exists and you have access to
    call ``HeadBucket`` on it, otherwise ``False``.
    """
    try:
        self.s3.meta.client.head_bucket(Bucket=bucket_name)
        return True
    except ClientError:
        return False
def iam_role_exists(self):
    """
    Returns ``True`` if an IAM role exists, otherwise ``False``.
    """
    try:
        self.iam.meta.client.get_role(
            RoleName=self.role_name)
        return True
    except ClientError:
        return False
def bucket_exists(bucket):
    try:
        bucket.creation_date
        return True
    except ClientError:
        return False
def deploy(self):
    try:
        zone_id = self._get_hosted_zone_id()
        bucket_name = self._get_bucket_name(zone_id)
        custom_domain = self.config.domain if zone_id else None

        s3 = boto3.resource('s3', self.config.region)
        bucket = s3.Bucket(bucket_name)
        ensure_website_bucket_exists(bucket=bucket, region=self.config.region)

        self._upload_artifacts(bucket)
        website_endpoint = get_website_endpoint(bucket_name)

        if custom_domain:
            ensure_route53_s3_setup(
                zone_id=zone_id,
                bucket_name=bucket_name,
                website_endpoint=website_endpoint
            )
            url = 'http://{}'.format(custom_domain)
            if self.config.cdn:
                ensure_cloudfront_s3_setup(
                    bucket_name=bucket_name,
                    domain_name=custom_domain,
                )
        else:
            url = 'http://{}'.format(website_endpoint)
        logger.info('Website uploaded to %s', url)
    except ClientError as ex:
        error_code = get_error_code(ex)
        if error_code == 'BucketAlreadyExists':
            logger.error('Error: The name "%s" is already taken.', bucket_name)
            sys.exit(1)
        if error_code == 'InvalidBucketName':
            logger.error('Error: Invalid bucket name "%s".', bucket_name)
            logger.error('\nSee bucket naming rules here:\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules\n')
            sys.exit(1)
        raise
def get_images(self):
    manager = self.manager.get_resource_manager('ami')
    images = set()
    image_snaps = set()
    image_ids = list({lc['ImageId'] for lc in self.configs.values()})

    # Pull account images, we should be able to utilize cached values,
    # drawing down the image population to just images not in the account.
    account_images = [
        i for i in manager.resources() if i['ImageId'] in image_ids]
    account_image_ids = {i['ImageId'] for i in account_images}
    image_ids = [image_id for image_id in image_ids
                 if image_id not in account_image_ids]

    # To pull third party images, we explicitly use a describe
    # source without any cache.
    #
    # Can't use a config source since it won't have state for
    # third party ami; we auto propagate source normally, so we
    # explicitly pull a describe source. Can't use a cache either
    # as they're not in the account.
    #
    while image_ids:
        try:
            amis = manager.get_source('describe').get_resources(
                image_ids, cache=False)
            account_images.extend(amis)
            break
        except ClientError as e:
            msg = e.response['Error']['Message']
            if e.response['Error']['Code'] != 'InvalidAMIID.NotFound':
                raise
            for n in msg[msg.find('[') + 1: msg.find(']')].split(','):
                image_ids.remove(n.strip())

    for a in account_images:
        images.add(a['ImageId'])
        # Capture any snapshots; images strongly reference their
        # snapshots, and some of these will be third party in the
        # case of a third party image.
        for bd in a.get('BlockDeviceMappings', ()):
            if 'Ebs' not in bd or 'SnapshotId' not in bd['Ebs']:
                continue
            image_snaps.add(bd['Ebs']['SnapshotId'].strip())
    return images, image_snaps