def __init__(self, session, key_prefix, table_name, aws_access_key_id=None,
aws_secret_access_key=None, region=None, endpoint_url=None, use_signer=False, permanent=True):
if session is None:
import boto3
session = boto3.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key,
region_name=region)
self.client = session.client('dynamodb', endpoint_url=endpoint_url)
self.key_prefix = key_prefix
self.use_signer = use_signer
self.permanent = permanent
if table_name not in self.client.list_tables().get(u'TableNames'):
raise RuntimeError("The table {0!s} does not exist in DynamoDB for the requested region of {1!s}. Please "
"ensure that the table has a PrimaryKey of \"SessionID\"".format(
table_name,
session.region_name
))
self.table_name = table_name
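# A minimal usage sketch for the constructor above, assuming it belongs to a
# Flask-Session-style DynamoDB session interface; the class name
# DynamoDBSessionInterface is hypothetical, only the constructor arguments
# come from the code above.
import boto3

session = boto3.Session(region_name='us-east-1')
interface = DynamoDBSessionInterface(  # hypothetical class name
    session=session,
    key_prefix='session:',
    table_name='flask-sessions',
    permanent=True,
)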
# Python boto3 Session() usage examples
# Source: assume-role-write-properties.py (project: aem-aws-stack-builder, author: shinesolutions)
def main():
args = parse_args()
set_logging_level(args.quiet, args.verbose)
log.debug('Args: %r', args)
session = boto3.Session(profile_name=args.profile)
exported_keys = [
'AccessKeyId', 'SecretAccessKey', 'SessionToken', 'Expiration',
]
sts = session.client('sts')
assumed_role = sts.assume_role(
        RoleArn=args.role,
        RoleSessionName=args.session_name,
)
credentials = assumed_role.get('Credentials', {})
credentials['Expiration'] = credentials['Expiration'].isoformat()
out = sys.stdout if args.output is None else open(args.output, 'w')
for k in exported_keys:
out.write('{0}={1}\n'.format(k, credentials.get(k)))
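# The loop above writes one KEY=value line per credential. A sketch of reading
# that file back into environment variables from another Python process; the
# AWS_* variable mapping is an assumption about how the properties get consumed.
import os

def load_assumed_credentials(path):
    mapping = {
        'AccessKeyId': 'AWS_ACCESS_KEY_ID',
        'SecretAccessKey': 'AWS_SECRET_ACCESS_KEY',
        'SessionToken': 'AWS_SESSION_TOKEN',
    }
    with open(path) as handle:
        for line in handle:
            key, _, value = line.strip().partition('=')
            if key in mapping:
                os.environ[mapping[key]] = value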
def _get_s3_files(file_paths):
ACCESS_KEY = os.getenv("PYFIDDLE_S3_KEY")
SECRET_KEY = os.getenv("PYFIDDLE_S3_SECRET")
session_get_files = boto3.Session(
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
region_name=os.getenv("PYFIDDLE_S3_REGION"),
)
s3 = session_get_files.resource('s3')
file_dir = os.getenv("PYFIDDLE_WRITE_DIR")
for file_path in file_paths:
s3.Bucket(
os.getenv("PYFIDDLE_S3_BUCKET")).download_file(
str(file_path.script.id)+"/"+file_path.name,
file_dir+file_path.name
)
def check_aws_credentials():
session = boto3.Session()
credentials = None
try:
credentials = session.get_credentials()
except Exception:
pass
if not credentials:
# set temporary dummy credentials
os.environ['AWS_ACCESS_KEY_ID'] = 'LocalStackDummyAccessKey'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'LocalStackDummySecretKey'
session = boto3.Session()
credentials = session.get_credentials()
assert credentials
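# Sketch of how the helper above might be wired into a LocalStack test run: it
# guarantees boto3 can resolve *some* credentials before clients are built.
# The endpoint URL is an assumption (LocalStack's edge port is commonly 4566).
import boto3

check_aws_credentials()
s3 = boto3.client('s3', endpoint_url='http://localhost:4566')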
# -----------------------------
# INFRASTRUCTURE HEALTH CHECKS
# -----------------------------
def rundeck_list_resources():
'''
Return a list of S3 and EC2 Resources from all available profiles
'''
resp_obj = {}
awsconfig = aws_config.AwsConfig()
profiles = awsconfig.get_profiles()
# Populate s3 buckets.
for profile in profiles:
session = boto3.Session(profile_name=profile)
s3client = session.client('s3')
        try:
            s3info = s3client.list_buckets()
        except botocore.exceptions.ClientError:
            s3info = {'Buckets': []}
for bucket in s3info['Buckets']:
bucket_text = "s3: (%s) %s" % (profile, bucket['Name'])
resp_obj[bucket_text] = bucket['Name']
# Populate ec2 instances.
for profile in profiles:
session = boto3.Session(profile_name=profile)
ec2client = session.client('ec2', region_name="us-east-1")
        try:
            ec2info = ec2client.describe_instances()
        except botocore.exceptions.ClientError:
            ec2info = {'Reservations': []}
for reservation in ec2info['Reservations']:
for instance in reservation['Instances']:
instance_text = "ec2: (%s) %s" % \
(profile, instance['InstanceId'])
resp_obj[instance_text] = instance['InstanceId']
return jsonify(resp_obj)
def list_groups(profile):
'''
Return all the groups.
'''
resp_obj = {}
resp_obj['status'] = 'OK'
awsconfig = aws_config.AwsConfig()
profiles = awsconfig.get_profiles()
    profile_valid = profile in profiles
if not profile_valid:
resp_obj['status'] = 'FAIL'
return jsonify(resp_obj)
session = boto3.Session(profile_name=profile)
iamclient = session.client('iam')
    try:
        groupinfo = iamclient.list_groups()
    except botocore.exceptions.ClientError:
        groupinfo = {'Groups': []}
groups = []
for group in groupinfo['Groups']:
groups.append(group['GroupName'])
resp_obj['groups'] = groups
return jsonify(resp_obj)
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None,
iam_role_discover=False):
'''
    Create an EC2 service client to one or more environments by name.
'''
service = 'ec2'
orca_config = OrcaConfig()
self.regions = orca_config.get_regions()
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
if iam_role_discover:
session = boto3.Session()
self.clients['default'] = {}
for region in self.regions:
self.clients['default'][region] = \
session.client(service, region_name=region)
else:
self.awsconfig = AwsConfig()
profiles = self.awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = {}
for region in self.regions:
self.clients[profile][region] = \
session.client(service, region_name=region)
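# The constructor above leaves self.clients in one of two shapes: flat
# ({profile: client}) when profile_names is given, or nested
# ({profile: {region: client}}) in the discovery branches. A hedged sketch of
# walking the nested shape; the EC2Client class name is assumed from context.
ec2 = EC2Client(iam_role_discover=True)
for region, client in ec2.clients['default'].items():
    print(region, len(client.describe_instances()['Reservations']))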
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None,
iam_role_discover=False):
'''
    Create an ELB service client to one or more environments by name.
'''
service = 'elb'
orca_config = OrcaConfig()
self.regions = orca_config.get_regions()
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
if iam_role_discover:
session = boto3.Session()
self.clients['default'] = {}
for region in self.regions:
self.clients['default'][region] = \
session.client(service, region_name=region)
else:
self.awsconfig = AwsConfig()
profiles = self.awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = {}
for region in self.regions:
self.clients[profile][region] = \
session.client(service, region_name=region)
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None,
iam_role_discover=False):
'''
    Create an S3 service client to one or more environments by name.
'''
service = 's3'
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
if iam_role_discover:
session = boto3.Session()
self.clients['default'] = session.client(service)
else:
awsconfig = AwsConfig()
profiles = awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = session.client(service)
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None,
iam_role_discover=False):
"""
    Create an Application Auto Scaling service client to one or more environments by name.
"""
service = 'application-autoscaling'
orca_config = OrcaConfig()
self.regions = orca_config.get_regions()
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
if iam_role_discover:
session = boto3.Session()
self.clients['default'] = {}
for region in self.regions:
self.clients['default'][region] = \
session.client(service, region_name=region)
else:
awsconfig = AwsConfig()
profiles = awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = {}
for region in self.regions:
self.clients[profile][region] = \
session.client(service, region_name=region)
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None,
iam_role_discover=False):
'''
    Create a CloudWatch service client to one or more environments by name.
'''
service = 'cloudwatch'
orca_config = OrcaConfig()
self.regions = orca_config.get_regions()
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
if iam_role_discover:
session = boto3.Session()
self.clients['default'] = {}
for region in self.regions:
self.clients['default'][region] = \
session.client(service, region_name=region)
else:
awsconfig = AwsConfig()
profiles = awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = {}
for region in self.regions:
self.clients[profile][region] = \
session.client(service,
region_name=region)
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None,
iam_role_discover=False):
'''
    Create an IAM service client to one or more environments by name.
'''
service = 'iam'
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
if iam_role_discover:
session = boto3.Session()
            self.clients['default'] = session.client(service)
else:
awsconfig = AwsConfig()
profiles = awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = session.client(service)
def check_sts_token(self, profile):
""" Verifies that STS credentials are valid """
# Don't check for creds if profile is blank
if not profile:
return False
parser = RawConfigParser()
parser.read(self.creds_file)
if not os.path.exists(self.creds_dir):
if self.verbose:
print("AWS credentials path does not exit. Not checking.")
return False
elif not os.path.isfile(self.creds_file):
if self.verbose:
print("AWS credentials file does not exist. Not checking.")
return False
elif not parser.has_section(profile):
if self.verbose:
print("No existing credentials found. Requesting new credentials.")
return False
session = boto3.Session(profile_name=profile)
sts = session.client('sts')
try:
sts.get_caller_identity()
except ClientError as ex:
if ex.response['Error']['Code'] == 'ExpiredToken':
print("Temporary credentials have expired. Requesting new credentials.")
return False
if self.verbose:
print("STS credentials are valid. Nothing to do.")
return True
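# A usage sketch for check_sts_token: request fresh credentials only when the
# cached STS credentials are missing or expired. The surrounding object and
# its request_new_credentials method are assumptions.
if not auth.check_sts_token('dev'):
    auth.request_new_credentials('dev')  # hypothetical refresh method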
def destroy_dns(app='', env='dev', **_):
"""Destroy DNS records.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
        regions (str): AWS region (accepted for signature compatibility, ignored).
Returns:
bool: True upon successful completion.
"""
client = boto3.Session(profile_name=env).client('route53')
generated = get_details(app=app, env=env)
record = generated.dns_elb()
zone_ids = get_dns_zone_ids(env=env, facing='external')
for zone_id in zone_ids:
record_sets = client.list_resource_record_sets(
HostedZoneId=zone_id, StartRecordName=record, StartRecordType='CNAME', MaxItems='1')
for found_record in record_sets['ResourceRecordSets']:
assert destroy_record(client=client, found_record=found_record, record=record, zone_id=zone_id)
return True
def destroy_s3(app='', env='dev', **_):
"""Destroy S3 Resources for _app_ in _env_.
Args:
app (str): Application name
env (str): Deployment environment/account name
Returns:
        bool: True if destroyed successfully.
"""
session = boto3.Session(profile_name=env)
client = session.resource('s3')
generated = get_details(app=app, env=env)
archaius = generated.archaius()
bucket = client.Bucket(archaius['bucket'])
for item in bucket.objects.filter(Prefix=archaius['path']):
item.Object().delete()
LOG.info('Deleted: %s/%s', item.bucket_name, item.key)
return True
def get_lambda_arn(app, account, region):
"""Get lambda ARN.
Args:
account (str): AWS account name.
region (str): Region name, e.g. us-east-1
app (str): Lambda function name
Returns:
str: ARN for requested lambda function
"""
session = boto3.Session(profile_name=account, region_name=region)
lambda_client = session.client('lambda')
lambda_arn = None
paginator = lambda_client.get_paginator('list_functions')
for lambda_functions in paginator.paginate():
for lambda_function in lambda_functions['Functions']:
if lambda_function['FunctionName'] == app:
lambda_arn = lambda_function['FunctionArn']
LOG.debug("Lambda ARN for lambda function %s is %s.", app, lambda_arn)
break
if lambda_arn:
break
if not lambda_arn:
LOG.fatal('Lambda function with name %s not found in %s %s', app, account, region)
raise LambdaFunctionDoesNotExist(
'Lambda function with name {0} not found in {1} {2}'.format(app, account, region))
return lambda_arn
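# Usage sketch: resolve the ARN once and reuse it, since the paginated
# list_functions scan above touches every function in the account/region.
# The argument values are examples only.
arn = get_lambda_arn('myapp', account='dev', region='us-east-1')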
def get_lambda_alias_arn(app, account, region):
"""Get lambda alias ARN. Assumes that account name is equal to alias name.
Args:
account (str): AWS account name.
region (str): Region name, e.g. us-east-1
app (str): Lambda function name
Returns:
str: ARN for requested lambda alias
"""
session = boto3.Session(profile_name=account, region_name=region)
lambda_client = session.client('lambda')
lambda_aliases = lambda_client.list_aliases(FunctionName=app)
matched_alias = None
for alias in lambda_aliases['Aliases']:
if alias['Name'] == account:
lambda_alias_arn = alias['AliasArn']
LOG.info('Found ARN for alias %s for function %s', account, app)
matched_alias = lambda_alias_arn
break
else:
fatal_message = 'Lambda alias {0} of function {1} not found'.format(account, app)
LOG.fatal(fatal_message)
raise LambdaAliasDoesNotExist(fatal_message)
return matched_alias
def get_dns_zone_ids(env='dev', facing='internal'):
"""Get Route 53 Hosted Zone IDs for _env_.
Args:
env (str): Deployment environment.
facing (str): Type of ELB, external or internal.
Returns:
list: Hosted Zone IDs for _env_. Only *PrivateZone* when _facing_ is
internal.
"""
client = boto3.Session(profile_name=env).client('route53')
zones = client.list_hosted_zones_by_name(DNSName='.'.join([env, DOMAIN]))
zone_ids = []
for zone in zones['HostedZones']:
LOG.debug('Found Hosted Zone: %s', zone)
if facing == 'external' or zone['Config']['PrivateZone']:
LOG.info('Using %(Id)s for "%(Name)s", %(Config)s', zone)
zone_ids.append(zone['Id'])
LOG.debug('Zone IDs: %s', zone_ids)
return zone_ids
def update_dns_zone_record(env, zone_id, **kwargs):
"""Create a Route53 CNAME record in _env_ zone.
Args:
env (str): Deployment environment.
zone_id (str): Route53 zone id.
Keyword Args:
dns_name (str): FQDN of application's dns entry to add/update.
dns_name_aws (str): FQDN of AWS resource
dns_ttl (int): DNS time-to-live (ttl)
"""
client = boto3.Session(profile_name=env).client('route53')
response = {}
hosted_zone_info = client.get_hosted_zone(Id=zone_id)
zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.')
dns_name = kwargs.get('dns_name')
if dns_name and dns_name.endswith(zone_name):
dns_name_aws = kwargs.get('dns_name_aws')
# This is what will be added to DNS
dns_json = get_template(template_file='infrastructure/dns_upsert.json.j2', **kwargs)
LOG.info('Attempting to create DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id,
zone_name)
try:
response = client.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch=json.loads(dns_json), )
LOG.info('Upserted DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name)
except botocore.exceptions.ClientError as error:
LOG.info('Error creating DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id,
zone_name)
LOG.debug(error)
else:
LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name)
LOG.debug('Route53 JSON Response: \n%s', pformat(response))
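# The rendered dns_upsert.json.j2 template is not shown here; for a CNAME
# UPSERT, the ChangeBatch passed to change_resource_record_sets conventionally
# looks like the sketch below (structure per the Route53 API; the template's
# exact output is an assumption).
change_batch = {
    'Comment': 'Upsert application CNAME',
    'Changes': [{
        'Action': 'UPSERT',
        'ResourceRecordSet': {
            'Name': 'myapp.dev.example.com',
            'Type': 'CNAME',
            'TTL': 60,
            'ResourceRecords': [{'Value': 'internal-elb.us-east-1.elb.amazonaws.com'}],
        },
    }],
}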
def get_sns_topic_arn(topic_name, account, region):
"""Get SNS topic ARN.
Args:
topic_name (str): Name of the topic to lookup.
account (str): Environment, e.g. dev
region (str): Region name, e.g. us-east-1
Returns:
str: ARN for requested topic name
"""
if topic_name.count(':') == 5 and topic_name.startswith('arn:aws:sns:'):
return topic_name
session = boto3.Session(profile_name=account, region_name=region)
sns_client = session.client('sns')
topics = sns_client.list_topics()['Topics']
matched_topic = None
for topic in topics:
topic_arn = topic['TopicArn']
if topic_name == topic_arn.split(':')[-1]:
matched_topic = topic_arn
break
else:
LOG.critical("No topic with name %s found.", topic_name)
raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))
return matched_topic
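# Note: list_topics returns at most 100 topics per call, so the loop above can
# miss topics in large accounts. A paginated variant using the standard boto3
# paginator for the same API:
def iter_topic_arns(sns_client):
    paginator = sns_client.get_paginator('list_topics')
    for page in paginator.paginate():
        for topic in page['Topics']:
            yield topic['TopicArn']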
def __init__(self, client=None, **kwargs):
self.stream_id = str(uuid.uuid4())
self.dimensions = []
self.timers = {}
self.dimension_stack = []
self.storage_resolution = 60
self.with_dimension('MetricStreamId', self.stream_id)
if client:
self.client = client
else:
profile = kwargs.get('Profile')
if profile:
session = boto3.session.Session(profile_name=profile)
self.client = session.client('cloudwatch')
else:
self.client = boto3.client('cloudwatch')
def create_role(session, role_name, account_number):
client = session.client('iam')
res = None
try:
role = client.create_role(
RoleName=role_name,
AssumeRolePolicyDocument=json.dumps(default_trust(str(account_number)))
)
client.attach_role_policy(
RoleName=role_name,
PolicyArn='arn:aws:iam::aws:policy/AdministratorAccess'
)
res = role['Role']['Arn']
print "+ Created IAM role: {}".format(res)
except ClientError as e:
raise e
return res
def create_cw_event_trigger(session):
    client = session.client('events')  # put_rule is a CloudWatch Events API, not DynamoDB
    rule_arn = None
    try:
        response = client.put_rule(
            Name=CW_RULE_NAME,
            ScheduleExpression='cron(0/10 * * * ? *)',
            Description='Schedule the EC2 Lambda function to run every 10 minutes.'
        )
        rule_arn = response['RuleArn']
        print("+ Created CloudWatch Events rule: {}".format(rule_arn))
except Exception as e:
raise e
return rule_arn
def _connect_to_aws_service(self, service_name):
"""
Connect to the specified AWS service via explicit credentials
(shared by the AWS CLI) or an instance role
"""
service = None
    region = self.args.aws_region  # prefer explicit region vs. CLI config
    if not region:
        region = self._get_aws_region_from_config()
    try:
        aws = boto3.session.Session(
            aws_access_key_id=self.aws_credentials['aws_access_key_id'],
            aws_secret_access_key=self.aws_credentials['aws_secret_access_key'],
            region_name=region)
        service = aws.client(service_name)
        self._log("Connected to AWS {} in region {}".format(service_name.capitalize(), region))
    except Exception as err:
        self._log("Could not connect to AWS {} in region {} using local CLI credentials".format(
            service_name.capitalize(), region), err=err)
        try:
            service = boto3.client(service_name)
            self._log("Connected to AWS {} using an instance role".format(service_name.capitalize()))
        except Exception as err:
            self._log("Could not connect to AWS {} in region {} using an instance role".format(
                service_name.capitalize(), region), err=err)
return service
def populate_ecs_service_params(self, session, cf_params, cluster, elb_name, env, region, listener_port):
elb_client = session.client('elbv2', region_name=region)
balancer_arn, vpc_id = ApplyECS.get_load_balancer(elb_client, elb_name, cluster, env)
listener_arn = ApplyECS.get_elb_listener(elb_client, balancer_arn, port=listener_port)
cf_params['vpcid'] = vpc_id
cf_params['listenerarn'] = listener_arn
response = elb_client.describe_rules(ListenerArn=listener_arn)
rules = response['Rules']
    existing_priorities = {rule['Priority'] for rule in rules}
    if len(existing_priorities) >= 75:
        message = "Listener %s already has %d rules, cannot add more services" % (listener_arn, len(existing_priorities))
        logging.error(message)
        raise Exception(message)
for i in range(10, 21):
if str(i) not in existing_priorities:
cf_params['priority'] = str(i)
break
# Source: cfn_validate_lambda.py (project: automating-governance-sample, author: awslabs)
def setup_s3_client(job_data):
"""Creates an S3 client
Uses the credentials passed in the event by CodePipeline. These
credentials can be used to access the artifact bucket.
Args:
job_data: The job data structure
Returns:
An S3 client with the appropriate credentials
"""
key_id = job_data['artifactCredentials']['accessKeyId']
key_secret = job_data['artifactCredentials']['secretAccessKey']
session_token = job_data['artifactCredentials']['sessionToken']
session = Session(
aws_access_key_id=key_id,
aws_secret_access_key=key_secret,
aws_session_token=session_token)
return session.client('s3', config=botocore.client.Config(signature_version='s3v4'))
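# In a CodePipeline invoke-Lambda action, job_data comes out of the Lambda
# event under the documented 'CodePipeline.job' key. A sketch of the handler
# wiring; the artifact-fetching step is illustrative only.
def lambda_handler(event, context):
    job_data = event['CodePipeline.job']['data']
    s3 = setup_s3_client(job_data)
    # ...use s3.get_object(...) to fetch the input artifact from the bucket...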
def create_client(
self,
access_key=AWSDefaults.CREDS['access_key'],
secret_key=AWSDefaults.CREDS['secret_key'],
region=AWSDefaults.CREDS['region']
):
if access_key and secret_key and region:
self.session = self.create_session(
access_key=access_key,
secret_key=secret_key,
region=region
)
return self.session.client(self.aws_service)
else:
return boto3.client(
self.aws_service,
region_name=self.metadata['region']
)
def setup_s3_client(job_data):
key_id = job_data['artifactCredentials']['accessKeyId']
key_secret = job_data['artifactCredentials']['secretAccessKey']
session_token = job_data['artifactCredentials']['sessionToken']
session = Session(aws_access_key_id=key_id,
aws_secret_access_key=key_secret,
aws_session_token=session_token)
return session.client('s3', config=botocore.client.Config(signature_version='s3v4'))
def __init__(self, s3_staging_dir=None, region_name=None, schema_name='default',
poll_interval=1, encryption_option=None, kms_key=None, profile_name=None,
converter=None, formatter=None,
retry_exceptions=('ThrottlingException', 'TooManyRequestsException'),
retry_attempt=5, retry_multiplier=1,
retry_max_delay=1800, retry_exponential_base=2,
cursor_class=Cursor, **kwargs):
if s3_staging_dir:
self.s3_staging_dir = s3_staging_dir
else:
self.s3_staging_dir = os.getenv(self._ENV_S3_STAGING_DIR, None)
assert self.s3_staging_dir, 'Required argument `s3_staging_dir` not found.'
assert schema_name, 'Required argument `schema_name` not found.'
self.region_name = region_name
self.schema_name = schema_name
self.poll_interval = poll_interval
self.encryption_option = encryption_option
self.kms_key = kms_key
if profile_name:
session = Session(profile_name=profile_name, **kwargs)
self._client = session.client('athena', region_name=region_name, **kwargs)
else:
self._client = boto3.client('athena', region_name=region_name, **kwargs)
self._converter = converter if converter else TypeConverter()
self._formatter = formatter if formatter else ParameterFormatter()
self.retry_exceptions = retry_exceptions
self.retry_attempt = retry_attempt
self.retry_multiplier = retry_multiplier
self.retry_max_delay = retry_max_delay
self.retry_exponential_base = retry_exponential_base
self.cursor_class = cursor_class
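# A usage sketch for the connection above, modeled on the PyAthena-style DB-API
# it resembles; the Connection class name and cursor() behavior are assumptions
# from that context.
conn = Connection(s3_staging_dir='s3://my-bucket/athena-results/',
                  region_name='us-east-1')
cursor = conn.cursor()
cursor.execute('SELECT 1')
print(cursor.fetchall())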
def build_table(session, table_name, account_data):
client = session.client('dynamodb')
try:
t = client.create_table(TableName=table_name,
KeySchema=[
{
'AttributeName': 'name',
'KeyType': 'HASH'
}
],
AttributeDefinitions=[
{
'AttributeName': 'name',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 1
},
)
resource = session.resource('dynamodb')
print "+ Created Dynamodb Table: {}... Waiting for table creation to propagate before inserting items".format(table_name)
sleep(15)
table = resource.Table(table_name)
for i in account_data:
table.put_item(Item=i)
except ClientError as e:
raise e
return t['TableDescription']['TableName']
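# Usage sketch for build_table: the items only need the 'name' hash key defined
# in the KeySchema above; everything else is free-form. The profile name and
# item payload are examples.
import boto3

session = boto3.Session(profile_name='ops')
build_table(session, 'accounts', [{'name': 'dev', 'id': '123456789012'}])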