def configure_env():
    ''' Configures the fabric env. '''
    config = get_config()
    stage = get_stage()
    stage_config = get_stage_config(stage)

    def resolve(key):
        # Prefer the stage-level value, fall back to the global config.
        return stage_config.get(key) or config[key]

    env.user = resolve('user')
    env.port = resolve('port')
    env.cwd = resolve('app_dir')
    env.key_filename = resolve('key_filename')
    env.hosts = [stage_config['host']]

    ssh_forward_agent = resolve('ssh_forward_agent')
    env.forward_agent = (
        ssh_forward_agent and
        str(ssh_forward_agent).lower() == 'true'
    )

    # If Verbose logging is turned on show verbose logs.
    if str(resolve('verbose_logging')).lower() == 'true':
        set_verbose_logging()
# Example usages of env.key_filename collected below.
def deploy_test(key_file_name="../ec2.pem"):
env.key_filename = key_file_name
changes = local("git status --porcelain", capture=True)
if len(changes):
print " {}".format(changes)
proceed = prompt(
"you have uncommited changes, do you want to proceed",
default=False,
validate=bool
)
if not proceed:
return
git_branch_name = local('git rev-parse --abbrev-ref HEAD', capture=True)
with prefix(". /usr/share/virtualenvwrapper/virtualenvwrapper.sh"):
with prefix("workon {}".format(virtual_env_name)):
run("git pull origin {}".format(git_branch_name))
run("pip install -r requirements.txt")
run("alembic upgrade head")
run("pkill twistd||true")
run("pkill gloss||true")
run("twistd multiple_mllp --receiver gloss.ohc_receiver.OhcReceiver")
run("gunicorn -w 1 -b 0.0.0:6767 -D gloss.api:app")
def vagrant():
    """Point fabric's env at the local vagrant box."""
    env.srvr = 'vagrant'
    env.path = os.path.join('/', env.srvr)

    # this is necessary because ssh will fail when known hosts keys vary
    # every time vagrant is destroyed, a new key will be generated
    env.disable_known_hosts = True

    env.within_virtualenv = 'source {}'.format(
        os.path.join('~', 'venv', 'bin', 'activate'))

    # Parse `vagrant ssh-config` output ("Key value" per line) into a dict.
    ssh_config = {}
    for line in local('vagrant ssh-config', capture=True).splitlines():
        key, value = line.split()
        ssh_config[key] = value

    env.hosts = ['%s:%s' % (ssh_config['HostName'], ssh_config['Port'])]
    env.key_filename = ssh_config['IdentityFile']
    env.user = ssh_config['User']
    print(env.key_filename, env.hosts, env.user)
def install(host_config):
    """Installs redis on the host described by ``host_config``."""
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    cfg = helper.get_software_config(host_config, 'redis')
    version = cfg.get('version', '3.2.6')
    port = cfg.get('port', '6379')
    data_dir = cfg.get('data-directory', '/var/lib/redis')

    # Kernel tweaks applied before installing redis.
    machine.disable_transparent_huge_pages(env.host_string)
    machine.set_overcommit_memory(env.host_string, 1)

    # Ship the install script to the remote home dir and execute it.
    put('{}/software/scripts/redis.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x redis.sh")
    sudo(". ~/redis.sh {} {} {}".format(version, port, data_dir))
def install(host_config):
    """Installs zookeeper (plus Java 8) on the host in ``host_config``."""
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    cfg = helper.get_software_config(host_config, 'zookeeper')
    # Zookeeper needs a JVM; install Java 8 first.
    java.v8_install(host_config)

    client_port = cfg.get('port', '2181')
    server_id = cfg.get('id', '0')
    ensemble = ",".join(cfg.get('nodes'))

    # Ship the install script to the remote home dir and execute it.
    put('{}/software/scripts/zookeeper.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x zookeeper.sh")
    sudo(". ~/zookeeper.sh {} {} {}".format(client_port, server_id, ensemble))
def mount_ebs_volumes(host_config):
    """Formats (xfs) and mounts the EC2 EBS volumes listed in host_config.

    Rebuilds /etc/fstab once (backing up the old file as /etc/fstab.old),
    then appends one entry per volume so every mount survives a reboot.

    :param host_config: host dict whose 'ec2-mounts' entries each provide
        'device' and 'mount' keys
    """
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)
    sudo("apt-get -y install xfsprogs")
    # Fix: resetting fstab must happen ONCE, before the loop. The original
    # moved/recreated /etc/fstab on every iteration, so only the last
    # volume's entry survived in the final fstab.
    sudo("mv /etc/fstab /etc/fstab.old")
    sudo("touch /etc/fstab")
    for ebs in host_config['ec2-mounts']:
        device = ebs['device']
        mount = ebs['mount']
        sudo("mkdir -p {}".format(mount))
        # warn_only: a failed mkfs (e.g. device busy) skips this volume
        # instead of aborting the whole run.
        if sudo('mkfs.xfs -f {0}'.format(device), warn_only=True):
            run("echo '{0}\t{1}\txfs\tdefaults\t0\t0' | sudo tee -a /etc/fstab".format(device, mount))
            # Redundant inner 'sudo ' prefix removed; sudo() already elevates.
            sudo('mount -a')
            logger.info("EBS volume {} : {} mounted.".format(device, mount))
def connect_to_instance_in_ssh(address, keypair_path, user='root'):
    """
    Run ``ls -la /root`` on a given instance and log the output.
    :param address: ip or dns name of a machine
    :type address: str
    :param keypair_path: keypair path
    :type keypair_path: str
    """
    # Generous retries/timeout: the instance may still be booting.
    connection_settings = {
        'host_string': address,
        'user': user,
        'parallel': False,
        'key_filename': keypair_path,
        'disable_known_hosts': True,
        'connection_attempts': 10,
        'timeout': 120,
    }
    for attr, value in connection_settings.items():
        setattr(env, attr, value)
    ocb.log(run('ls -la /root'), level='INFO')
def run_download_db(filename=None):
    """
    Downloads the database from the server into your local machine.
    In order to import the downloaded database, run ``fab import_db``
    Usage::
        fab prod run_download_db
        fab prod run_download_db:filename=foobar.dump
    """
    if not filename:
        filename = settings.DB_DUMP_FILENAME
    # With a key file configured we rely on the ssh-config alias named
    # after the project; otherwise build user@host explicitly.
    host = settings.PROJECT_NAME if env.key_filename else \
        '{0}@{1}'.format(env.user, env.host_string)
    local('scp {0}:{1}{2} .'.format(
        host, settings.FAB_SETTING('SERVER_DB_BACKUP_DIR'), filename))
def run_download_media(filename=None):
    """
    Downloads the media dump from the server into your local machine.
    In order to import the downloaded media dump, run ``fab import_media``
    Usage::
        fab prod run_download_media
        fab prod run_download_media:filename=foobar.tar.gz
    """
    if not filename:
        filename = settings.MEDIA_DUMP_FILENAME
    # With a key file configured we rely on the ssh-config alias named
    # after the project; otherwise build user@host explicitly.
    host = settings.PROJECT_NAME if env.key_filename else \
        '{0}@{1}'.format(env.user, env.host_string)
    local('scp {0}:{1}{2} .'.format(
        host, settings.FAB_SETTING('SERVER_MEDIA_BACKUP_DIR'), filename))
def run_upload_db(filename=None):
    """
    Uploads your local database to the server.
    You can create a local dump with ``fab export_db`` first.
    In order to import the database on the server you still need to SSH into
    the server.
    Usage::
        fab prod run_upload_db
        fab prod run_upload_db:filename=foobar.dump
    """
    if not filename:
        filename = settings.DB_DUMP_FILENAME
    if env.key_filename:
        ssh = settings.PROJECT_NAME
    else:
        ssh = '{0}@{1}'.format(env.user, env.host_string)
    # Fix: the format string referenced index {3} but only three arguments
    # (indices 0-2) are supplied, so this raised IndexError before scp ran.
    local('scp {0} {1}:{2}'.format(
        filename, ssh, settings.FAB_SETTING('SERVER_DB_BACKUP_DIR')))
def setup_fabric():
    """Prepare fabric's global env for root access to the managed hosts."""
    env.user = 'root'
    # Raise UpgradeError instead of exiting the process on abort.
    env.abort_exception = UpgradeError
    env.warn_only = True
    env.key_filename = settings.SSH_KEY_FILENAME
    # Silence fabric's stdout mirroring and abort messages.
    output.stdout = False
    output.aborts = False
def pre_start_hook(app):
    # Pushes the latest kuberdock network-plugin files to every node and
    # restarts the watcher service there over SSH (fabric, as root).
    from ..nodes.models import Node
    # env.warn_only = True
    env.user = 'root'
    env.key_filename = SSH_KEY_FILENAME
    # Silence fabric's stdout mirroring and command echo.
    output.stdout = False
    output.running = False
    PLUGIN_DIR = '/usr/libexec/kubernetes/kubelet-plugins/net/exec/kuberdock/'
    with app.app_context():
        for node in Node.query.all():
            # Retarget fabric at each node in turn.
            env.host_string = node.hostname
            put('./node_network_plugin.sh', PLUGIN_DIR + 'kuberdock')
            put('./node_network_plugin.py', PLUGIN_DIR + 'kuberdock.py')
            run('systemctl restart kuberdock-watcher')
        print 'Kuberdock node parts are updated'
def __init__(self):
    """Set up fabric for root SSH access and reset cached state."""
    env.user = 'root'
    # Don't abort the whole run when a single host is unreachable.
    env.skip_bad_hosts = True
    env.key_filename = SSH_KEY_FILENAME
    # Lazily-populated caches, filled on first use.
    self._cached_drives = None
    self._cached_node_ip = None
def vagrant():
    """
    Run commands using vagrant
    """
    vc = get_vagrant_config()
    # 'vagrant' is the default box user; ssh goes through the forwarded port.
    env.user = vc['User']
    env.hosts = ['%s:%s' % (vc['HostName'], vc['Port'])]
    # The identity file path is quoted in ssh-config output; drop the quotes.
    env.key_filename = vc['IdentityFile'].strip('"')
    # Forward the agent only when the box config asks for it.
    env.forward_agent = vc.get('ForwardAgent', 'no') == 'yes'
def _get_vagrant_connection():
    """Boot the vagrant box and point fabric's env at it."""
    local('vagrant up')
    ssh_config = local('vagrant ssh-config', capture=True)

    def lookup(field):
        # First "Field value" occurrence in the ssh-config output.
        return re.findall(r'{}\s+([^\n]+)'.format(field), ssh_config)[0]

    env.hosts = ['%s:%s' % (lookup('HostName'), lookup('Port'))]
    env.user = lookup('User')
    # The identity file path may be quoted; strip the quotes.
    env.key_filename = lookup('IdentityFile').strip("\"")
def restart_all(config_file):
    """Restarts crate service on all hosts"""
    cluster_cfg = helper.get_config(config_file)
    for host_cfg in cluster_cfg['hosts']:
        # Retarget fabric at this host before issuing the command.
        env.host_string = helper.get_env_host_string(host_cfg)
        env.user = helper.get_env_user(host_cfg)
        env.key_filename = helper.get_env_key_filename(host_cfg)
        sudo('service crate restart')
def tail_log(config_file, lines=50):
    """Tails the log"""
    cluster_cfg = helper.get_config(config_file)
    for host_cfg in cluster_cfg['hosts']:
        # Retarget fabric at this host before reading its log.
        env.host_string = helper.get_env_host_string(host_cfg)
        env.user = helper.get_env_user(host_cfg)
        env.key_filename = helper.get_env_key_filename(host_cfg)
        sudo('tail -{} /var/log/crate/uber-cluster.log'.format(lines))
def broker_install(host_config):
    """Installs a Kafka broker and appends its custom server.properties."""
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)
    # Kafka needs a JVM; install Java 8 first.
    java.v8_install(host_config)

    software_config = helper.get_software_config(host_config, 'kafka-broker')
    version = software_config.get('version', '0.10.0.1')

    put('{}/software/scripts/kafka-broker.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x kafka-broker.sh")
    sudo(". ~/kafka-broker.sh {}".format(version))

    broker_id = software_config.get('broker-id', '0')
    zk_hosts = software_config.get('zookeeper-hosts', 'localhost:2181')
    log_directories = software_config.get('log-directories', '/var/lib/kafka-logs')

    def append_property(line):
        # Append one line to the broker's server.properties.
        sudo('echo "{}" | sudo tee -a /srv/kafka/config/server.properties'.format(line))

    # Custom settings, bracketed by marker comments for later inspection.
    tag = '## ---- CUSTOM CONFIGURATION ---'
    append_property(tag)
    append_property('delete.topic.enable = true')
    append_property('broker.id={}'.format(broker_id))
    append_property('zookeeper.connect={}'.format(zk_hosts))
    append_property('log.dirs={}'.format(log_directories))
    append_property('listeners=PLAINTEXT://{}:9093'.format(host_config['private-ip']))
    append_property(tag)
    sudo("service kafka restart")
def manager_install(host_config):
    """Installs kafka-manager pointed at the configured zookeeper hosts."""
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    cfg = helper.get_software_config(host_config, 'kafka-manager')
    zookeeper_hosts = cfg.get('zookeeper-hosts', 'localhost:2181')

    # Ship the install script to the remote home dir and execute it.
    put('{}/software/scripts/kafka-manager.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x kafka-manager.sh")
    sudo(". ~/kafka-manager.sh {}".format(zookeeper_hosts))
def delete_topic(config_file, topic):
    """Deletes a Kafka topic | args: config_file, topic name"""
    cfg = helper.get_config(config_file)
    host_config = get_kafka_host_cfg(cfg)
    # Build the admin command before retargeting fabric at the broker host.
    delete_cmd = (
        "/srv/kafka/bin/kafka-topics.sh --delete "
        "--zookeeper {} --topic {}".format(get_zk_host(cfg), topic)
    )
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)
    sudo(delete_cmd)
def install_kv(host_config):
    """Installs riak-kv on the target host."""
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)
    # Ship the install script to the remote home dir and execute it.
    put('{}/software/scripts/riak-kv.sh'.format(getcwd()), '~/', use_sudo=True)
    sudo("chmod +x riak-kv.sh")
    sudo(". ~/riak-kv.sh")
def v8_install(host_config):
    """Installs Java 8 on the target host."""
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)
    # Ship the install script to the remote home dir and execute it.
    put('{}/software/scripts/java-8.sh'.format(getcwd()), '~/', use_sudo=True)
    sudo("chmod +x java-8.sh")
    sudo(". ~/java-8.sh")
def nodetool(config_file, cmd):
    """Run a Cassandra ``nodetool`` subcommand on the cassandra host.

    args: config file, nodetool command
    """
    cluster_cfg = helper.get_config(config_file)
    cassandra_host = get_cassandra_host_cfg(cluster_cfg)
    env.host_string = helper.get_env_host_string(cassandra_host)
    env.user = helper.get_env_user(cassandra_host)
    env.key_filename = helper.get_env_key_filename(cassandra_host)
    sudo("nodetool {}".format(cmd))
def __init__(self, hostip):
    """Configure fabric for deployments against ``hostip``."""
    # NOTE(review): fabric's env.hosts is normally a list; the raw value is
    # assigned directly here — confirm callers pass a list/iterable.
    env.hosts = hostip
    env.user = settings.DEPLOYUSER
    env.key_filename = settings.DEPLOYKEY
    # Fail fast instead of hanging on interactive prompts.
    env.abort_on_prompts = True
def init_host(self):
    """
    Point fabric's env at the host described by this instance.
    """
    env.host_string = self.host_string
    env.user = self.host_user
    # Both auth credentials stored on the instance are handed to fabric.
    env.password = self.host_passwd
    env.key_filename = self.host_keyfile
def login_server(self):
    """
    Open an interactive SSH session to the configured server.
    """
    ssh_cmd = 'ssh -i {0} {1}@{2}'.format(
        env.key_filename, env.user, env.host_string
    )
    local(ssh_cmd)
def __init__(self, user, ssh_key, hosts,
             repository, password):
    """Store connection details, persist the SSH key and configure fabric.

    Raises ``Exception`` when any required value is missing.
    """
    required = [user, ssh_key, hosts, repository]
    if None in required:
        # XXX: Charm should block instead.
        # https://bugs.launchpad.net/bugs/1638772
        raise Exception('Missing configuration')
    self.user = user
    self.ssh_key = ssh_key
    # hosts arrives as a whitespace-separated string.
    self.hosts = hosts.split()
    self.repository = repository
    self.password = password
    # Write the key material to disk; fabric needs a file path.
    self.key_filename = self._write_key()
    self._init_fabric()
def _init_fabric(self):
    """Copy the stored connection settings into fabric's global env."""
    env.warn_only = True
    # Be patient with slow-to-boot machines.
    env.connection_attempts = 10
    env.timeout = 10
    env.user = self.user
    env.key_filename = self.key_filename
    env.hosts = self.hosts
    env.password = self.password
def _production_env():
    """Configure fabric's env for the production server."""
    # Skipping known-hosts verification speeds up connection setup.
    env.disable_known_hosts = True
    default_key = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')
    env.key_filename = [default_key]
    env.project_root = '~/app/'
def run_command(args):
    # Runs an arbitrary shell command on every running ec2 instance of the
    # current service/tier, connecting over SSH with the tier's deploy key.
    cmd = args.cmd
    if not cmd:
        print "Please enter command to run. Example: kitrun.py remotecmd \"ls -l\""
        return
    tier_config = get_tier_config()
    service_info = get_service_info()
    tier = tier_config["tier"]
    region = tier_config["region"]
    service_name = service_info["name"]
    # --public selects the instance's public IP instead of the private one.
    public = args.public
    pem_file = None
    # Find the deployable matching this service to get its SSH key name.
    for deployable in tier_config["deployables"]:
        if deployable["name"] == service_name:
            pem_file = deployable["ssh_key"]
            break
    else:
        # for/else: no deployable matched the service name.
        print "Service {} not found in tier config for {}".format(service_name, tier)
        sys.exit(1)
    print "\n*** EXECUTING REMOTE COMMAND '{}' ON SERVICE '{}' / TIER '{}' IN REGION '{}'\n".format(cmd, service_name, tier, region)
    # Only running instances tagged with this service and tier.
    filters = {
        'tag:service-name': service_name,
        "instance-state-name": "running",
        "tag:tier": tier,
    }
    print "Finding ec2 instances in region %s from filters: %s" % (region, filters)
    instances = get_ec2_instances(region, filters=filters)
    if not instances:
        print "Found no running ec2 instances with tag service-name={}".format(service_name)
        return
    for ec2 in instances:
        # Pick the address to connect to based on the --public flag.
        if not public:
            ip_address = ec2.private_ip_address
        else:
            ip_address = ec2.ip_address
        print "*** Running '{}' on {}...".format(cmd, ip_address)
        env.host_string = ip_address
        env.user = EC2_USERNAME
        # assumes the pem file lives in ~/.ssh — TODO confirm
        env.key_filename = '~/.ssh/{}'.format(pem_file)
        run(cmd)
        print