def update_pools(self):
# designate-manage communicates with designate via message bus so no
# need to set OS_ vars
# NOTE(AJK) this runs with every hook (once most relations are up) and
# so if it fails it will be picked up by the next relation change or
# update-status. i.e. it will heal eventually.
if hookenv.is_leader():
try:
cmd = "designate-manage pool update"
# Note(tinwood): this command may fail if pools.yaml doesn't actually
# contain any pools, which happens when the relation is broken and
# would otherwise error out the charm. Catching the error here keeps
# the hook from failing and just logs the problem.
subprocess.check_call(cmd.split(), timeout=60)
except subprocess.CalledProcessError as e:
hookenv.log("designate-manage pool update failed: {}"
.format(str(e)))
except subprocess.TimeoutExpired as e:
# The timeout covers the case where the rabbitmq server has gone away;
# designate-manage just retries forever, so the timeout lets the hook
# complete.
hookenv.log("designate-manage pool command timed out: {}".
format(str(e)))
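Because pool updates are leader-only and failures are deliberately swallowed, a unit test for this method mostly needs to assert that the exceptions are caught. A minimal sketch, assuming a hypothetical module path (charm.designate) and a charm_instance test fixture, neither of which appears in the excerpt above:

import subprocess
from unittest import mock

@mock.patch('charm.designate.subprocess.check_call')
@mock.patch('charm.designate.hookenv.is_leader')
def test_update_pools_swallows_errors(is_leader_, check_call_, charm_instance):
    # Leader unit, but designate-manage fails: update_pools() must log
    # the error and return without raising.
    is_leader_.return_value = True
    check_call_.side_effect = subprocess.CalledProcessError(
        1, 'designate-manage')
    charm_instance.update_pools()
    check_call_.assert_called_once_with(
        ['designate-manage', 'pool', 'update'], timeout=60)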
Python is_leader() example source code
def init_rndckey(self):
"""Create a RNDC key if needed
Return the rndc key from the leader DB or if one is not present
generate a new one.
:returns: str: rndc key
"""
secret = DesignateBindCharm.get_rndc_secret()
hookenv.log('Retrieving secret', level=hookenv.DEBUG)
if not secret:
hookenv.log('Secret not found in leader db', level=hookenv.DEBUG)
if hookenv.is_leader():
hookenv.log('Creating new secret as leader',
level=hookenv.DEBUG)
secret = self.generate_rndc_key()
hookenv.leader_set({LEADERDB_SECRET_KEY: secret})
return secret
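generate_rndc_key() itself is not part of this excerpt. An rndc secret is just a base64-encoded random blob of the kind rndc-confgen produces, so a stand-in could look roughly like the sketch below (an assumption, not the charm's actual helper):

import base64
import os

def generate_rndc_key(length=32):
    # Hypothetical stand-in: bind accepts a base64-encoded random
    # secret for its rndc/TSIG keys.
    return base64.b64encode(os.urandom(length)).decode('ascii')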
Source: designate_bind_handlers.py (project: charm-designate-bind, author: openstack)
def check_zone_status(hacluster):
'''This unit has not been initialised yet, so request a zones file
from a peer or set up an initial sync'''
if hookenv.is_leader():
if designate_bind.get_sync_time():
# This unit is the leader, but a sync target has already been set;
# that suggests this is a new unit which was nominated as leader early
# in its lifecycle. The leader is the unit that responds to sync
# requests, so there is no point in it sending one out; retrieve the
# zones from the existing target instead.
designate_bind.retrieve_zones()
else:
# This unit is the leader and no other unit has set up a sync
# target, so create one; this is a new deployment.
designate_bind.setup_sync()
reactive.set_state('zones.initialised')
else:
# This unit is not the leader and is not yet initialised, so request
# a zones file from a peer.
designate_bind.request_sync(hacluster)
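get_sync_time() is also outside this excerpt; the branching above only works if the sync target is visible to every unit, and the leader DB is the natural place for it. A hedged sketch, with the key name assumed:

def get_sync_time():
    # Assumed key name; falsy (None) until a leader has published a
    # sync target with hookenv.leader_set({'sync_time': ...}).
    return hookenv.leader_get(attribute='sync_time')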
def ceilometer_upgrade():
"""Execute ceilometer-upgrade command, with retry on failure if gnocchi
API is not ready for requests"""
if is_leader():
if (CompareOpenStackReleases(os_release('ceilometer-common')) >=
'newton'):
cmd = ['ceilometer-upgrade']
else:
cmd = ['ceilometer-dbsync']
subprocess.check_call(cmd)
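The docstring mentions retrying when the gnocchi API is not yet ready, but no retry logic appears in this excerpt; in the charm it is presumably applied by a decorator outside the snippet. A plain, illustrative retry wrapper (not the charm's actual code) could look like:

import subprocess
import time

def call_with_retries(cmd, attempts=5, delay=3):
    # Illustrative only: retry a failing command a few times, e.g. while
    # a dependent API is still starting up, then re-raise.
    for attempt in range(1, attempts + 1):
        try:
            subprocess.check_call(cmd)
            return
        except subprocess.CalledProcessError:
            if attempt == attempts:
                raise
            time.sleep(delay)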
def db_sync(self):
"""Perform a database sync using the command defined in the
self.sync_cmd attribute. The services defined in self.services are
restarted after the database sync.
"""
if not self.db_sync_done() and hookenv.is_leader():
subprocess.check_call(self.sync_cmd)
hookenv.leader_set({'db-sync-done': True})
# Restart services immediately after db sync as
# render_domain_config needs a working system
self.restart_all()
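db_sync_done() is not shown; given that the flag is written with leader_set above, the matching check presumably just reads the same leader setting back, which every unit can do:

def db_sync_done(self):
    # Hedged counterpart to the leader_set() call above: leader settings
    # are readable on every unit, so followers also see the flag.
    return hookenv.leader_get(attribute='db-sync-done')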
def do_openstack_upgrade_db_migration(self):
"""Run database migration after upgrade
:returns: None
"""
if hookenv.is_leader():
subprocess.check_call(self.sync_cmd)
else:
hookenv.log("Deferring DB sync to leader", level=hookenv.INFO)
# NOTE(jamespage): Not currently used - switch from c-h function for perf?
def db_sync(self):
"""Perform a database sync using the command defined in the
self.sync_cmd attribute. The services defined in self.services are
restarted after the database sync.
"""
if not self.db_sync_done() and hookenv.is_leader():
subprocess.check_call(['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head'])
subprocess.check_call(['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'stamp', 'head'])
subprocess.check_call(['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate'])
hookenv.leader_set({'db-sync-done': True})
# Restart services immediately after db sync as
# render_domain_config needs a working system
self.restart_all()
def create_initial_servers_and_domains(cls):
"""Create the nameserver entry and domains based on the charm user
supplied config
NOTE(AJK): This only wants to be done ONCE and by the leader, so we use
leader settings to store that we've done it, after it's successfully
completed.
@returns None
"""
KEY = 'create_initial_servers_and_domains'
if hookenv.is_leader() and not hookenv.leader_get(KEY):
nova_domain_name = hookenv.config('nova-domain')
neutron_domain_name = hookenv.config('neutron-domain')
with cls.check_zone_ids(nova_domain_name, neutron_domain_name):
if hookenv.config('nameservers'):
for ns in hookenv.config('nameservers').split():
cls.create_server(ns)
else:
hookenv.log('No nameserver specified, skipping creation of '
'nova and neutron domains',
level=hookenv.WARNING)
return
if nova_domain_name:
cls.create_domain(
nova_domain_name,
hookenv.config('nova-domain-email'))
if neutron_domain_name:
cls.create_domain(
neutron_domain_name,
hookenv.config('neutron-domain-email'))
# if this fails, we weren't the leader any more; another unit may
# attempt to do this too.
hookenv.leader_set({KEY: 'done'})
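The guard used here (leader-only, and only until a leader-DB flag is set) is a reusable run-once pattern. A generic sketch, with task standing in for the server/domain creation above:

def run_once_as_leader(key, task):
    # Only the leader runs the task, and only while the flag is unset.
    # Setting the flag last means a failed run is retried on a later hook.
    if hookenv.is_leader() and not hookenv.leader_get(attribute=key):
        task()
        hookenv.leader_set({key: 'done'})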
Source: designate_bind_handlers.py (project: charm-designate-bind, author: openstack)
def setup_sync_target_alone():
'''If this is the only unit in the application then set up a sync target.
This will likely be empty, as zones.initialised is only unset when a unit
first comes up, but the presence of the target allows subsequent units to
bootstrap if leadership flips to them as they come up'''
if hookenv.is_leader():
designate_bind.setup_sync()
reactive.set_state('zones.initialised')
Source: designate_bind_handlers.py (project: charm-designate-bind, author: openstack)
def process_sync_requests(hacluster):
'''If this unit is the leader, process any new sync requests'''
if hookenv.is_leader():
designate_bind.process_requests(hacluster)
def cluster_token(self):
''' Getter to return the unique cluster token. '''
if not is_leader():
return leader_get('token')
if not self.db.get('cluster-token'):
token = self.id_generator()
self.db.set('cluster-token', token)
return token
return self.db.get('cluster-token')
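Followers read the token back with leader_get('token'), so at some point the leader has to publish the value it keeps in its local db; that step is outside this excerpt. A hedged sketch of what it might look like:

def publish_cluster_token(self):
    # Hypothetical: expose the locally stored token to follower units,
    # which read it via leader_get('token') in cluster_token() above.
    if is_leader():
        leader_set({'token': self.cluster_token()})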
def test_is_leader_unsupported(self, check_output_):
check_output_.side_effect = OSError(2, 'is-leader')
self.assertRaises(NotImplementedError, hookenv.is_leader)
def test_is_leader(self, check_output_):
check_output_.return_value = b'false'
self.assertFalse(hookenv.is_leader())
check_output_.return_value = b'true'
self.assertTrue(hookenv.is_leader())
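For context, these tests are consistent with an is_leader() that shells out to the Juju is-leader hook tool and parses its JSON output, mapping a missing tool (OSError) to NotImplementedError. A sketch along those lines, not necessarily the exact charmhelpers implementation:

import errno
import json
import subprocess

def is_leader():
    # 'is-leader --format=json' prints 'true' or 'false'; the tool being
    # absent (e.g. outside a Juju hook context) surfaces as OSError.
    try:
        out = subprocess.check_output(['is-leader', '--format=json'])
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise NotImplementedError('Juju is-leader tool not available')
        raise
    return json.loads(out.decode('UTF-8'))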
def acquire(self, lock):
'''Acquire the named lock, non-blocking.
The lock may be granted immediately, or in a future hook.
Returns True if the lock has been granted. The lock will be
automatically released at the end of the hook in which it is
granted.
Do not mindlessly call this method, as it triggers a cascade of
hooks. For example, if you call acquire() every time in your
peers relation-changed hook you will end up with an infinite loop
of hooks. It should almost always be guarded by some condition.
'''
unit = hookenv.local_unit()
ts = self.requests[unit].get(lock)
if not ts:
# If there is no outstanding request on the peers relation,
# create one.
self.requests.setdefault(lock, {})
self.requests[unit][lock] = _timestamp()
self.msg('Requested {}'.format(lock))
# If the leader has granted the lock, yay.
if self.granted(lock):
self.msg('Acquired {}'.format(lock))
return True
# If the unit making the request also happens to be the
# leader, it must handle the request now. Even though the
# request has been stored on the peers relation, the peers
# relation-changed hook will not be triggered.
if hookenv.is_leader():
return self.grant(lock, unit)
return False # Can't acquire lock, yet. Maybe next hook.
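As the docstring warns, acquire() should sit behind a real condition. A typical usage sketch with the Serial coordinator from charmhelpers; the config option and service name are placeholders:

from charmhelpers import coordinator
from charmhelpers.core import hookenv, host

def restart_needed():
    # Placeholder condition: only ask for the lock when there is work to do.
    return hookenv.config().changed('some-option')

def maybe_rolling_restart():
    serial = coordinator.Serial()
    # acquire() may grant now or in a later hook; act only once granted.
    if restart_needed() and serial.acquire('restart'):
        host.service_restart('my-service')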
def handle(self):
if not hookenv.is_leader():
return # Only the leader can grant requests.
self.msg('Leader handling coordinator requests')
# Clear our grants that have been released.
for unit in self.grants.keys():
for lock, grant_ts in list(self.grants[unit].items()):
req_ts = self.requests.get(unit, {}).get(lock)
if req_ts != grant_ts:
# The request timestamp does not match the granted
# timestamp. Several hooks on 'unit' may have run
# before the leader got a chance to make a decision,
# and 'unit' may have released its lock and attempted
# to reacquire it. This will change the timestamp,
# and we correctly revoke the old grant putting it
# to the end of the queue.
ts = datetime.strptime(self.grants[unit][lock],
_timestamp_format)
del self.grants[unit][lock]
self.released(unit, lock, ts)
# Grant locks
for unit in self.requests.keys():
for lock in self.requests[unit]:
self.grant(lock, unit)
def grant(self, lock, unit):
'''Maybe grant the lock to a unit.
The decision to grant the lock or not is made for $lock
by a corresponding method grant_$lock, which you may define
in a subclass. If no such method is defined, the default_grant
method is used. See Serial.default_grant() for details.
'''
if not hookenv.is_leader():
return False # Not the leader, so we cannot grant.
# Set of units already granted the lock.
granted = set()
for u in self.grants:
if lock in self.grants[u]:
granted.add(u)
if unit in granted:
return True # Already granted.
# Ordered list of units waiting for the lock.
reqs = set()
for u in self.requests:
if u in granted:
continue # In the granted set. Not wanted in the req list.
for _lock, ts in self.requests[u].items():
if _lock == lock:
reqs.add((ts, u))
queue = [t[1] for t in sorted(reqs)]
if unit not in queue:
return False # Unit has not requested the lock.
# Locate custom logic, or fallback to the default.
grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant)
if grant_func(lock, unit, granted, queue):
# Grant the lock.
self.msg('Leader grants {} to {}'.format(lock, unit))
self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
return True
return False
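default_grant() is referenced but not included here. Its documented behaviour is that, unless overridden, only one unit holds the lock at a time and the oldest queued request wins; a sketch matching that description (not necessarily the exact upstream code):

def default_grant(self, lock, unit, granted, queue):
    # Grant only if nothing currently holds the lock and this unit's
    # request is the oldest one in the queue.
    return unit == queue[0] and not granted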
def __setitem__(self, key, value):
if not hookenv.is_leader():
raise TypeError('Not the leader. Cannot change leader settings.')
if value is not None and not isinstance(value, six.string_types):
# We don't do implicit casting. This would cause simple
# types like integers to be read back as strings in subsequent
# hooks, and mutable types would require a lot of wrapping
# to ensure leader-set gets called when they are mutated.
raise ValueError('Only string values allowed')
hookenv.leader_set({key: value})
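A short usage sketch for this dict-like leader-settings wrapper; the class name is assumed, and only strings (or None) may be stored, per the check above:

settings = LeaderSettings()  # hypothetical name for the class shown above
if hookenv.is_leader():
    settings['db-sync-done'] = 'true'  # must be a string (or None to unset)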