def test_100_services(self):
"""Verify the expected services are running on the corresponding
service units."""
services = {
self.cinder_sentry: ['cinder-scheduler',
'cinder-volume']
}
# Append to the cinder list; update() with the same key would replace
# the whole list and silently drop cinder-scheduler and cinder-volume.
if self._get_openstack_release() >= self.xenial_ocata:
services[self.cinder_sentry].append('apache2')
else:
services[self.cinder_sentry].append('cinder-api')
for i in range(self.keystone_num_units):
services[self.keystone_sentries[i]] = self.init_services
ret = u.validate_services_by_name(services)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
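# A minimal sketch (with illustrative release names) of the release
# gating used throughout these tests: _get_openstack_release() returns
# an ordinal that compares against named release attributes such as
# self.xenial_ocata, so '>=' reads as "this release or newer".
releases = ['trusty_icehouse', 'xenial_mitaka', 'xenial_ocata', 'xenial_pike']
xenial_ocata = releases.index('xenial_ocata')
current_release = releases.index('xenial_pike')  # stand-in for the deployment
assert current_release >= xenial_ocata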
def validate_keystone_tenants(self, client):
"""Verify all existing tenants."""
u.log.debug('Checking keystone tenants...')
expected = [
{'name': 'services',
'enabled': True,
'description': 'Created by Juju',
'id': u.not_null},
{'name': 'demoTenant',
'enabled': True,
'description': 'demo tenant',
'id': u.not_null},
{'name': 'admin',
'enabled': True,
'description': 'Created by Juju',
'id': u.not_null}
]
if self.keystone_api_version == 2:
actual = client.tenants.list()
else:
actual = client.projects.list()
ret = u.validate_tenant_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_138_service_catalog(self):
"""Verify that the service catalog endpoint data is valid."""
u.log.debug('Checking keystone service catalog...')
self.set_api_version(2)
endpoint_check = {
'adminURL': u.valid_url,
'id': u.not_null,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url
}
expected = {
'volume': [endpoint_check],
'identity': [endpoint_check]
}
if self._get_openstack_release() >= self.xenial_pike:
expected.pop('volume')
expected['volumev2'] = [endpoint_check]
actual = self.keystone_v2.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_140_keystone_endpoint(self):
"""Verify the keystone endpoint data."""
u.log.debug('Checking keystone api endpoint data...')
endpoints = self.keystone_v2.endpoints.list()
admin_port = '35357'
internal_port = public_port = '5000'
expected = {
'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null
}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
amulet.raise_status(amulet.FAIL,
msg='keystone endpoint: {}'.format(ret))
def test_142_cinder_endpoint(self):
"""Verify the cinder endpoint data."""
u.log.debug('Checking cinder endpoint...')
endpoints = self.keystone_v2.endpoints.list()
admin_port = internal_port = public_port = '8776'
expected = {
'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null
}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
amulet.raise_status(amulet.FAIL,
msg='cinder endpoint: {}'.format(ret))
def test_202_keystone_cinder_identity_service_relation(self):
"""Verify the keystone identity-service relation data"""
u.log.debug('Checking keystone to cinder id relation data...')
relation = ['identity-service', 'cinder:identity-service']
expected = {
'service_protocol': 'http',
'service_tenant': 'services',
'admin_token': 'ubuntutesting',
'service_password': u.not_null,
'service_port': '5000',
'auth_port': '35357',
'auth_protocol': 'http',
'private-address': u.valid_ip,
'auth_host': u.valid_ip,
'service_username': 'cinder_cinderv2',
'service_tenant_id': u.not_null,
'service_host': u.valid_ip
}
if self._get_openstack_release() >= self.xenial_pike:
expected['service_username'] = 'cinderv3_cinderv2'
for unit in self.keystone_sentries:
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('keystone identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_302_keystone_logging_config(self):
"""Verify the data in the keystone logging config file"""
u.log.debug('Checking keystone config file...')
expected = {
'logger_root': {
'level': 'WARNING',
'handlers': 'file,production',
},
'handlers': {
'keys': 'production,file,devel'
},
'handler_file': {
'level': 'DEBUG',
'args': "('{}', 'a')".format(self.log_file)
}
}
for unit in self.keystone_sentries:
for section, pairs in expected.items():
ret = u.validate_config_data(unit, self.logging_config,
section, pairs)
if ret:
message = "keystone logging config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
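# A self-contained sketch of the kind of section/key check that
# u.validate_config_data() performs, here against an in-memory logging
# config (Python 3 configparser; the file contents are illustrative).
import configparser
import io

sample = io.StringIO("[handler_file]\nlevel = DEBUG\nargs = ('/var/log/keystone/keystone.log', 'a')\n")
parser = configparser.ConfigParser()
parser.read_file(sample)
assert parser.get('handler_file', 'level') == 'DEBUG'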
def test_100_ceph_processes(self):
"""Verify that the expected service processes are running
on each ceph unit."""
# Process name and quantity of processes to expect on each unit
ceph_processes = {
'ceph-mon': 1
}
# Units with process names and PID quantities expected
expected_processes = {
self.ceph0_sentry: ceph_processes,
self.ceph1_sentry: ceph_processes,
self.ceph2_sentry: ceph_processes
}
actual_pids = u.get_unit_process_ids(expected_processes)
ret = u.validate_unit_process_ids(expected_processes, actual_pids)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
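# Shape sketch for the process check above, shown per unit for brevity
# (the real helpers key these dicts by sentry unit, as in the test):
# expected maps process name to count, the collected PIDs map process
# name to a PID list. Names and counts here are illustrative.
expected_counts = {'ceph-mon': 1, 'ceph-osd': 3}
collected_pids = {'ceph-mon': [1234], 'ceph-osd': [2000, 2001, 2002]}
assert all(len(collected_pids[n]) == c for n, c in expected_counts.items())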
def test_306_nova_rbd_config(self):
"""Verify the nova config file data regarding ceph."""
u.log.debug('Checking nova (rbd) config file data...')
unit = self.nova_sentry
conf = '/etc/nova/nova.conf'
expected = {
'libvirt': {
'rbd_user': 'nova-compute',
'rbd_secret_uuid': u.not_null
}
}
for section, pairs in expected.items():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "nova (rbd) config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_402_pause_resume_actions(self):
"""Veryfy that pause/resume works"""
u.log.debug("Testing pause")
cmd = "ceph -s"
sentry_unit = self.ceph0_sentry
action_id = u.run_action(sentry_unit, 'pause-health')
assert u.wait_on_action(action_id), "Pause health action failed."
output, code = sentry_unit.run(cmd)
if 'nodown' not in output or 'noout' not in output:
amulet.raise_status(amulet.FAIL, msg="Missing noout,nodown")
u.log.debug("Testing resume")
action_id = u.run_action(sentry_unit, 'resume-health')
assert u.wait_on_action(action_id), "Resume health action failed."
output, code = sentry_unit.run(cmd)
if 'nodown' in output or 'noout' in output:
amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown")
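# A tiny standalone sketch of the flag check used above: plain
# substring matching against 'ceph -s' output (the sample strings are
# illustrative, not captured from a live cluster).
def has_osd_flags(status_output, flags=('noout', 'nodown')):
    return all(flag in status_output for flag in flags)

assert has_osd_flags("flags noout,nodown")
assert not has_osd_flags("health HEALTH_OK")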
def find_pool(sentry_unit, pool_name):
"""
Run 'ceph osd dump' and search for the pool you specify.
:param sentry_unit: The unit to run this command from.
:param pool_name: str. The name of the Ceph pool to query
:return: str or None. The matching pool line, or None if not found
"""
output, dump_code = sentry_unit.run("ceph osd dump")
if dump_code != 0:
amulet.raise_status(
amulet.FAIL,
msg="ceph osd dump failed with output: {}".format(
output))
for line in output.split('\n'):
match = re.search(r"pool\s+\d+\s+'(?P<pool_name>.*)'", line)
if match:
name = match.group('pool_name')
if name == pool_name:
return line
return None
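# Self-contained sketch of the line parsing that find_pool() relies
# on, run against a sample 'ceph osd dump' pool line (the sample line
# is illustrative):
import re

sample = "pool 1 'rbd' replicated size 3 min_size 2 crush_rule 0"
match = re.search(r"pool\s+\d+\s+'(?P<pool_name>.*)'", sample)
assert match is not None and match.group('pool_name') == 'rbd'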
def test_499_ceph_cmds_exit_zero(self):
"""Check basic functionality of ceph cli commands against
all ceph units."""
sentry_units = [
self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry
]
commands = [
'sudo ceph health',
'sudo ceph mds stat',
'sudo ceph pg stat',
'sudo ceph osd stat',
'sudo ceph mon stat',
]
ret = u.check_commands_on_units(commands, sentry_units)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
# FYI: No restart check as ceph services do not restart
# when charm config changes, unless monitor count increases.
def test_102_service_catalog(self):
"""Verify that the service catalog endpoint data is valid."""
u.log.debug('Checking keystone service catalog...')
endpoint_check = {
'adminURL': u.valid_url,
'id': u.not_null,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url
}
expected = {
'network': [endpoint_check],
'compute': [endpoint_check],
'identity': [endpoint_check]
}
actual = self.keystone.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_104_network_endpoint(self):
"""Verify the neutron network endpoint data."""
u.log.debug('Checking neutron network api endpoint data...')
endpoints = self.keystone.endpoints.list()
admin_port = internal_port = public_port = '9696'
expected = {
'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null
}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
amulet.raise_status(amulet.FAIL,
msg='neutron network endpoint: {}'.format(ret))
def test_200_odl_controller_controller_api_relation(self):
"""Verify the odl-controller to neutron-api-odl relation data"""
u.log.debug('Checking odl-controller to neutron-api-odl relation data')
unit = self.odl_controller_sentry
relation = ['controller-api', 'neutron-api-odl:odl-controller']
expected = {
'private-address': u.valid_ip,
'username': 'admin',
'password': 'admin',
'port': '8080',
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('odl-controller controller-api', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_100_services(self):
"""Verify the expected services are running on the corresponding
service units."""
services = {
self.cinder_sentry: ['cinder-scheduler',
'cinder-volume']
}
# As above, append/assign instead of update() so existing service
# lists are not replaced wholesale.
if self._get_openstack_release() >= self.xenial_ocata:
services[self.cinder_sentry].append('apache2')
else:
services[self.cinder_sentry].append('cinder-api')
if self.is_liberty_or_newer():
for i in range(self.keystone_num_units):
services[self.keystone_sentries[i]] = ['apache2']
else:
services[self.keystone_sentries[0]] = ['keystone']
ret = u.validate_services_by_name(services)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_138_service_catalog(self):
"""Verify that the service catalog endpoint data is valid."""
u.log.debug('Checking keystone service catalog...')
self.set_api_version(2)
endpoint_check = {
'adminURL': u.valid_url,
'id': u.not_null,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url
}
expected = {
'volume': [endpoint_check],
'identity': [endpoint_check]
}
actual = self.keystone_v2.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_202_keystone_cinder_identity_service_relation(self):
"""Verify the keystone identity-service relation data"""
u.log.debug('Checking keystone to cinder id relation data...')
relation = ['identity-service', 'cinder:identity-service']
expected = {
'service_protocol': 'http',
'service_tenant': 'services',
'admin_token': 'ubuntutesting',
'service_password': u.not_null,
'service_port': '5000',
'auth_port': '35357',
'auth_protocol': 'http',
'private-address': u.valid_ip,
'auth_host': u.valid_ip,
'service_username': 'cinder_cinderv2',
'service_tenant_id': u.not_null,
'service_host': u.valid_ip
}
for unit in self.keystone_sentries:
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('keystone identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_203_cinder_keystone_identity_service_relation(self):
"""Verify the cinder identity-service relation data"""
u.log.debug('Checking cinder to keystone id relation data...')
unit = self.cinder_sentry
relation = ['identity-service', 'keystone:identity-service']
expected = {
'cinder_service': 'cinder',
'cinder_region': 'RegionOne',
'cinder_public_url': u.valid_url,
'cinder_internal_url': u.valid_url,
'cinder_admin_url': u.valid_url,
'cinderv2_service': 'cinderv2',
'cinderv2_region': 'RegionOne',
'cinderv2_public_url': u.valid_url,
'cinderv2_internal_url': u.valid_url,
'cinderv2_admin_url': u.valid_url,
'private-address': u.valid_ip,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('cinder identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_services(self):
"""Verify the expected services are running on the corresponding
service units."""
commands = {
self.compute_sentry: ['nova-compute',
'openvswitch-switch'],
self.gateway_sentry: ['openvswitch-switch',
'neutron-dhcp-agent',
'neutron-l3-agent',
'neutron-metadata-agent',
'neutron-metering-agent',
'neutron-lbaas-agent',
'nova-api-metadata'],
self.odl_controller_sentry: ['odl-controller'],
}
if self._get_openstack_release() >= self.xenial_newton:
commands[self.gateway_sentry].remove('neutron-lbaas-agent')
commands[self.gateway_sentry].append('neutron-lbaasv2-agent')
ret = u.validate_services_by_name(commands)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def _add_services(self, this_service, other_services):
"""Add services.
Add services to the deployment where this_service is the local charm
that we're testing and other_services are the other services that
are being used in the local amulet tests.
"""
if this_service['name'] != os.path.basename(os.getcwd()):
s = this_service['name']
msg = "The charm's root directory name needs to be {}".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
if 'units' not in this_service:
this_service['units'] = 1
self.d.add(this_service['name'], units=this_service['units'],
constraints=this_service.get('constraints'))
for svc in other_services:
if 'location' in svc:
branch_location = svc['location']
elif self.series:
# No trailing comma here: it would turn the URL into a one-tuple.
branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
else:
branch_location = None
if 'units' not in svc:
svc['units'] = 1
self.d.add(svc['name'], charm=branch_location, units=svc['units'],
constraints=svc.get('constraints'))
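# Sketch of the argument shapes _add_services() consumes; the charm
# names, unit counts, constraints, and location here are illustrative.
this_service = {'name': 'keystone', 'units': 2}
other_services = [
    {'name': 'percona-cluster'},
    {'name': 'cinder', 'units': 1, 'constraints': {'mem': '4G'}},
    {'name': 'mongodb', 'location': 'cs:xenial/mongodb'},
]
# deployment._add_services(this_service, other_services)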
def _deploy(self):
"""Deploy environment and wait for all hooks to finish executing."""
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
try:
self.d.setup(timeout=timeout)
self.d.sentry.wait(timeout=timeout)
except amulet.helpers.TimeoutError:
amulet.raise_status(
amulet.FAIL,
msg="Deployment timed out ({}s)".format(timeout)
)
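# The setup/wait timeout can be raised through the environment before
# running the tests; a sketch (the 1800-second value is illustrative):
import os
os.environ.setdefault('AMULET_SETUP_TIMEOUT', '1800')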