def validate_keystone_roles(self, client):
    """Verify all existing roles."""
    u.log.debug('Checking keystone roles...')
    # Both roles must exist and carry a non-null id.
    expected = [{'name': role_name, 'id': u.not_null}
                for role_name in ('demoRole', 'Admin')]
    actual = client.roles.list()
    failure = u.validate_role_data(expected, actual)
    if failure:
        amulet.raise_status(amulet.FAIL, msg=failure)
# Example snippets exercising amulet.FAIL (caption translated from Chinese).
def test_121_keystone_demo_domain_admin_access(self):
    """Verify that end-user domain admin does not have elevated
    privileges. Catch regressions like LP#1651989"""
    if self.is_mitaka_or_newer():
        u.log.debug('Checking keystone end-user domain admin access...')
        self.set_api_version(3)
        # Authenticate as end-user domain admin and verify that we have
        # appropriate access.
        client = u.authenticate_keystone(
            self.keystone_sentries[0].info['public-address'],
            username=self.demo_domain_admin,
            password='password',
            api_version=3,
            user_domain_name=self.demo_domain,
            domain_name=self.demo_domain,
        )
        try:
            # Expect failure: a domain-scoped admin must not be able to
            # list all domains in the cloud.
            client.domains.list()
        except Exception as e:
            message = ('Retrieve domain list as end-user domain admin '
                       'NOT allowed...OK ({})'.format(e))
            u.log.debug(message)
            # NOTE: a redundant 'pass' after the statements above was
            # removed; it had no effect.
        else:
            message = ('Retrieve domain list as end-user domain admin '
                       'allowed')
            amulet.raise_status(amulet.FAIL, msg=message)
def test_122_keystone_project_scoped_admin_access(self):
    """Verify that user admin in domain admin_domain has access to
    identity-calls guarded by rule:cloud_admin when using project
    scoped token."""
    if not self.is_mitaka_or_newer():
        return
    u.log.debug('Checking keystone project scoped admin access...')
    self.set_api_version(3)
    # Authenticate as end-user domain admin and verify that we have
    # appropriate access.
    keystone_client = u.authenticate_keystone(
        self.keystone_sentries[0].info['public-address'],
        username='admin',
        password='openstack',
        api_version=3,
        admin_port=True,
        user_domain_name='admin_domain',
        project_domain_name='admin_domain',
        project_name='admin',
    )
    try:
        keystone_client.domains.list()
        u.log.debug('OK')
    except Exception as e:
        message = ('Retrieve domain list as admin with project scoped '
                   'token FAILED. ({})'.format(e))
        amulet.raise_status(amulet.FAIL, msg=message)
def test_201_mysql_keystone_shared_db_relation(self):
    """Verify the mysql shared-db relation data"""
    u.log.debug('Checking mysql to keystone db relation data...')
    # The percona-cluster unit must publish credentials and host info.
    expected_data = {
        'private-address': u.valid_ip,
        'password': u.not_null,
        'db_host': u.valid_ip,
    }
    ret = u.validate_relation_data(
        self.pxc_sentry, ['shared-db', 'keystone:shared-db'], expected_data)
    if ret:
        amulet.raise_status(
            amulet.FAIL, msg=u.relation_error('mysql shared-db', ret))
def test_203_cinder_keystone_identity_service_relation(self):
    """Verify the cinder identity-service relation data"""
    u.log.debug('Checking cinder to keystone id relation data...')
    unit = self.cinder_sentry
    relation = ['identity-service', 'keystone:identity-service']
    expected = {
        'cinder_service': 'cinder',
        'cinder_region': 'RegionOne',
        'cinder_public_url': u.valid_url,
        'cinder_internal_url': u.valid_url,
        'cinder_admin_url': u.valid_url,
        'cinderv2_service': 'cinderv2',
        'cinderv2_region': 'RegionOne',
        'cinderv2_public_url': u.valid_url,
        'cinderv2_internal_url': u.valid_url,
        'cinderv2_admin_url': u.valid_url,
        'private-address': u.valid_ip,
    }
    if self._get_openstack_release() >= self.xenial_pike:
        # Pike dropped the bare 'cinder' (v1) endpoints and added v3.
        for key in ('cinder_region', 'cinder_service', 'cinder_public_url',
                    'cinder_admin_url', 'cinder_internal_url'):
            expected.pop(key)
        # NOTE: the previous dict literal set 'cinderv3_region' twice and
        # redundantly re-set 'cinderv2_region' to its existing value; the
        # duplicates are removed here (same resulting dict).
        expected.update({
            'cinderv3_service': 'cinderv3',
            'cinderv3_region': 'RegionOne',
            'cinderv3_public_url': u.valid_url,
            'cinderv3_internal_url': u.valid_url,
            'cinderv3_admin_url': u.valid_url})
    ret = u.validate_relation_data(unit, relation, expected)
    if ret:
        message = u.relation_error('cinder identity-service', ret)
        amulet.raise_status(amulet.FAIL, msg=message)
def _add_services(self, this_service, other_services):
    """Add services.

    Add services to the deployment where this_service is the local charm
    that we're testing and other_services are the other services that
    are being used in the local amulet tests.
    """
    if this_service['name'] != os.path.basename(os.getcwd()):
        s = this_service['name']
        msg = "The charm's root directory name needs to be {}".format(s)
        amulet.raise_status(amulet.FAIL, msg=msg)
    if 'units' not in this_service:
        this_service['units'] = 1
    self.d.add(this_service['name'], units=this_service['units'],
               constraints=this_service.get('constraints'))
    for svc in other_services:
        if 'location' in svc:
            branch_location = svc['location']
        elif self.series:
            # BUG FIX: a stray trailing comma previously made this a
            # 1-tuple rather than a charm-location string.
            branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
        else:
            branch_location = None
        if 'units' not in svc:
            svc['units'] = 1
        self.d.add(svc['name'], charm=branch_location, units=svc['units'],
                   constraints=svc.get('constraints'))
def _deploy(self):
    """Deploy environment and wait for all hooks to finish executing."""
    # Default 900s can be overridden, e.g. for slower CI substrates.
    timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
    try:
        self.d.setup(timeout=timeout)
        self.d.sentry.wait(timeout=timeout)
    except amulet.helpers.TimeoutError:
        # Report the timeout as an explicit amulet failure with the
        # limit that was in effect.
        amulet.raise_status(
            amulet.FAIL,
            msg="Deployment timed out ({}s)".format(timeout)
        )
    # Other exceptions propagate unchanged; the previous no-op
    # 'except Exception: raise' clause was removed.
def _add_services(self, this_service, other_services):
    """Add services.

    Add services to the deployment where this_service is the local charm
    that we're testing and other_services are the other services that
    are being used in the local amulet tests.
    """
    if this_service['name'] != os.path.basename(os.getcwd()):
        s = this_service['name']
        msg = "The charm's root directory name needs to be {}".format(s)
        amulet.raise_status(amulet.FAIL, msg=msg)
    if 'units' not in this_service:
        this_service['units'] = 1
    self.d.add(this_service['name'], units=this_service['units'],
               constraints=this_service.get('constraints'))
    for svc in other_services:
        if 'location' in svc:
            branch_location = svc['location']
        elif self.series:
            # BUG FIX: a stray trailing comma previously made this a
            # 1-tuple rather than a charm-location string.
            branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
        else:
            branch_location = None
        if 'units' not in svc:
            svc['units'] = 1
        self.d.add(svc['name'], charm=branch_location, units=svc['units'],
                   constraints=svc.get('constraints'))
def _deploy(self):
    """Deploy environment and wait for all hooks to finish executing."""
    # Default 900s can be overridden, e.g. for slower CI substrates.
    timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
    try:
        self.d.setup(timeout=timeout)
        self.d.sentry.wait(timeout=timeout)
    except amulet.helpers.TimeoutError:
        # Report the timeout as an explicit amulet failure with the
        # limit that was in effect.
        amulet.raise_status(
            amulet.FAIL,
            msg="Deployment timed out ({}s)".format(timeout)
        )
    # Other exceptions propagate unchanged; the previous no-op
    # 'except Exception: raise' clause was removed.
def _add_services(self, this_service, other_services):
    """Add services.

    Add services to the deployment where this_service is the local charm
    that we're testing and other_services are the other services that
    are being used in the local amulet tests.
    """
    if this_service['name'] != os.path.basename(os.getcwd()):
        s = this_service['name']
        msg = "The charm's root directory name needs to be {}".format(s)
        amulet.raise_status(amulet.FAIL, msg=msg)
    if 'units' not in this_service:
        this_service['units'] = 1
    self.d.add(this_service['name'], units=this_service['units'],
               constraints=this_service.get('constraints'))
    for svc in other_services:
        if 'location' in svc:
            branch_location = svc['location']
        elif self.series:
            # BUG FIX: a stray trailing comma previously made this a
            # 1-tuple rather than a charm-location string.
            branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
        else:
            branch_location = None
        if 'units' not in svc:
            svc['units'] = 1
        self.d.add(svc['name'], charm=branch_location, units=svc['units'],
                   constraints=svc.get('constraints'))
def _deploy(self):
    """Deploy environment and wait for all hooks to finish executing."""
    # Default 900s can be overridden, e.g. for slower CI substrates.
    timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
    try:
        self.d.setup(timeout=timeout)
        self.d.sentry.wait(timeout=timeout)
    except amulet.helpers.TimeoutError:
        # Report the timeout as an explicit amulet failure with the
        # limit that was in effect.
        amulet.raise_status(
            amulet.FAIL,
            msg="Deployment timed out ({}s)".format(timeout)
        )
    # Other exceptions propagate unchanged; the previous no-op
    # 'except Exception: raise' clause was removed.
def _add_services(self, this_service, other_services):
    """Add services.

    Add services to the deployment where this_service is the local charm
    that we're testing and other_services are the other services that
    are being used in the local amulet tests.
    """
    if this_service['name'] != os.path.basename(os.getcwd()):
        s = this_service['name']
        msg = "The charm's root directory name needs to be {}".format(s)
        amulet.raise_status(amulet.FAIL, msg=msg)
    if 'units' not in this_service:
        this_service['units'] = 1
    self.d.add(this_service['name'], units=this_service['units'],
               constraints=this_service.get('constraints'))
    for svc in other_services:
        if 'location' in svc:
            branch_location = svc['location']
        elif self.series:
            # BUG FIX: a stray trailing comma previously made this a
            # 1-tuple rather than a charm-location string.
            branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
        else:
            branch_location = None
        if 'units' not in svc:
            svc['units'] = 1
        self.d.add(svc['name'], charm=branch_location, units=svc['units'],
                   constraints=svc.get('constraints'))
def _deploy(self):
    """Deploy environment and wait for all hooks to finish executing."""
    # Default 900s can be overridden, e.g. for slower CI substrates.
    timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
    try:
        self.d.setup(timeout=timeout)
        self.d.sentry.wait(timeout=timeout)
    except amulet.helpers.TimeoutError:
        # Report the timeout as an explicit amulet failure with the
        # limit that was in effect.
        amulet.raise_status(
            amulet.FAIL,
            msg="Deployment timed out ({}s)".format(timeout)
        )
    # Other exceptions propagate unchanged; the previous no-op
    # 'except Exception: raise' clause was removed.
def _add_services(self, this_service, other_services):
    """Add services.

    Add services to the deployment where this_service is the local charm
    that we're testing and other_services are the other services that
    are being used in the local amulet tests.
    """
    if this_service['name'] != os.path.basename(os.getcwd()):
        s = this_service['name']
        msg = "The charm's root directory name needs to be {}".format(s)
        amulet.raise_status(amulet.FAIL, msg=msg)
    if 'units' not in this_service:
        this_service['units'] = 1
    self.d.add(this_service['name'], units=this_service['units'],
               constraints=this_service.get('constraints'))
    for svc in other_services:
        if 'location' in svc:
            branch_location = svc['location']
        elif self.series:
            # BUG FIX: a stray trailing comma previously made this a
            # 1-tuple rather than a charm-location string.
            branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
        else:
            branch_location = None
        if 'units' not in svc:
            svc['units'] = 1
        self.d.add(svc['name'], charm=branch_location, units=svc['units'],
                   constraints=svc.get('constraints'))
def _deploy(self):
    """Deploy environment and wait for all hooks to finish executing."""
    # Default 900s can be overridden, e.g. for slower CI substrates.
    timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
    try:
        self.d.setup(timeout=timeout)
        self.d.sentry.wait(timeout=timeout)
    except amulet.helpers.TimeoutError:
        # Report the timeout as an explicit amulet failure with the
        # limit that was in effect.
        amulet.raise_status(
            amulet.FAIL,
            msg="Deployment timed out ({}s)".format(timeout)
        )
    # Other exceptions propagate unchanged; the previous no-op
    # 'except Exception: raise' clause was removed.
def test_102_services(self):
    """Verify the expected services are running on the service units."""
    services = {
        self.rabbitmq_sentry: ['rabbitmq-server'],
        self.nova_sentry: ['nova-compute'],
        self.keystone_sentry: ['keystone'],
        self.glance_sentry: ['glance-registry', 'glance-api'],
        self.cinder_sentry: ['cinder-scheduler', 'cinder-volume'],
    }
    release = self._get_openstack_release()
    if release < self.xenial_ocata:
        services[self.cinder_sentry].append('cinder-api')
    if release < self.xenial_mitaka:
        # For upstart systems only. Ceph services under systemd
        # are checked by process name instead.
        mon_services = [
            'ceph-mon-all',
            'ceph-mon id=`hostname`'
        ]
        for ceph_sentry in (self.ceph0_sentry, self.ceph1_sentry,
                            self.ceph2_sentry):
            services[ceph_sentry] = mon_services
        services[self.ceph_osd_sentry] = [
            'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(n))
            for n in (0, 1)
        ]
    if release >= self.trusty_liberty:
        # Keystone runs under apache2 from liberty onward.
        services[self.keystone_sentry] = ['apache2']
    ret = u.validate_services_by_name(services)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)
def test_200_ceph_nova_client_relation(self):
    """Verify the ceph to nova ceph-client relation data."""
    u.log.debug('Checking ceph:nova-compute ceph-mon relation data...')
    # ceph-mon must publish auth mode and a cephx key to the client.
    ret = u.validate_relation_data(
        self.ceph0_sentry,
        ['client', 'nova-compute:ceph'],
        {'private-address': u.valid_ip,
         'auth': 'none',
         'key': u.not_null})
    if ret:
        amulet.raise_status(
            amulet.FAIL,
            msg=u.relation_error('ceph-mon to nova ceph-client', ret))
def test_202_ceph_glance_client_relation(self):
    """Verify the ceph to glance ceph-client relation data."""
    u.log.debug('Checking ceph:glance client relation data...')
    # ceph-mon must publish auth mode and a cephx key to the client.
    ret = u.validate_relation_data(
        self.ceph1_sentry,
        ['client', 'glance:ceph'],
        {'private-address': u.valid_ip,
         'auth': 'none',
         'key': u.not_null})
    if ret:
        amulet.raise_status(
            amulet.FAIL,
            msg=u.relation_error('ceph to glance ceph-client', ret))
def test_203_glance_ceph_client_relation(self):
    """Verify the glance to ceph client relation data."""
    u.log.debug('Checking glance:ceph client relation data...')
    # glance only needs to publish its address on this side.
    ret = u.validate_relation_data(
        self.glance_sentry,
        ['ceph', 'ceph-mon:client'],
        {'private-address': u.valid_ip})
    if ret:
        amulet.raise_status(
            amulet.FAIL,
            msg=u.relation_error('glance to ceph ceph-client', ret))
def test_204_ceph_cinder_client_relation(self):
    """Verify the ceph to cinder ceph-client relation data."""
    u.log.debug('Checking ceph:cinder ceph relation data...')
    # ceph-mon must publish auth mode and a cephx key to the client.
    ret = u.validate_relation_data(
        self.ceph2_sentry,
        ['client', 'cinder-ceph:ceph'],
        {'private-address': u.valid_ip,
         'auth': 'none',
         'key': u.not_null})
    if ret:
        amulet.raise_status(
            amulet.FAIL,
            msg=u.relation_error('ceph to cinder ceph-client', ret))