Helm Cinder: support multiple Ceph backends

Currently sysinv generates overrides only for the default and backup
Ceph pools. Adding a new Ceph storage backend does not make it
available to the stx-openstack application.

Iterate over all Ceph storage backends and create corresponding
Ceph pool overrides, which are then used by the Cinder Helm
chart to set up Cinder configuration and pool access.
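
For illustration, a rough sketch of the conf.ceph overrides this
generates for a system with the default 'storage' tier plus a
hypothetical secondary tier named 'gold' (monitor address and
replication factor are made up; crush rule names assume
CEPH_CRUSH_TIER_SUFFIX is '-tier'):

    conf_ceph = {
        'monitors': ['192.168.204.3:6789'],  # illustrative monitor address
        'admin_keyring': 'null',
        'pools': {
            'cinder-volumes': {              # pool for the primary tier
                'replication': 2,
                'crush_rule': 'storage_tier_ruleset',
                'chunk_size': 8,
                'app_name': 'cinder-volumes',
            },
            'cinder-volumes-gold': {         # pool for the secondary tier
                'replication': 2,
                'crush_rule': 'gold_tier_ruleset',
                'chunk_size': 8,
                'app_name': 'cinder-volumes',
            },
            # 'backup' copies the settings of the default backend's pool
            'backup': {
                'replication': 2,
                'crush_rule': 'storage_tier_ruleset',
                'chunk_size': 8,
                'app_name': 'cinder-volumes',
            },
        },
    }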

Change-Id: I2ca84406238e6c7462709822b303e25176fb9c8a
Depends-On: I29c7d3ed118f4a6726f2ea887a165f256bc32fd5
Story: 2003909
Task: 30351
Signed-off-by: Daniel Badea <daniel.badea@windriver.com>
Daniel Badea 2019-06-12 15:36:19 +00:00 committed by dbadea
parent 7370282e64
commit 09395bd525
3 changed files with 29 additions and 24 deletions


@@ -1,4 +1,4 @@
 SRC_DIR="stx-openstack-helm"
 COPY_LIST_TO_TAR="$PKG_BASE/../../../helm-charts/garbd \
                   $PKG_BASE/../../../helm-charts/nginx-ports-control"
-TIS_PATCH_VER=16
+TIS_PATCH_VER=17


@@ -736,6 +736,8 @@ CEPH_POOL_VOLUMES_NAME = 'cinder-volumes'
 CEPH_POOL_VOLUMES_PG_NUM = 512
 CEPH_POOL_VOLUMES_PGP_NUM = 512
 CEPH_POOL_VOLUMES_QUOTA_GIB = 0
+CEPH_POOL_VOLUMES_CHUNK_SIZE = 8
+CEPH_POOL_VOLUMES_APP_NAME = 'cinder-volumes'
 CEPH_POOL_IMAGES_NAME = 'images'
 CEPH_POOL_IMAGES_PG_NUM = 256


@@ -58,37 +58,40 @@ class CinderHelm(openstack.OpenstackBaseHelm):
         if not ceph_backend:
             return {}
 
+        primary_tier_name = \
+            constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH]
+
         replication, min_replication = \
             StorageBackendConfig.get_ceph_pool_replication(self.dbapi)
 
-        rule_name = "{0}{1}{2}".format(
-            constants.SB_TIER_DEFAULT_NAMES[
-                constants.SB_TIER_TYPE_CEPH],
-            constants.CEPH_CRUSH_TIER_SUFFIX,
-            "-ruleset").replace('-', '_')
+        pools = {}
+        for backend in self.dbapi.storage_ceph_get_list():
+            if backend.tier_name == primary_tier_name:
+                pool_name = constants.CEPH_POOL_VOLUMES_NAME
+            else:
+                pool_name = "%s-%s" % (constants.CEPH_POOL_VOLUMES_NAME,
+                                       backend.tier_name)
+            rule_name = "{0}{1}{2}".format(
+                backend.tier_name, constants.CEPH_CRUSH_TIER_SUFFIX,
+                "-ruleset").replace('-', '_')
+            pool = {
+                'replication': replication,
+                'crush_rule': rule_name.encode('utf8', 'strict'),
+                'chunk_size': constants.CEPH_POOL_VOLUMES_CHUNK_SIZE,
+                'app_name': constants.CEPH_POOL_VOLUMES_APP_NAME
+            }
+            pools[pool_name.encode('utf8', 'strict')] = pool
+            if backend.name == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
+                # Backup uses the same replication and crush rule as
+                # the default storage backend
+                pools['backup'] = dict(pool)
 
-        conf_ceph = {
+        return {
             'monitors': self._get_formatted_ceph_monitor_ips(),
             'admin_keyring': 'null',
-            'pools': {
-                'backup': {
-                    # We use the chart to configure the pool for backups, so
-                    # it's safe to use the same replication as for the primary
-                    # tier pools.
-                    'replication': replication,
-                    'crush_rule': rule_name,
-                },
-                'volume': {
-                    # The cinder chart doesn't currently support specifying
-                    # the config for multiple volume/backup pools.
-                    'replication': replication,
-                    'crush_rule': rule_name,
-                }
-            }
+            'pools': pools
         }
 
-        return conf_ceph
-
     def _get_conf_cinder_overrides(self):
         # Get all the internal CEPH backends.
         backends = self.dbapi.storage_backend_get_list_by_type(
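
As a quick sanity check of the naming scheme introduced above, a
minimal standalone sketch (tier names hypothetical; the constants are
assumed values: CEPH_CRUSH_TIER_SUFFIX as '-tier' and the primary tier
name as 'storage'):

    CEPH_CRUSH_TIER_SUFFIX = "-tier"       # assumed value from sysinv constants
    CEPH_POOL_VOLUMES_NAME = "cinder-volumes"
    PRIMARY_TIER_NAME = "storage"          # assumed SB_TIER_DEFAULT_NAMES value

    def pool_and_rule(tier_name):
        # Mirrors the per-backend naming logic added in the hunk above.
        if tier_name == PRIMARY_TIER_NAME:
            pool_name = CEPH_POOL_VOLUMES_NAME
        else:
            pool_name = "%s-%s" % (CEPH_POOL_VOLUMES_NAME, tier_name)
        rule_name = "{0}{1}{2}".format(
            tier_name, CEPH_CRUSH_TIER_SUFFIX,
            "-ruleset").replace('-', '_')
        return pool_name, rule_name

    print(pool_and_rule("storage"))  # ('cinder-volumes', 'storage_tier_ruleset')
    print(pool_and_rule("gold"))     # ('cinder-volumes-gold', 'gold_tier_ruleset')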