Ceph for standard: Add deployment model to System Inventory cluster API

In order to enable StarlingX Dashboard to display and allow configu-
ration of Ceph we need a new field, deployment_model, added to the
'cluster' API responses. This field is generated at runtime based on
the configured ceph monitors and type of system.

Four options have been defined for deployment_model:
o storage-nodes: OSDs are deployed on storage nodes. Main trigger for
  this model is the creation of a node with storage personality.
o controller-nodes: OSDs are deployed on controller nodes. Main
  trigger is creation of Ceph monitor on a worker node. This is the
  implicit model on AIO Duplex.
o aio-sx: OSDs are deployed on a simplex controller.
o undefined: before either a storage node is installed or a Ceph
  monitor is created on a worker node.

This commit also enables storage profiles on controllers based on
storage model and adds the new field to CLI output of
'system cluster-*' commands.

Change-Id: I4d5cb6120abb176025857ae9749e8eb6ca121475
Implements: containerization-2002844-CEPH-persistent-storage-backend-for-Kubernetes
Story: 2002844
Task: 29113
Signed-off-by: Ovidiu Poncea <Ovidiu.Poncea@windriver.com>
This commit is contained in:
Ovidiu Poncea 2019-01-25 18:50:55 +02:00
parent 02a4f12dc4
commit 8db7a1bc9d
7 changed files with 46 additions and 32 deletions

View File

@ -42,9 +42,9 @@ def _tier_formatter(values):
def _print_cluster_show(obj): def _print_cluster_show(obj):
fields = ['uuid', 'cluster_uuid', 'type', 'name', 'peers', 'tiers'] fields = ['uuid', 'cluster_uuid', 'type', 'name', 'peers', 'tiers', 'deployment_model']
labels = ['uuid', 'cluster_uuid', 'type', 'name', 'replication_groups', labels = ['uuid', 'cluster_uuid', 'type', 'name', 'replication_groups',
'storage_tiers'] 'storage_tiers', 'deployment_model']
data = [(f, getattr(obj, f, '')) for f in fields] data = [(f, getattr(obj, f, '')) for f in fields]
utils.print_tuple_list( utils.print_tuple_list(
data, labels, formatters={'peers': _peer_formatter, data, labels, formatters={'peers': _peer_formatter,
@ -65,7 +65,7 @@ def do_cluster_list(cc, args):
"""List Clusters.""" """List Clusters."""
clusters = cc.cluster.list() clusters = cc.cluster.list()
fields = ['uuid', 'cluster_uuid', 'type', 'name'] fields = ['uuid', 'cluster_uuid', 'type', 'name', 'deployment_model']
utils.print_list(clusters, fields, fields, sortby=1) utils.print_list(clusters, fields, fields, sortby=1)

View File

@ -131,8 +131,6 @@ class CephMon(base.APIBase):
'ceph_mon_gib', 'ceph_mon_gib',
'state', 'state',
'task', 'task',
'ceph_mon_dev_ctrl0',
'ceph_mon_dev_ctrl1',
'hostname']) 'hostname'])
if ceph_mon.device_path: if ceph_mon.device_path:

View File

@ -35,6 +35,7 @@ from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils from sysinv.api.controllers.v1 import utils
from sysinv.api.controllers.v1 import storage_tier as storage_tier_api from sysinv.api.controllers.v1 import storage_tier as storage_tier_api
from sysinv.api.controllers.v1.query import Query from sysinv.api.controllers.v1.query import Query
from sysinv.common import ceph
from sysinv.common import constants from sysinv.common import constants
from sysinv.common import exception from sysinv.common import exception
from sysinv.common import utils as cutils from sysinv.common import utils as cutils
@ -98,6 +99,9 @@ class Cluster(base.APIBase):
name = wtypes.text name = wtypes.text
"User defined name of the cluster" "User defined name of the cluster"
deployment_model = wtypes.text
"Deployment model used by cluster"
peers = types.MultiType([list]) peers = types.MultiType([list])
"List of peers info in the cluster" "List of peers info in the cluster"
@ -132,7 +136,18 @@ class Cluster(base.APIBase):
if not expand: if not expand:
cluster.unset_fields_except(['uuid', 'cluster_uuid', cluster.unset_fields_except(['uuid', 'cluster_uuid',
'type', 'name', 'peers', 'type', 'name', 'peers',
'tiers']) 'tiers', 'deployment_model'])
# All Ceph type clusters have the same storage model
if cluster.type == constants.CLUSTER_TYPE_CEPH:
try:
# Storage model is defined dynamically, displayed by CLI
# and used by Horizon.
cluster.deployment_model = ceph.get_ceph_storage_model()
except Exception:
cluster.deployment_model = constants.CEPH_UNDEFINED_MODEL
else:
cluster.deployment_model = None
cluster.links = [link.Link.make_link('self', pecan.request.host_url, cluster.links = [link.Link.make_link('self', pecan.request.host_url,
'clusters', cluster.uuid), 'clusters', cluster.uuid),

View File

@ -46,6 +46,7 @@ from sysinv.api.controllers.v1 import cpu_utils
from sysinv.api.controllers.v1 import types from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import port as port_api from sysinv.api.controllers.v1 import port as port_api
from sysinv.api.controllers.v1 import ethernet_port as ethernet_port_api from sysinv.api.controllers.v1 import ethernet_port as ethernet_port_api
from sysinv.common import ceph
from sysinv.common import constants from sysinv.common import constants
from sysinv.common import exception from sysinv.common import exception
from sysinv.common import utils as cutils from sysinv.common import utils as cutils
@ -898,29 +899,25 @@ class ProfileController(rest.RestController):
if 'profiletype' in profile_dict and profile_dict['profiletype']: if 'profiletype' in profile_dict and profile_dict['profiletype']:
profiletype = profile_dict['profiletype'] profiletype = profile_dict['profiletype']
if profiletype == constants.PROFILE_TYPE_STORAGE: if profiletype == constants.PROFILE_TYPE_STORAGE:
stor_model = ceph.get_ceph_storage_model()
if constants.WORKER in from_ihost.subfunctions: if constants.WORKER in from_ihost.subfunctions:
# combo has no ceph # combo has no ceph
profiletype = constants.PROFILE_TYPE_LOCAL_STORAGE profiletype = constants.PROFILE_TYPE_LOCAL_STORAGE
LOG.info("No ceph backend for stor profile, assuming " LOG.info("No ceph backend for stor profile, assuming "
"%s" % profiletype) "%s" % profiletype)
elif constants.CONTROLLER in from_ihost.subfunctions: elif not StorageBackendConfig.has_backend_configured(
pecan.request.dbapi,
constants.CINDER_BACKEND_CEPH
):
raise wsme.exc.ClientSideError(_("Storage profiles " raise wsme.exc.ClientSideError(_("Storage profiles "
"not applicable for %s with subfunctions %s." % "not applicable for %s with non Ceph backend." %
(from_ihost.hostname, from_ihost.subfunctions))) from_ihost.hostname))
elif constants.STORAGE in from_ihost.subfunctions: elif (from_ihost.personality == constants.CONTROLLER and
if not StorageBackendConfig.has_backend_configured( stor_model != constants.CEPH_CONTROLLER_MODEL):
pecan.request.dbapi,
constants.CINDER_BACKEND_CEPH
):
raise wsme.exc.ClientSideError(_("Storage profiles "
"not applicable for %s with subfunctions %s "
"and non Ceph backend." %
(from_ihost.hostname, from_ihost.subfunctions)))
else:
raise wsme.exc.ClientSideError(_("Storage profiles " raise wsme.exc.ClientSideError(_("Storage profiles "
"not applicable for %s with unsupported " "not applicable for %s as storage deployment "
"subfunctions %s." % "model is: %s" %
(from_ihost.hostname, from_ihost.subfunctions))) (from_ihost.hostname, stor_model)))
# Create profile # Create profile
LOG.debug("iprofileihost is: %s " % profile_dict) LOG.debug("iprofileihost is: %s " % profile_dict)

View File

@ -370,7 +370,8 @@ class CephApiOperator(object):
def crushmap_tiers_add(self): def crushmap_tiers_add(self):
"""Add all custom storage tiers to the crushmap. """ """Add all custom storage tiers to the crushmap. """
cluster = pecan.request.dbapi.clusters_get_all(name='ceph_cluster') ceph_cluster_name = constants.CLUSTER_CEPH_DEFAULT_NAME
cluster = pecan.request.dbapi.clusters_get_all(name=ceph_cluster_name)
# get the list of tiers # get the list of tiers
tiers = pecan.request.dbapi.storage_tier_get_by_cluster( tiers = pecan.request.dbapi.storage_tier_get_by_cluster(
@ -404,7 +405,8 @@ class CephApiOperator(object):
def _crushmap_tiers_bucket_add(self, bucket_name, bucket_type): def _crushmap_tiers_bucket_add(self, bucket_name, bucket_type):
"""Add a new bucket to all the tiers in the crushmap. """ """Add a new bucket to all the tiers in the crushmap. """
cluster = pecan.request.dbapi.clusters_get_all(name='ceph_cluster') ceph_cluster_name = constants.CLUSTER_CEPH_DEFAULT_NAME
cluster = pecan.request.dbapi.clusters_get_all(name=ceph_cluster_name)
tiers = pecan.request.dbapi.storage_tier_get_by_cluster( tiers = pecan.request.dbapi.storage_tier_get_by_cluster(
cluster[0].uuid) cluster[0].uuid)
for t in tiers: for t in tiers:
@ -418,7 +420,8 @@ class CephApiOperator(object):
def _crushmap_tiers_bucket_remove(self, bucket_name): def _crushmap_tiers_bucket_remove(self, bucket_name):
"""Remove an existing bucket from all the tiers in the crushmap. """ """Remove an existing bucket from all the tiers in the crushmap. """
cluster = pecan.request.dbapi.clusters_get_all(name='ceph_cluster') ceph_cluster_name = constants.CLUSTER_CEPH_DEFAULT_NAME
cluster = pecan.request.dbapi.clusters_get_all(name=ceph_cluster_name)
tiers = pecan.request.dbapi.storage_tier_get_by_cluster( tiers = pecan.request.dbapi.storage_tier_get_by_cluster(
cluster[0].uuid) cluster[0].uuid)
for t in tiers: for t in tiers:
@ -433,7 +436,8 @@ class CephApiOperator(object):
ancestor_name): ancestor_name):
"""Move common bucket in all the tiers in the crushmap. """ """Move common bucket in all the tiers in the crushmap. """
cluster = pecan.request.dbapi.clusters_get_all(name='ceph_cluster') ceph_cluster_name = constants.CLUSTER_CEPH_DEFAULT_NAME
cluster = pecan.request.dbapi.clusters_get_all(name=ceph_cluster_name)
tiers = pecan.request.dbapi.storage_tier_get_by_cluster( tiers = pecan.request.dbapi.storage_tier_get_by_cluster(
cluster[0].uuid) cluster[0].uuid)
for t in tiers: for t in tiers:
@ -769,8 +773,6 @@ def get_ceph_storage_model(dbapi=None):
for chost in controller_hosts: for chost in controller_hosts:
istors = dbapi.istor_get_by_ihost(chost['uuid']) istors = dbapi.istor_get_by_ihost(chost['uuid'])
if len(istors): if len(istors):
LOG.info("Controller host %s has OSDs configured. System has ceph "
"controller storage." % chost['hostname'])
is_controller_model = True is_controller_model = True
break break

View File

@ -383,6 +383,10 @@ GLANCE_BACKEND_RBD = 'rbd'
GLANCE_BACKEND_HTTP = 'http' GLANCE_BACKEND_HTTP = 'http'
GLANCE_BACKEND_GLANCE = 'glance' GLANCE_BACKEND_GLANCE = 'glance'
# Clusters
CLUSTER_TYPE_CEPH = "ceph"
CLUSTER_CEPH_DEFAULT_NAME = "ceph_cluster"
# Storage Tiers: types (aligns with polymorphic backends) # Storage Tiers: types (aligns with polymorphic backends)
SB_TIER_TYPE_CEPH = SB_TYPE_CEPH SB_TIER_TYPE_CEPH = SB_TYPE_CEPH
SB_TIER_SUPPORTED = [SB_TIER_TYPE_CEPH] SB_TIER_SUPPORTED = [SB_TIER_TYPE_CEPH]
@ -438,8 +442,8 @@ SB_CONFIGURATION_TIMEOUT = 1200
# Controller model: OSDs are on controllers, no storage nodes can # Controller model: OSDs are on controllers, no storage nodes can
# be defined. # be defined.
# Storage model: OSDs are on dedicated storage nodes. # Storage model: OSDs are on dedicated storage nodes.
CEPH_STORAGE_MODEL = 'storage' CEPH_STORAGE_MODEL = 'storage-nodes'
CEPH_CONTROLLER_MODEL = 'controller' CEPH_CONTROLLER_MODEL = 'controller-nodes'
CEPH_AIO_SX_MODEL = 'aio-sx' CEPH_AIO_SX_MODEL = 'aio-sx'
CEPH_UNDEFINED_MODEL = 'undefined' CEPH_UNDEFINED_MODEL = 'undefined'

View File

@ -1463,8 +1463,6 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
'state': 'String', 'state': 'String',
'task': 'String', 'task': 'String',
'ceph_mon_gib': 'Integer', 'ceph_mon_gib': 'Integer',
'ceph_mon_dev_ctrl0': 'String',
'ceph_mon_dev_ctrl1': 'String',
} }
for col, coltype in storconfigs_cols.items(): for col, coltype in storconfigs_cols.items():
self.assertTrue(isinstance(storconfigs.c[col].type, self.assertTrue(isinstance(storconfigs.c[col].type,