Merge "Update host force lock semantic check for ceph monitors"

Authored by Zuul on 2022-03-24 17:16:33 +00:00; committed by Gerrit Code Review
commit 59f027fc42
3 changed files with 34 additions and 57 deletions


@@ -2436,16 +2436,7 @@ class HostController(rest.RestController):
         if (personality is not None and
                 personality.find(constants.STORAGE_HOSTNAME) != -1 and
                 not skip_ceph_checks):
-            num_monitors, required_monitors, __ = \
-                self._ceph.get_monitors_status(pecan.request.dbapi)
-            if num_monitors < required_monitors:
-                raise wsme.exc.ClientSideError(_(
-                    "Only %d storage "
-                    "monitor available. At least %s unlocked and "
-                    "enabled hosts with monitors are required. Please"
-                    " ensure hosts with monitors are unlocked and "
-                    "enabled.") %
-                    (num_monitors, required_monitors))
+            utils.check_node_lock_ceph_mon(ihost, force=True, ceph_helper=self._ceph)
 
             # If it is the last storage node to delete, we need to delete
             # ceph osd pools and update additional tier status to "defined"
@@ -5603,18 +5594,10 @@ class HostController(rest.RestController):
                     and hostupdate.ihost_orig['hostname'] == \
                         constants.CONTROLLER_1_HOSTNAME:
                 check_storage_monitors = False
             if check_storage_monitors:
-                num_monitors, required_monitors, quorum_names = \
-                    self._ceph.get_monitors_status(pecan.request.dbapi)
-                if (hostupdate.ihost_orig['hostname'] in quorum_names and
-                        num_monitors - 1 < required_monitors):
-                    raise wsme.exc.ClientSideError(_(
-                        "Only %d storage "
-                        "monitor available. At least %s unlocked and "
-                        "enabled hosts with monitors are required. Please"
-                        " ensure hosts with monitors are unlocked and "
-                        "enabled.") %
-                        (num_monitors, required_monitors))
+                utils.check_node_lock_ceph_mon(
+                    hostupdate.ihost_orig, force=force, ceph_helper=self._ceph)
 
         if not force:
             # sm-lock-pre-check
@@ -6232,18 +6215,8 @@
                 constants.ADMIN_UNLOCKED and
                 hostupdate.ihost_orig['operational'] ==
                 constants.OPERATIONAL_ENABLED):
-            num_monitors, required_monitors, quorum_names = \
-                self._ceph.get_monitors_status(pecan.request.dbapi)
-            if (hostupdate.ihost_orig['hostname'] in quorum_names and
-                    num_monitors - 1 < required_monitors):
-                raise wsme.exc.ClientSideError(_(
-                    "Only %d storage "
-                    "monitor available. At least %s unlocked and "
-                    "enabled hosts with monitors are required. Please"
-                    " ensure hosts with monitors are unlocked and "
-                    "enabled.") %
-                    (num_monitors, required_monitors))
+            utils.check_node_lock_ceph_mon(
+                hostupdate.ihost_orig, force=force, ceph_helper=self._ceph)
 
         storage_nodes = pecan.request.dbapi.ihost_get_by_personality(
             constants.STORAGE)
@@ -6387,17 +6360,8 @@
                 constants.ADMIN_UNLOCKED and
                 hostupdate.ihost_orig['operational'] ==
                 constants.OPERATIONAL_ENABLED):
-            num_monitors, required_monitors, quorum_names = \
-                self._ceph.get_monitors_status(pecan.request.dbapi)
-            if (hostname in quorum_names and
-                    num_monitors - 1 < required_monitors):
-                raise wsme.exc.ClientSideError(_(
-                    "Only %d Ceph "
-                    "monitors available. At least %s unlocked and "
-                    "enabled hosts with monitors are required. "
-                    "Please ensure hosts with monitors are "
-                    "unlocked and enabled.") %
-                    (num_monitors, required_monitors))
+            utils.check_node_lock_ceph_mon(
+                hostupdate.ihost_orig, force=force, ceph_helper=self._ceph)
 
     def check_unlock_interfaces(self, hostupdate):
         """Semantic check for interfaces on host-unlock."""


@@ -623,20 +623,11 @@ def _check_host(stor):
         raise wsme.exc.ClientSideError(_(
             "System must have a %s backend" % constants.SB_TYPE_CEPH))
 
-    # semantic check: whether at least 2 unlocked hosts are monitors
+    # semantic check: whether host can be locked or force locked based on
+    # ceph monitors availability
     if not cutils.is_aio_system(pecan.request.dbapi):
-        ceph_helper = ceph.CephApiOperator()
-        num_monitors, required_monitors, __ = \
-            ceph_helper.get_monitors_status(pecan.request.dbapi)
-        # CGTS 503 for now update monitors requirement until controller-0 is
-        # inventoried
-        # CGTS 1448
-        if num_monitors < required_monitors:
-            raise wsme.exc.ClientSideError(_(
-                "Only %d storage monitor available. "
-                "At least %s unlocked and enabled hosts with monitors are "
-                "required. Please ensure hosts with monitors are unlocked "
-                "and enabled.") % (num_monitors, required_monitors))
+        force = ihost['action'] == constants.FORCE_LOCK_ACTION
+        utils.check_node_lock_ceph_mon(ihost, force=force)
 
 
 def _check_disk(stor):
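
Reviewer note: here the helper is called without a ceph_helper argument, so it builds its own CephApiOperator, and the force flag is derived from the administrative action recorded on the host. A small illustration of that derivation (the literal 'force-lock' value and the ihost dict shape are assumptions for this sketch; the code above reads the constant from sysinv):

# Illustration only: deriving the force flag the way _check_host now does.
FORCE_LOCK_ACTION = 'force-lock'  # assumed literal; sysinv uses constants.FORCE_LOCK_ACTION

ihost = {'hostname': 'storage-0', 'action': FORCE_LOCK_ACTION}  # hypothetical host record
force = ihost['action'] == FORCE_LOCK_ACTION
assert force is True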


@@ -35,6 +35,7 @@ import tsconfig.tsconfig as tsc
 from oslo_config import cfg
 from oslo_log import log
 from sysinv._i18n import _
+from sysinv.common import ceph
 from sysinv.common import constants
 from sysinv.common import exception
 from sysinv.common import health

@@ -582,6 +583,27 @@ def check_all_ceph_mon_growth(ceph_mon_gib, host=None):
             cgtsvg_max_free_gib)
 
 
+def check_node_lock_ceph_mon(ihost, force=False, ceph_helper=None):
+    if not ceph_helper:
+        ceph_helper = ceph.CephApiOperator()
+
+    num_monitors, required_monitors, active_monitors = \
+        ceph_helper.get_monitors_status(pecan.request.dbapi)
+
+    host_has_last_mon = (num_monitors == 1 and
+                         ihost['hostname'] in active_monitors)
+    if (num_monitors - 1 < required_monitors and
+            not force or host_has_last_mon):
+        raise wsme.exc.ClientSideError(_(
+            "Only %d storage "
+            "monitor available. At least %d unlocked and "
+            "enabled hosts with monitors are required. Please"
+            " ensure hosts with monitors are unlocked and "
+            "enabled.") %
+            (num_monitors, required_monitors))
+
+
 class SBApiHelper(object):
     """ API Helper Class for manipulating Storage Backends.