Additional mechanism for unsafe force
In some scenarios, a force operation should not override a protective semantic check, even when --force is used. To provide a way to bypass those semantic checks completely, a new "--unsafe" option is introduced. Whenever an unsafe scenario is identified, with or without using --force, the following message is displayed in addition to the specific warning: "Use --force --unsafe if you wish to lock anyway."

This change includes a bypass for the following scenario (the only one identified so far):

3 hosts in the quorum:
- controller-0 unlocked and enabled
- controller-1 unlocked and enabled
- storage-0 unlocked and enabled

Expected behavior:
1. storage-0 is locked
2. Attempt to lock controller-1 (which is rejected)
3. Attempt to --force lock controller-1 (which should also be rejected)
4. Attempt to --force --unsafe lock controller-1 (which is allowed)

Test Plan:
PASS: Fresh Install and Bootstrap (AIO-SX and Storage)
PASS: Can't lock a controller when only 2 storage monitors are available
PASS: Can't force lock a controller when only 2 storage monitors are available
PASS: Successfully unsafe lock a controller when only 2 storage monitors are available

Closes-bug: 2027685
Change-Id: I1d9a57c472d888b9ffc9bbe3acd87fd77f84fa52
Signed-off-by: Matheus Guilhermino <matheus.machadoguilhermino@windriver.com>
This commit is contained in:
parent
5d85297e3f
commit
b73ab54bdd
|
@ -343,11 +343,17 @@ def do_host_update(cc, args):
|
|||
action='store_true',
|
||||
default=False,
|
||||
help="Force a lock operation ")
|
||||
@utils.arg('-u', '--unsafe',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help="Force an unsafe operation ")
|
||||
def do_host_lock(cc, args):
|
||||
"""Lock a host."""
|
||||
attributes = []
|
||||
|
||||
if args.force is True:
|
||||
if args.unsafe is True and args.force is True:
|
||||
# Unsafe forced lock operation
|
||||
attributes.append('action=force-unsafe-lock')
|
||||
elif args.force is True:
|
||||
# Forced lock operation
|
||||
attributes.append('action=force-lock')
|
||||
else:
|
||||
|
|
|
@ -902,6 +902,7 @@ class HostUpdate(object):
|
|||
(constants.FORCE_UNLOCK_ACTION, _("Force Unlocking")),
|
||||
(constants.LOCK_ACTION, _("Locking")),
|
||||
(constants.FORCE_LOCK_ACTION, _("Force Locking")),
|
||||
(constants.FORCE_UNSAFE_LOCK_ACTION, _("Unsafely Force Locking")),
|
||||
(constants.RESET_ACTION, _("Resetting")),
|
||||
(constants.REBOOT_ACTION, _("Rebooting")),
|
||||
(constants.REINSTALL_ACTION, _("Reinstalling")),
|
||||
|
@ -926,6 +927,7 @@ class HostUpdate(object):
|
|||
self._notify_vim_add_host = False
|
||||
self._notify_action_lock = False
|
||||
self._notify_action_lock_force = False
|
||||
self._notify_action_lock_force_unsafe = False
|
||||
self._skip_notify_mtce = False
|
||||
self._bm_type_changed_to_none = False
|
||||
self._nextstep = self.CONTINUE
|
||||
|
@ -1006,6 +1008,14 @@ class HostUpdate(object):
|
|||
def notify_action_lock_force(self, val):
|
||||
self._notify_action_lock_force = val
|
||||
|
||||
@property
def notify_action_lock_force_unsafe(self):
    """Whether mtce should be told to treat the lock as force-unsafe."""
    return self._notify_action_lock_force_unsafe

@notify_action_lock_force_unsafe.setter
def notify_action_lock_force_unsafe(self, value):
    self._notify_action_lock_force_unsafe = value
|
||||
|
||||
@property
|
||||
def ihost_val_prenotify(self):
|
||||
return self._ihost_val_prenotify
|
||||
|
@ -2153,7 +2163,8 @@ class HostController(rest.RestController):
|
|||
LOG.warn(_("No response vim_api %s on action=%s e=%s" %
|
||||
(ihost_obj['hostname'], action, e)))
|
||||
self._api_token = None
|
||||
if action == constants.FORCE_LOCK_ACTION:
|
||||
if (action == constants.FORCE_LOCK_ACTION or
|
||||
action == constants.FORCE_UNSAFE_LOCK_ACTION):
|
||||
pass
|
||||
else:
|
||||
# reject continuation if VIM rejects action
|
||||
|
@ -2182,7 +2193,7 @@ class HostController(rest.RestController):
|
|||
# Evaluate app reapply on lock/unlock/swact/reinstall
|
||||
if (not os.path.isfile(tsc.RESTORE_IN_PROGRESS_FLAG) and
|
||||
patched_ihost.get('action') in
|
||||
[constants.LOCK_ACTION, constants.FORCE_LOCK_ACTION,
|
||||
[constants.LOCK_ACTION, constants.FORCE_LOCK_ACTION, constants.FORCE_UNSAFE_LOCK_ACTION,
|
||||
constants.UNLOCK_ACTION, constants.FORCE_UNLOCK_ACTION,
|
||||
constants.SWACT_ACTION, constants.FORCE_SWACT_ACTION,
|
||||
constants.REINSTALL_ACTION]):
|
||||
|
@ -2220,6 +2231,8 @@ class HostController(rest.RestController):
|
|||
new_ihost_mtc['action'] = constants.LOCK_ACTION
|
||||
elif hostupdate.notify_action_lock_force:
|
||||
new_ihost_mtc['action'] = constants.FORCE_LOCK_ACTION
|
||||
elif hostupdate.notify_action_lock_force_unsafe:
|
||||
new_ihost_mtc['action'] = constants.FORCE_UNSAFE_LOCK_ACTION
|
||||
elif myaction == constants.FORCE_UNLOCK_ACTION:
|
||||
new_ihost_mtc['action'] = constants.UNLOCK_ACTION
|
||||
|
||||
|
@ -2485,7 +2498,7 @@ class HostController(rest.RestController):
|
|||
if (personality is not None and
|
||||
personality.find(constants.STORAGE_HOSTNAME) != -1 and
|
||||
not skip_ceph_checks):
|
||||
utils.check_node_lock_ceph_mon(ihost, force=True, ceph_helper=self._ceph)
|
||||
utils.check_node_lock_ceph_mon(ihost, unsafe=True, ceph_helper=self._ceph)
|
||||
|
||||
# If it is the last storage node to delete, we need to delete
|
||||
# ceph osd pools and update additional tier status to "defined"
|
||||
|
@ -4756,6 +4769,7 @@ class HostController(rest.RestController):
|
|||
constants.FORCE_UNLOCK_ACTION,
|
||||
constants.LOCK_ACTION,
|
||||
constants.FORCE_LOCK_ACTION,
|
||||
constants.FORCE_UNSAFE_LOCK_ACTION,
|
||||
constants.SWACT_ACTION,
|
||||
constants.FORCE_SWACT_ACTION,
|
||||
constants.RESET_ACTION,
|
||||
|
@ -4825,6 +4839,9 @@ class HostController(rest.RestController):
|
|||
elif action == constants.FORCE_LOCK_ACTION:
|
||||
if self.check_force_lock(hostupdate):
|
||||
rc = self.update_ihost_action(action, hostupdate)
|
||||
elif action == constants.FORCE_UNSAFE_LOCK_ACTION:
|
||||
if self.check_force_unsafe_lock(hostupdate):
|
||||
rc = self.update_ihost_action(action, hostupdate)
|
||||
elif action == constants.SWACT_ACTION:
|
||||
self.check_swact(hostupdate)
|
||||
elif action == constants.FORCE_SWACT_ACTION:
|
||||
|
@ -5188,6 +5205,8 @@ class HostController(rest.RestController):
|
|||
preval = {'ihost_action': ''}
|
||||
elif action == constants.FORCE_LOCK_ACTION:
|
||||
preval = {'ihost_action': constants.FORCE_LOCK_ACTION}
|
||||
elif action == constants.FORCE_UNSAFE_LOCK_ACTION:
|
||||
preval = {'ihost_action': constants.FORCE_UNSAFE_LOCK_ACTION}
|
||||
elif action == constants.LOCK_ACTION:
|
||||
preval = {'ihost_action': constants.LOCK_ACTION}
|
||||
elif (action == constants.UNLOCK_ACTION or
|
||||
|
@ -5468,7 +5487,8 @@ class HostController(rest.RestController):
|
|||
if hostupdate.ihost_orig['administrative'] == constants.ADMIN_UNLOCKED:
|
||||
host_action = hostupdate.ihost_orig['ihost_action'] or ""
|
||||
if (host_action.startswith(constants.LOCK_ACTION) or
|
||||
host_action.startswith(constants.FORCE_LOCK_ACTION)):
|
||||
host_action.startswith(constants.FORCE_LOCK_ACTION) or
|
||||
host_action.startswith(constants.FORCE_UNSAFE_LOCK_ACTION)):
|
||||
raise exception.HostLocking(
|
||||
host=hostupdate.ihost_orig['hostname'],
|
||||
action=host_action.strip('-'))
|
||||
|
@ -5663,6 +5683,16 @@ class HostController(rest.RestController):
|
|||
|
||||
return True
|
||||
|
||||
def check_force_unsafe_lock(self, hostupdate):
    """Personality-specific semantic checks for a force-unsafe-lock.

    Runs the per-personality lock checks with both ``force`` and
    ``unsafe`` set, so that protective semantic checks may be bypassed.
    Returns True when the lock may proceed.
    """
    # Dispatch on the personality being patched; personalities without
    # an entry here have no additional unsafe-lock checks.
    unsafe_checks = {
        constants.CONTROLLER: self.check_lock_controller,
        constants.STORAGE: self.check_lock_storage,
    }
    # NOTE(review): there is no WORKER entry — confirm whether
    # check_lock_worker should also run here, mirroring check_force_lock.
    check = unsafe_checks.get(hostupdate.ihost_patch.get('personality'))
    if check is not None:
        check(hostupdate, force=True, unsafe=True)
    return True
|
||||
|
||||
def check_force_lock(self, hostupdate):
|
||||
# personality specific lock checks
|
||||
personality = hostupdate.ihost_patch.get('personality')
|
||||
|
@ -5673,7 +5703,7 @@ class HostController(rest.RestController):
|
|||
self.check_lock_storage(hostupdate, force=True)
|
||||
return True
|
||||
|
||||
def check_lock_controller(self, hostupdate, force=False):
|
||||
def check_lock_controller(self, hostupdate, force=False, unsafe=False):
|
||||
"""Pre lock semantic checks for controller"""
|
||||
|
||||
LOG.info("%s ihost check_lock_controller" % hostupdate.displayid)
|
||||
|
@ -5726,13 +5756,18 @@ class HostController(rest.RestController):
|
|||
# never happen.
|
||||
return
|
||||
|
||||
# TODO(oponcea) remove once SM supports in-service config reload
|
||||
# Allow locking controllers when all storage nodes are locked.
|
||||
# Do not allow locking controllers when all storage nodes are locked.
|
||||
# Allow lock if unsafe=True
|
||||
if stor_model == constants.CEPH_STORAGE_MODEL:
|
||||
for node in st_nodes:
|
||||
if (node['administrative'] == constants.ADMIN_UNLOCKED):
|
||||
break
|
||||
else:
|
||||
if not unsafe:
|
||||
raise wsme.exc.ClientSideError(
|
||||
_("Rejected: Can not lock host %s when storage nodes are locked "
|
||||
"Use --force_unsafe if you wish to lock anyway."
|
||||
% hostupdate.displayid))
|
||||
return
|
||||
|
||||
if (hostupdate.ihost_orig['administrative'] ==
|
||||
|
@ -5742,7 +5777,7 @@ class HostController(rest.RestController):
|
|||
# If the node is unlocked and enabled we need to check that we
|
||||
# have enough storage monitors.
|
||||
utils.check_node_lock_ceph_mon(
|
||||
hostupdate.ihost_orig, force=force, ceph_helper=self._ceph)
|
||||
hostupdate.ihost_orig, unsafe=unsafe, ceph_helper=self._ceph)
|
||||
|
||||
if not force:
|
||||
# sm-lock-pre-check
|
||||
|
@ -6352,7 +6387,7 @@ class HostController(rest.RestController):
|
|||
raise wsme.exc.ClientSideError(
|
||||
_("Swact action not allowed. %s apply is in progress." % _app.name))
|
||||
|
||||
def check_lock_storage(self, hostupdate, force=False):
|
||||
def check_lock_storage(self, hostupdate, force=False, unsafe=False):
|
||||
"""Pre lock semantic checks for storage"""
|
||||
LOG.info("%s ihost check_lock_storage" % hostupdate.displayid)
|
||||
|
||||
|
@ -6375,7 +6410,7 @@ class HostController(rest.RestController):
|
|||
hostupdate.ihost_orig['operational'] ==
|
||||
constants.OPERATIONAL_ENABLED):
|
||||
utils.check_node_lock_ceph_mon(
|
||||
hostupdate.ihost_orig, force=force, ceph_helper=self._ceph)
|
||||
hostupdate.ihost_orig, unsafe=unsafe, ceph_helper=self._ceph)
|
||||
|
||||
storage_nodes = pecan.request.dbapi.ihost_get_by_personality(
|
||||
constants.STORAGE)
|
||||
|
@ -6389,7 +6424,8 @@ class HostController(rest.RestController):
|
|||
ihost_action_locking = False
|
||||
ihost_action = node['ihost_action'] or ""
|
||||
|
||||
if (ihost_action.startswith(constants.FORCE_LOCK_ACTION) or
|
||||
if (ihost_action.startswith(constants.FORCE_UNSAFE_LOCK_ACTION) or
|
||||
ihost_action.startswith(constants.FORCE_LOCK_ACTION) or
|
||||
ihost_action.startswith(constants.LOCK_ACTION)):
|
||||
ihost_action_locking = True
|
||||
|
||||
|
@ -6447,7 +6483,7 @@ class HostController(rest.RestController):
|
|||
"and replication is lost. This may result in data loss. ")
|
||||
raise wsme.exc.ClientSideError(msg)
|
||||
|
||||
def check_lock_worker(self, hostupdate, force=False):
|
||||
def check_lock_worker(self, hostupdate, force=False, unsafe=False):
|
||||
"""Pre lock semantic checks for worker"""
|
||||
|
||||
hostname = hostupdate.ihost_patch.get('hostname')
|
||||
|
@ -6520,7 +6556,7 @@ class HostController(rest.RestController):
|
|||
hostupdate.ihost_orig['operational'] ==
|
||||
constants.OPERATIONAL_ENABLED):
|
||||
utils.check_node_lock_ceph_mon(
|
||||
hostupdate.ihost_orig, force=force, ceph_helper=self._ceph)
|
||||
hostupdate.ihost_orig, unsafe=unsafe, ceph_helper=self._ceph)
|
||||
|
||||
def check_unlock_interfaces(self, hostupdate):
|
||||
"""Semantic check for interfaces on host-unlock."""
|
||||
|
@ -6690,6 +6726,8 @@ class HostController(rest.RestController):
|
|||
self._handle_lock_action(hostupdate)
|
||||
elif action == constants.FORCE_LOCK_ACTION:
|
||||
self._handle_force_lock_action(hostupdate)
|
||||
elif action == constants.FORCE_UNSAFE_LOCK_ACTION:
|
||||
self._handle_force_unsafe_lock_action(hostupdate)
|
||||
elif action == constants.SWACT_ACTION:
|
||||
self._stage_swact(hostupdate)
|
||||
elif action == constants.FORCE_SWACT_ACTION:
|
||||
|
@ -6861,7 +6899,8 @@ class HostController(rest.RestController):
|
|||
|
||||
ihost_task_string = ihost['ihost_action'] or ""
|
||||
if ((ihost_task_string.startswith(constants.LOCK_ACTION) or
|
||||
ihost_task_string.startswith(constants.FORCE_LOCK_ACTION)) and
|
||||
ihost_task_string.startswith(constants.FORCE_LOCK_ACTION) or
|
||||
ihost_task_string.startswith(constants.FORCE_UNSAFE_LOCK_ACTION)) and
|
||||
ihost['administrative'] != constants.ADMIN_LOCKED):
|
||||
# passed - skip reset for force-lock
|
||||
# iHost['ihost_action'] = constants.LOCK_ACTION
|
||||
|
@ -6929,6 +6968,10 @@ class HostController(rest.RestController):
|
|||
# allow mtce to reset the host
|
||||
hostupdate.notify_mtce = True
|
||||
hostupdate.notify_action_lock_force = True
|
||||
elif ihost_task_string.startswith(constants.FORCE_UNSAFE_LOCK_ACTION):
|
||||
# allow mtce to reset the host
|
||||
hostupdate.notify_mtce = True
|
||||
hostupdate.notify_action_lock_force_unsafe = True
|
||||
else:
|
||||
hostupdate.skip_notify_mtce = True
|
||||
LOG.warn("%s Skipping vim services disable notification task=%s" %
|
||||
|
@ -7029,6 +7072,17 @@ class HostController(rest.RestController):
|
|||
hostupdate.ihost_val_prenotify_update(val)
|
||||
hostupdate.ihost_val.update(val)
|
||||
|
||||
@staticmethod
def _handle_force_unsafe_lock_action(hostupdate):
    """Handle host-force-unsafe-lock action.

    Flags the update so the VIM is notified of the action, suppresses
    the direct mtce notification, and records the pending
    force-unsafe-lock on the host record (both prenotify and final).
    """
    LOG.info("%s _handle_force_unsafe_lock_action" % hostupdate.displayid)

    # The VIM is told about this lock; mtce is deliberately skipped here.
    hostupdate.skip_notify_mtce = True
    hostupdate.notify_vim_action = True

    pending_action = {'ihost_action': constants.FORCE_UNSAFE_LOCK_ACTION}
    hostupdate.ihost_val_prenotify_update(pending_action)
    hostupdate.ihost_val.update(pending_action)
|
||||
|
||||
@staticmethod
|
||||
def _check_patch_requirements(region_name,
|
||||
applied_patches=None,
|
||||
|
|
|
@ -626,11 +626,11 @@ def _check_host(stor):
|
|||
raise wsme.exc.ClientSideError(_(
|
||||
"System must have a %s backend" % constants.SB_TYPE_CEPH))
|
||||
|
||||
# semantic check: whether host can be locked or force locked based on
|
||||
# semantic check: whether host can be locked or unsafely force locked based on
|
||||
# ceph monitors availability
|
||||
if not cutils.is_aio_system(pecan.request.dbapi):
|
||||
force = ihost['action'] == constants.FORCE_LOCK_ACTION
|
||||
utils.check_node_lock_ceph_mon(ihost, force=force)
|
||||
unsafe = ihost['action'] == constants.FORCE_UNSAFE_LOCK_ACTION
|
||||
utils.check_node_lock_ceph_mon(ihost, unsafe=unsafe)
|
||||
|
||||
|
||||
def _check_disk(stor):
|
||||
|
|
|
@ -583,7 +583,7 @@ def check_all_ceph_mon_growth(ceph_mon_gib, host=None):
|
|||
cgtsvg_max_free_gib)
|
||||
|
||||
|
||||
def check_node_lock_ceph_mon(ihost, force=False, ceph_helper=None):
|
||||
def check_node_lock_ceph_mon(ihost, unsafe=False, ceph_helper=None):
|
||||
if not ceph_helper:
|
||||
ceph_helper = ceph.CephApiOperator()
|
||||
|
||||
|
@ -594,13 +594,14 @@ def check_node_lock_ceph_mon(ihost, force=False, ceph_helper=None):
|
|||
|
||||
if (num_monitors <= required_monitors
|
||||
and is_in_active_monitors
|
||||
and not force):
|
||||
and not unsafe):
|
||||
raise wsme.exc.ClientSideError(_(
|
||||
"Only %d storage "
|
||||
"monitor available. At least %d unlocked and "
|
||||
"enabled hosts with monitors are required. Please"
|
||||
" ensure hosts with monitors are unlocked and "
|
||||
"enabled.") %
|
||||
"enabled."
|
||||
"Use --force --unsafe if you wish to lock anyway.") %
|
||||
(num_monitors, required_monitors))
|
||||
|
||||
|
||||
|
|
|
@ -101,7 +101,8 @@ def vim_host_action(token, uuid, hostname, action, timeout):
|
|||
response = None
|
||||
_valid_actions = [constants.UNLOCK_ACTION,
|
||||
constants.LOCK_ACTION,
|
||||
constants.FORCE_LOCK_ACTION]
|
||||
constants.FORCE_LOCK_ACTION,
|
||||
constants.FORCE_UNSAFE_LOCK_ACTION]
|
||||
|
||||
if action not in _valid_actions:
|
||||
LOG.error("Unrecognized vim_host_action=%s" % action)
|
||||
|
|
|
@ -830,7 +830,8 @@ class CephApiOperator(object):
|
|||
ihost = db_api.ihost_get(mon['forihostid'])
|
||||
host_action = ihost['ihost_action'] or ""
|
||||
locking = (host_action.startswith(constants.LOCK_ACTION) or
|
||||
host_action.startswith(constants.FORCE_LOCK_ACTION))
|
||||
host_action.startswith(constants.FORCE_LOCK_ACTION) or
|
||||
host_action.startswith(constants.FORCE_UNSAFE_LOCK_ACTION))
|
||||
if (ihost['administrative'] == constants.ADMIN_UNLOCKED and
|
||||
ihost['operational'] == constants.OPERATIONAL_ENABLED and
|
||||
not locking):
|
||||
|
|
|
@ -74,6 +74,7 @@ UNLOCK_ACTION = 'unlock'
|
|||
FORCE_UNLOCK_ACTION = 'force-unlock'
|
||||
LOCK_ACTION = 'lock'
|
||||
FORCE_LOCK_ACTION = 'force-lock'
|
||||
FORCE_UNSAFE_LOCK_ACTION = 'force-unsafe-lock'
|
||||
REBOOT_ACTION = 'reboot'
|
||||
RESET_ACTION = 'reset'
|
||||
REINSTALL_ACTION = 'reinstall'
|
||||
|
@ -108,7 +109,8 @@ MTCE_ACTIONS = [REBOOT_ACTION,
|
|||
|
||||
# These go to VIM First
|
||||
VIM_ACTIONS = [LOCK_ACTION,
|
||||
FORCE_LOCK_ACTION]
|
||||
FORCE_LOCK_ACTION,
|
||||
FORCE_UNSAFE_LOCK_ACTION]
|
||||
|
||||
CONFIG_ACTIONS = [SUBFUNCTION_CONFIG_ACTION]
|
||||
|
||||
|
@ -167,6 +169,7 @@ ADMIN_UNLOCKED = 'unlocked'
|
|||
ADMIN_LOCKED = 'locked'
|
||||
LOCKING = 'Locking'
|
||||
FORCE_LOCKING = "Force Locking"
|
||||
UNSAFELY_FORCE_LOCKING = "Unsafely Force Locking"
|
||||
OPERATIONAL_ENABLED = 'enabled'
|
||||
OPERATIONAL_DISABLED = 'disabled'
|
||||
|
||||
|
@ -1962,6 +1965,7 @@ APP_EVALUATE_REAPPLY_TYPE_HOST_DELETE = 'host-delete'
|
|||
APP_EVALUATE_REAPPLY_TYPE_HOST_REINSTALL = REINSTALL_ACTION
|
||||
APP_EVALUATE_REAPPLY_TYPE_HOST_LOCK = LOCK_ACTION
|
||||
APP_EVALUATE_REAPPLY_TYPE_HOST_FORCE_LOCK = FORCE_LOCK_ACTION
|
||||
APP_EVALUATE_REAPPLY_TYPE_HOST_FORCE_UNSAFE_LOCK = FORCE_UNSAFE_LOCK_ACTION
|
||||
APP_EVALUATE_REAPPLY_TYPE_HOST_UNLOCK = UNLOCK_ACTION
|
||||
APP_EVALUATE_REAPPLY_TYPE_HOST_FORCE_UNLOCK = FORCE_UNLOCK_ACTION
|
||||
APP_EVALUATE_REAPPLY_TYPE_HOST_SWACT = SWACT_ACTION
|
||||
|
@ -1983,6 +1987,8 @@ APP_EVALUATE_REAPPLY_TRIGGER_TO_METADATA_MAP = {
|
|||
APP_EVALUATE_REAPPLY_TYPE_HOST_LOCK,
|
||||
FORCE_LOCK_ACTION:
|
||||
APP_EVALUATE_REAPPLY_TYPE_HOST_FORCE_LOCK,
|
||||
FORCE_UNSAFE_LOCK_ACTION:
|
||||
APP_EVALUATE_REAPPLY_TYPE_HOST_FORCE_UNSAFE_LOCK,
|
||||
SWACT_ACTION:
|
||||
APP_EVALUATE_REAPPLY_TYPE_HOST_SWACT,
|
||||
FORCE_SWACT_ACTION:
|
||||
|
|
|
@ -6277,11 +6277,15 @@ class ConductorManager(service.PeriodicService):
|
|||
if ihost.administrative == constants.ADMIN_UNLOCKED:
|
||||
ihost_action_str = ihost.ihost_action or ""
|
||||
|
||||
if (ihost_action_str.startswith(constants.FORCE_LOCK_ACTION) or
|
||||
ihost_action_str.startswith(constants.LOCK_ACTION)):
|
||||
if (ihost_action_str.startswith(constants.FORCE_UNSAFE_LOCK_ACTION) or
|
||||
ihost_action_str.startswith(constants.FORCE_LOCK_ACTION) or
|
||||
ihost_action_str.startswith(constants.LOCK_ACTION)):
|
||||
|
||||
task_str = ihost.task or ""
|
||||
if (('--' in ihost_action_str and
|
||||
ihost_action_str.startswith(
|
||||
constants.FORCE_UNSAFE_LOCK_ACTION)) or
|
||||
('--' in ihost_action_str and
|
||||
ihost_action_str.startswith(
|
||||
constants.FORCE_LOCK_ACTION)) or
|
||||
('----------' in ihost_action_str and
|
||||
|
@ -6303,14 +6307,26 @@ class ConductorManager(service.PeriodicService):
|
|||
self._api_token, self._mtc_address, self._mtc_port,
|
||||
ihost_mtc, timeout_in_secs)
|
||||
|
||||
elif ihost_action_str.startswith(constants.FORCE_UNSAFE_LOCK_ACTION):
|
||||
timeout_in_secs = 6
|
||||
ihost_mtc['operation'] = 'modify'
|
||||
ihost_mtc['action'] = constants.FORCE_LOCK_ACTION
|
||||
ihost_mtc['task'] = constants.FORCE_LOCKING
|
||||
LOG.warn("ihost_action override %s" %
|
||||
ihost_mtc)
|
||||
mtce_api.host_modify(
|
||||
self._api_token, self._mtc_address, self._mtc_port,
|
||||
ihost_mtc, timeout_in_secs)
|
||||
|
||||
# need time for FORCE_LOCK mtce to clear
|
||||
if ('----' in ihost_action_str):
|
||||
ihost_action_str = ""
|
||||
else:
|
||||
ihost_action_str += "-"
|
||||
|
||||
if (task_str.startswith(constants.FORCE_LOCKING) or
|
||||
task_str.startswith(constants.LOCKING)):
|
||||
if (task_str.startswith(constants.UNSAFELY_FORCE_LOCKING) or
|
||||
task_str.startswith(constants.FORCE_LOCKING) or
|
||||
task_str.startswith(constants.LOCKING)):
|
||||
val = {'task': "",
|
||||
'ihost_action': ihost_action_str,
|
||||
'vim_progress_status': ""}
|
||||
|
@ -6319,8 +6335,9 @@ class ConductorManager(service.PeriodicService):
|
|||
'vim_progress_status': ""}
|
||||
else:
|
||||
ihost_action_str += "-"
|
||||
if (task_str.startswith(constants.FORCE_LOCKING) or
|
||||
task_str.startswith(constants.LOCKING)):
|
||||
if (task_str.startswith(constants.UNSAFELY_FORCE_LOCKING) or
|
||||
task_str.startswith(constants.FORCE_LOCKING) or
|
||||
task_str.startswith(constants.LOCKING)):
|
||||
task_str += "-"
|
||||
val = {'task': task_str,
|
||||
'ihost_action': ihost_action_str}
|
||||
|
@ -6330,8 +6347,9 @@ class ConductorManager(service.PeriodicService):
|
|||
self.dbapi.ihost_update(ihost.uuid, val)
|
||||
else: # Administrative locked already
|
||||
task_str = ihost.task or ""
|
||||
if (task_str.startswith(constants.FORCE_LOCKING) or
|
||||
task_str.startswith(constants.LOCKING)):
|
||||
if (task_str.startswith(constants.UNSAFELY_FORCE_LOCKING) or
|
||||
task_str.startswith(constants.FORCE_LOCKING) or
|
||||
task_str.startswith(constants.LOCKING)):
|
||||
val = {'task': ""}
|
||||
self.dbapi.ihost_update(ihost.uuid, val)
|
||||
|
||||
|
|
|
@ -2510,6 +2510,47 @@ class TestPatch(TestHost):
|
|||
# Verify that the apps reapply was called
|
||||
self.fake_conductor_api.evaluate_apps_reapply.assert_called_once()
|
||||
|
||||
def test_unsafe_force_lock_action_controller(self):
    """A force-unsafe-lock of a controller is accepted and sent to VIM."""
    # Provision controller-0 unlocked/enabled so controller-1 is not the
    # only in-service controller.
    self._create_controller_0(
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE)

    # Provision controller-1, the lock target.
    controller_1 = self._create_controller_1(
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE)

    # Issue the unsafe force lock.
    response = self._patch_host_action(controller_1['hostname'],
                                       constants.FORCE_UNSAFE_LOCK_ACTION,
                                       'sysinv-test')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status_code, http_client.OK)

    # The SM lock pre-check is skipped for an unsafe force lock.
    self.mock_sm_api_lock_pre_check.assert_not_called()
    # The unsafe force lock is forwarded to the VIM.
    self.mock_vim_api_host_action.assert_called_with(
        mock.ANY,
        controller_1['uuid'],
        controller_1['hostname'],
        constants.FORCE_UNSAFE_LOCK_ACTION,
        mock.ANY)
    # The host is neither reconfigured nor modified in maintenance.
    self.fake_conductor_api.configure_ihost.assert_not_called()
    self.mock_mtce_api_host_modify.assert_not_called()
    # The pending host action is cleared once handled.
    result = self.get_json('/ihosts/%s' % controller_1['hostname'])
    self.assertEqual(constants.NONE_ACTION, result['action'])
    # The lock triggers an apps reapply evaluation.
    self.fake_conductor_api.evaluate_apps_reapply.assert_called_once()
|
||||
|
||||
def test_unlock_action_controller_while_upgrading_kubelet(self):
|
||||
# Create controller-0
|
||||
c0_host = self._create_controller_0(
|
||||
|
|
Loading…
Reference in New Issue