From b73ab54bddfc0f532a94b9c5bbf216b8befea651 Mon Sep 17 00:00:00 2001
From: Matheus Guilhermino
Date: Tue, 11 Jul 2023 10:19:43 -0300
Subject: [PATCH] Additional mechanism for unsafe force

In some scenarios, a force operation should not override a protective
semantic check, even when --force is used. To provide a way to bypass
those semantic checks completely, a new "--unsafe" option is introduced.

Whenever an unsafe scenario is identified, with or without --force, the
following message is displayed in addition to the specific warning:
"Use --force --unsafe if you wish to lock anyway."

This change includes a bypass for the following scenario (the only one
identified so far):

3 hosts in the quorum:
  controller-0 unlocked and enabled
  controller-1 unlocked and enabled
  storage-0 unlocked and enabled

Expected behavior:
  storage-0 is locked
  Attempt to lock controller-1 (rejected)
  Attempt to --force lock controller-1 (rejected)
  Attempt to --force --unsafe lock controller-1 (allowed)

Test Plan:
PASS: Fresh Install and Bootstrap (AIO-SX and Storage)
PASS: Can't lock a controller when only 2 storage monitors are available
PASS: Can't force lock a controller when only 2 storage monitors are available
PASS: Successfully unsafe lock a controller when only 2 storage monitors are available

Closes-bug: 2027685

Change-Id: I1d9a57c472d888b9ffc9bbe3acd87fd77f84fa52
Signed-off-by: Matheus Guilhermino
---
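For illustration only (not part of the change itself), assuming the
three-host scenario described above, the intended CLI flow with the new
option would look roughly like this; host names are taken from that
scenario and command output is omitted:

  system host-lock storage-0                        # allowed
  system host-lock controller-1                     # rejected: monitor quorum check
  system host-lock --force controller-1             # still rejected
  system host-lock --force --unsafe controller-1    # allowed, bypasses the check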
 .../cgts-client/cgtsclient/v1/iHost_shell.py  | 10 ++-
 .../sysinv/sysinv/api/controllers/v1/host.py  | 82 +++++++++++++++----
 .../sysinv/api/controllers/v1/storage.py      |  6 +-
 .../sysinv/sysinv/api/controllers/v1/utils.py |  7 +-
 .../sysinv/api/controllers/v1/vim_api.py      |  3 +-
 sysinv/sysinv/sysinv/sysinv/common/ceph.py    |  3 +-
 .../sysinv/sysinv/sysinv/common/constants.py  |  8 +-
 .../sysinv/sysinv/sysinv/conductor/manager.py | 34 ++++++--
 .../sysinv/sysinv/tests/api/test_host.py      | 41 ++++++++++
 9 files changed, 161 insertions(+), 33 deletions(-)

diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py
index e976d8a664..b3dabed330 100755
--- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py
+++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py
@@ -343,11 +343,17 @@ def do_host_update(cc, args):
            action='store_true',
            default=False,
            help="Force a lock operation ")
+@utils.arg('-u', '--unsafe',
+           action='store_true',
+           default=False,
+           help="Force an unsafe operation ")
 def do_host_lock(cc, args):
     """Lock a host."""
     attributes = []
-
-    if args.force is True:
+    if args.unsafe is True and args.force is True:
+        # Unsafe forced lock operation
+        attributes.append('action=force-unsafe-lock')
+    elif args.force is True:
         # Forced lock operation
         attributes.append('action=force-lock')
     else:
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py
index 8ef6ee0bf0..b73fa8814f 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py
@@ -902,6 +902,7 @@ class HostUpdate(object):
         (constants.FORCE_UNLOCK_ACTION, _("Force Unlocking")),
         (constants.LOCK_ACTION, _("Locking")),
         (constants.FORCE_LOCK_ACTION, _("Force Locking")),
+        (constants.FORCE_UNSAFE_LOCK_ACTION, _("Unsafely Force Locking")),
         (constants.RESET_ACTION, _("Resetting")),
         (constants.REBOOT_ACTION, _("Rebooting")),
         (constants.REINSTALL_ACTION, _("Reinstalling")),
@@ -926,6 +927,7 @@ class HostUpdate(object):
         self._notify_vim_add_host = False
         self._notify_action_lock = False
         self._notify_action_lock_force = False
+        self._notify_action_lock_force_unsafe = False
         self._skip_notify_mtce = False
         self._bm_type_changed_to_none = False
         self._nextstep = self.CONTINUE
@@ -1006,6 +1008,14 @@ class HostUpdate(object):
     def notify_action_lock_force(self, val):
         self._notify_action_lock_force = val
 
+    @property
+    def notify_action_lock_force_unsafe(self):
+        return self._notify_action_lock_force_unsafe
+
+    @notify_action_lock_force_unsafe.setter
+    def notify_action_lock_force_unsafe(self, val):
+        self._notify_action_lock_force_unsafe = val
+
     @property
     def ihost_val_prenotify(self):
         return self._ihost_val_prenotify
@@ -2153,7 +2163,8 @@ class HostController(rest.RestController):
                 LOG.warn(_("No response vim_api %s on action=%s e=%s" %
                            (ihost_obj['hostname'], action, e)))
                 self._api_token = None
-                if action == constants.FORCE_LOCK_ACTION:
+                if (action == constants.FORCE_LOCK_ACTION or
+                        action == constants.FORCE_UNSAFE_LOCK_ACTION):
                     pass
                 else:
                     # reject continuation if VIM rejects action
@@ -2182,7 +2193,7 @@ class HostController(rest.RestController):
         # Evaluate app reapply on lock/unlock/swact/reinstall
         if (not os.path.isfile(tsc.RESTORE_IN_PROGRESS_FLAG) and
                 patched_ihost.get('action') in
-                [constants.LOCK_ACTION, constants.FORCE_LOCK_ACTION,
+                [constants.LOCK_ACTION, constants.FORCE_LOCK_ACTION, constants.FORCE_UNSAFE_LOCK_ACTION,
                 constants.UNLOCK_ACTION, constants.FORCE_UNLOCK_ACTION,
                 constants.SWACT_ACTION, constants.FORCE_SWACT_ACTION,
                 constants.REINSTALL_ACTION]):
@@ -2220,6 +2231,8 @@ class HostController(rest.RestController):
                     new_ihost_mtc['action'] = constants.LOCK_ACTION
                 elif hostupdate.notify_action_lock_force:
                     new_ihost_mtc['action'] = constants.FORCE_LOCK_ACTION
+                elif hostupdate.notify_action_lock_force_unsafe:
+                    new_ihost_mtc['action'] = constants.FORCE_UNSAFE_LOCK_ACTION
 
             elif myaction == constants.FORCE_UNLOCK_ACTION:
                 new_ihost_mtc['action'] = constants.UNLOCK_ACTION
@@ -2485,7 +2498,7 @@ class HostController(rest.RestController):
         if (personality is not None and
                 personality.find(constants.STORAGE_HOSTNAME) != -1 and
                 not skip_ceph_checks):
-            utils.check_node_lock_ceph_mon(ihost, force=True, ceph_helper=self._ceph)
+            utils.check_node_lock_ceph_mon(ihost, unsafe=True, ceph_helper=self._ceph)
 
             # If it is the last storage node to delete, we need to delete
             # ceph osd pools and update additional tier status to "defined"
@@ -4756,6 +4769,7 @@ class HostController(rest.RestController):
                    constants.FORCE_UNLOCK_ACTION,
                    constants.LOCK_ACTION,
                    constants.FORCE_LOCK_ACTION,
+                   constants.FORCE_UNSAFE_LOCK_ACTION,
                    constants.SWACT_ACTION,
                    constants.FORCE_SWACT_ACTION,
                    constants.RESET_ACTION,
@@ -4825,6 +4839,9 @@ class HostController(rest.RestController):
         elif action == constants.FORCE_LOCK_ACTION:
             if self.check_force_lock(hostupdate):
                 rc = self.update_ihost_action(action, hostupdate)
+        elif action == constants.FORCE_UNSAFE_LOCK_ACTION:
+            if self.check_force_unsafe_lock(hostupdate):
+                rc = self.update_ihost_action(action, hostupdate)
         elif action == constants.SWACT_ACTION:
             self.check_swact(hostupdate)
         elif action == constants.FORCE_SWACT_ACTION:
@@ -5188,6 +5205,8 @@ class HostController(rest.RestController):
             preval = {'ihost_action': ''}
         elif action == constants.FORCE_LOCK_ACTION:
             preval = {'ihost_action': constants.FORCE_LOCK_ACTION}
+        elif action == constants.FORCE_UNSAFE_LOCK_ACTION:
+            preval = {'ihost_action': constants.FORCE_UNSAFE_LOCK_ACTION}
         elif action == constants.LOCK_ACTION:
             preval = {'ihost_action': constants.LOCK_ACTION}
         elif (action == constants.UNLOCK_ACTION or
@@ -5468,7 +5487,8 @@ class HostController(rest.RestController):
         if hostupdate.ihost_orig['administrative'] == constants.ADMIN_UNLOCKED:
             host_action = hostupdate.ihost_orig['ihost_action'] or ""
             if (host_action.startswith(constants.LOCK_ACTION) or
-                    host_action.startswith(constants.FORCE_LOCK_ACTION)):
+                    host_action.startswith(constants.FORCE_LOCK_ACTION) or
+                    host_action.startswith(constants.FORCE_UNSAFE_LOCK_ACTION)):
                 raise exception.HostLocking(
                     host=hostupdate.ihost_orig['hostname'],
                     action=host_action.strip('-'))
@@ -5663,6 +5683,16 @@ class HostController(rest.RestController):
 
         return True
 
+    def check_force_unsafe_lock(self, hostupdate):
+        # personality specific lock checks
+        personality = hostupdate.ihost_patch.get('personality')
+        if personality == constants.CONTROLLER:
+            self.check_lock_controller(hostupdate, force=True, unsafe=True)
+
+        elif personality == constants.STORAGE:
+            self.check_lock_storage(hostupdate, force=True, unsafe=True)
+        return True
+
     def check_force_lock(self, hostupdate):
         # personality specific lock checks
         personality = hostupdate.ihost_patch.get('personality')
@@ -5673,7 +5703,7 @@ class HostController(rest.RestController):
             self.check_lock_storage(hostupdate, force=True)
         return True
 
-    def check_lock_controller(self, hostupdate, force=False):
+    def check_lock_controller(self, hostupdate, force=False, unsafe=False):
         """Pre lock semantic checks for controller"""
 
         LOG.info("%s ihost check_lock_controller" % hostupdate.displayid)
@@ -5726,13 +5756,18 @@ class HostController(rest.RestController):
                 # never happen.
                 return
 
-        # TODO(oponcea) remove once SM supports in-service config reload
-        # Allow locking controllers when all storage nodes are locked.
+        # Do not allow locking controllers when all storage nodes are locked.
+        # Allow the lock only if unsafe=True.
         if stor_model == constants.CEPH_STORAGE_MODEL:
             for node in st_nodes:
                 if (node['administrative'] == constants.ADMIN_UNLOCKED):
                     break
             else:
+                if not unsafe:
+                    raise wsme.exc.ClientSideError(
+                        _("Rejected: Can not lock host %s when storage nodes are locked. "
+                          "Use --force --unsafe if you wish to lock anyway."
+                          % hostupdate.displayid))
                 return
 
         if (hostupdate.ihost_orig['administrative'] ==
@@ -5742,7 +5777,7 @@ class HostController(rest.RestController):
             # If the node is unlocked and enabled we need to check that we
             # have enough storage monitors.
             utils.check_node_lock_ceph_mon(
-                hostupdate.ihost_orig, force=force, ceph_helper=self._ceph)
+                hostupdate.ihost_orig, unsafe=unsafe, ceph_helper=self._ceph)
 
         if not force:
             # sm-lock-pre-check
@@ -6352,7 +6387,7 @@ class HostController(rest.RestController):
                     raise wsme.exc.ClientSideError(
                         _("Swact action not allowed. %s apply is in progress."
                          % _app.name))
 
-    def check_lock_storage(self, hostupdate, force=False):
+    def check_lock_storage(self, hostupdate, force=False, unsafe=False):
         """Pre lock semantic checks for storage"""
 
         LOG.info("%s ihost check_lock_storage" % hostupdate.displayid)
@@ -6375,7 +6410,7 @@ class HostController(rest.RestController):
                 hostupdate.ihost_orig['operational'] ==
                 constants.OPERATIONAL_ENABLED):
             utils.check_node_lock_ceph_mon(
-                hostupdate.ihost_orig, force=force, ceph_helper=self._ceph)
+                hostupdate.ihost_orig, unsafe=unsafe, ceph_helper=self._ceph)
 
         storage_nodes = pecan.request.dbapi.ihost_get_by_personality(
             constants.STORAGE)
@@ -6389,7 +6424,8 @@ class HostController(rest.RestController):
             ihost_action_locking = False
             ihost_action = node['ihost_action'] or ""
 
-            if (ihost_action.startswith(constants.FORCE_LOCK_ACTION) or
+            if (ihost_action.startswith(constants.FORCE_UNSAFE_LOCK_ACTION) or
+                    ihost_action.startswith(constants.FORCE_LOCK_ACTION) or
                     ihost_action.startswith(constants.LOCK_ACTION)):
                 ihost_action_locking = True
@@ -6447,7 +6483,7 @@ class HostController(rest.RestController):
                       "and replication is lost. This may result in data loss. ")
                 raise wsme.exc.ClientSideError(msg)
 
-    def check_lock_worker(self, hostupdate, force=False):
+    def check_lock_worker(self, hostupdate, force=False, unsafe=False):
         """Pre lock semantic checks for worker"""
 
         hostname = hostupdate.ihost_patch.get('hostname')
@@ -6520,7 +6556,7 @@ class HostController(rest.RestController):
                 hostupdate.ihost_orig['operational'] ==
                 constants.OPERATIONAL_ENABLED):
             utils.check_node_lock_ceph_mon(
-                hostupdate.ihost_orig, force=force, ceph_helper=self._ceph)
+                hostupdate.ihost_orig, unsafe=unsafe, ceph_helper=self._ceph)
 
     def check_unlock_interfaces(self, hostupdate):
         """Semantic check for interfaces on host-unlock."""
@@ -6690,6 +6726,8 @@ class HostController(rest.RestController):
             self._handle_lock_action(hostupdate)
         elif action == constants.FORCE_LOCK_ACTION:
             self._handle_force_lock_action(hostupdate)
+        elif action == constants.FORCE_UNSAFE_LOCK_ACTION:
+            self._handle_force_unsafe_lock_action(hostupdate)
         elif action == constants.SWACT_ACTION:
             self._stage_swact(hostupdate)
         elif action == constants.FORCE_SWACT_ACTION:
@@ -6861,7 +6899,8 @@ class HostController(rest.RestController):
 
         ihost_task_string = ihost['ihost_action'] or ""
         if ((ihost_task_string.startswith(constants.LOCK_ACTION) or
-                ihost_task_string.startswith(constants.FORCE_LOCK_ACTION)) and
+                ihost_task_string.startswith(constants.FORCE_LOCK_ACTION) or
+                ihost_task_string.startswith(constants.FORCE_UNSAFE_LOCK_ACTION)) and
                 ihost['administrative'] != constants.ADMIN_LOCKED):
             # passed - skip reset for force-lock
             # iHost['ihost_action'] = constants.LOCK_ACTION
@@ -6929,6 +6968,10 @@ class HostController(rest.RestController):
             # allow mtce to reset the host
             hostupdate.notify_mtce = True
             hostupdate.notify_action_lock_force = True
+        elif ihost_task_string.startswith(constants.FORCE_UNSAFE_LOCK_ACTION):
+            # allow mtce to reset the host
+            hostupdate.notify_mtce = True
+            hostupdate.notify_action_lock_force_unsafe = True
         else:
             hostupdate.skip_notify_mtce = True
             LOG.warn("%s Skipping vim services disable notification task=%s" %
@@ -7029,6 +7072,17 @@ class HostController(rest.RestController):
         hostupdate.ihost_val_prenotify_update(val)
         hostupdate.ihost_val.update(val)
 
+    @staticmethod
+    def _handle_force_unsafe_lock_action(hostupdate):
+        """Handle host-force-unsafe-lock action."""
+        LOG.info("%s _handle_force_unsafe_lock_action" % hostupdate.displayid)
+
+        hostupdate.notify_vim_action = True
+        hostupdate.skip_notify_mtce = True
+        val = {'ihost_action': constants.FORCE_UNSAFE_LOCK_ACTION}
+        hostupdate.ihost_val_prenotify_update(val)
+        hostupdate.ihost_val.update(val)
+
     @staticmethod
     def _check_patch_requirements(region_name,
                                   applied_patches=None,
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage.py
index 2200f1ac57..c99109f1bf 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage.py
@@ -626,11 +626,11 @@ def _check_host(stor):
         raise wsme.exc.ClientSideError(_(
             "System must have a %s backend" % constants.SB_TYPE_CEPH))
 
-    # semantic check: whether host can be locked or force locked based on
+    # semantic check: whether host can be locked or unsafely force locked based on
     # ceph monitors availability
     if not cutils.is_aio_system(pecan.request.dbapi):
-        force = ihost['action'] == constants.FORCE_LOCK_ACTION
-        utils.check_node_lock_ceph_mon(ihost, force=force)
+        unsafe = ihost['action'] == constants.FORCE_UNSAFE_LOCK_ACTION
+        utils.check_node_lock_ceph_mon(ihost, unsafe=unsafe)
 
 
 def _check_disk(stor):
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/utils.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/utils.py
index f62cbefec6..f2230c0c4f 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/utils.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/utils.py
@@ -583,7 +583,7 @@ def check_all_ceph_mon_growth(ceph_mon_gib, host=None):
             cgtsvg_max_free_gib)
 
 
-def check_node_lock_ceph_mon(ihost, force=False, ceph_helper=None):
+def check_node_lock_ceph_mon(ihost, unsafe=False, ceph_helper=None):
     if not ceph_helper:
         ceph_helper = ceph.CephApiOperator()
 
@@ -594,13 +594,14 @@ def check_node_lock_ceph_mon(ihost, force=False, ceph_helper=None):
 
     if (num_monitors <= required_monitors and
             is_in_active_monitors
-            and not force):
+            and not unsafe):
         raise wsme.exc.ClientSideError(_(
             "Only %d storage "
            "monitor available. At least %d unlocked and "
             "enabled hosts with monitors are required. Please"
             " ensure hosts with monitors are unlocked and "
-            "enabled.") %
+            "enabled. "
+ "Use --force --unsafe if you wish to lock anyway.") % (num_monitors, required_monitors)) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/vim_api.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/vim_api.py index 171f133a1f..99d3254371 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/vim_api.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/vim_api.py @@ -101,7 +101,8 @@ def vim_host_action(token, uuid, hostname, action, timeout): response = None _valid_actions = [constants.UNLOCK_ACTION, constants.LOCK_ACTION, - constants.FORCE_LOCK_ACTION] + constants.FORCE_LOCK_ACTION, + constants.FORCE_UNSAFE_LOCK_ACTION] if action not in _valid_actions: LOG.error("Unrecognized vim_host_action=%s" % action) diff --git a/sysinv/sysinv/sysinv/sysinv/common/ceph.py b/sysinv/sysinv/sysinv/sysinv/common/ceph.py index daaeef42ff..79efbd864b 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/ceph.py +++ b/sysinv/sysinv/sysinv/sysinv/common/ceph.py @@ -830,7 +830,8 @@ class CephApiOperator(object): ihost = db_api.ihost_get(mon['forihostid']) host_action = ihost['ihost_action'] or "" locking = (host_action.startswith(constants.LOCK_ACTION) or - host_action.startswith(constants.FORCE_LOCK_ACTION)) + host_action.startswith(constants.FORCE_LOCK_ACTION) or + host_action.startswith(constants.FORCE_UNSAFE_LOCK_ACTION)) if (ihost['administrative'] == constants.ADMIN_UNLOCKED and ihost['operational'] == constants.OPERATIONAL_ENABLED and not locking): diff --git a/sysinv/sysinv/sysinv/sysinv/common/constants.py b/sysinv/sysinv/sysinv/sysinv/common/constants.py index 608880ce4a..75b8b52e10 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/constants.py +++ b/sysinv/sysinv/sysinv/sysinv/common/constants.py @@ -74,6 +74,7 @@ UNLOCK_ACTION = 'unlock' FORCE_UNLOCK_ACTION = 'force-unlock' LOCK_ACTION = 'lock' FORCE_LOCK_ACTION = 'force-lock' +FORCE_UNSAFE_LOCK_ACTION = 'force-unsafe-lock' REBOOT_ACTION = 'reboot' RESET_ACTION = 'reset' REINSTALL_ACTION = 'reinstall' @@ -108,7 +109,8 @@ MTCE_ACTIONS = [REBOOT_ACTION, # These go to VIM First VIM_ACTIONS = [LOCK_ACTION, - FORCE_LOCK_ACTION] + FORCE_LOCK_ACTION, + FORCE_UNSAFE_LOCK_ACTION] CONFIG_ACTIONS = [SUBFUNCTION_CONFIG_ACTION] @@ -167,6 +169,7 @@ ADMIN_UNLOCKED = 'unlocked' ADMIN_LOCKED = 'locked' LOCKING = 'Locking' FORCE_LOCKING = "Force Locking" +UNSAFELY_FORCE_LOCKING = "Unsafely Force Locking" OPERATIONAL_ENABLED = 'enabled' OPERATIONAL_DISABLED = 'disabled' @@ -1962,6 +1965,7 @@ APP_EVALUATE_REAPPLY_TYPE_HOST_DELETE = 'host-delete' APP_EVALUATE_REAPPLY_TYPE_HOST_REINSTALL = REINSTALL_ACTION APP_EVALUATE_REAPPLY_TYPE_HOST_LOCK = LOCK_ACTION APP_EVALUATE_REAPPLY_TYPE_HOST_FORCE_LOCK = FORCE_LOCK_ACTION +APP_EVALUATE_REAPPLY_TYPE_HOST_FORCE_UNSAFE_LOCK = FORCE_UNSAFE_LOCK_ACTION APP_EVALUATE_REAPPLY_TYPE_HOST_UNLOCK = UNLOCK_ACTION APP_EVALUATE_REAPPLY_TYPE_HOST_FORCE_UNLOCK = FORCE_UNLOCK_ACTION APP_EVALUATE_REAPPLY_TYPE_HOST_SWACT = SWACT_ACTION @@ -1983,6 +1987,8 @@ APP_EVALUATE_REAPPLY_TRIGGER_TO_METADATA_MAP = { APP_EVALUATE_REAPPLY_TYPE_HOST_LOCK, FORCE_LOCK_ACTION: APP_EVALUATE_REAPPLY_TYPE_HOST_FORCE_LOCK, + FORCE_UNSAFE_LOCK_ACTION: + APP_EVALUATE_REAPPLY_TYPE_HOST_FORCE_UNSAFE_LOCK, SWACT_ACTION: APP_EVALUATE_REAPPLY_TYPE_HOST_SWACT, FORCE_SWACT_ACTION: diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py index 83c4f5f58e..7b22d01ea1 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py @@ -6277,11 +6277,15 @@ 
         if ihost.administrative == constants.ADMIN_UNLOCKED:
             ihost_action_str = ihost.ihost_action or ""
 
-            if (ihost_action_str.startswith(constants.FORCE_LOCK_ACTION) or
-                    ihost_action_str.startswith(constants.LOCK_ACTION)):
+            if (ihost_action_str.startswith(constants.FORCE_UNSAFE_LOCK_ACTION) or
+                    ihost_action_str.startswith(constants.FORCE_LOCK_ACTION) or
+                    ihost_action_str.startswith(constants.LOCK_ACTION)):
                 task_str = ihost.task or ""
                 if (('--' in ihost_action_str and
+                        ihost_action_str.startswith(
+                            constants.FORCE_UNSAFE_LOCK_ACTION)) or
+                        ('--' in ihost_action_str and
                         ihost_action_str.startswith(
                             constants.FORCE_LOCK_ACTION)) or
                         ('----------' in ihost_action_str and
@@ -6303,14 +6307,26 @@ class ConductorManager(service.PeriodicService):
                         self._api_token, self._mtc_address, self._mtc_port,
                         ihost_mtc, timeout_in_secs)
 
+                elif ihost_action_str.startswith(constants.FORCE_UNSAFE_LOCK_ACTION):
+                    timeout_in_secs = 6
+                    ihost_mtc['operation'] = 'modify'
+                    ihost_mtc['action'] = constants.FORCE_LOCK_ACTION
+                    ihost_mtc['task'] = constants.FORCE_LOCKING
+                    LOG.warn("ihost_action override %s" %
+                             ihost_mtc)
+                    mtce_api.host_modify(
+                        self._api_token, self._mtc_address, self._mtc_port,
+                        ihost_mtc, timeout_in_secs)
+
                 # need time for FORCE_LOCK mtce to clear
                 if ('----' in ihost_action_str):
                     ihost_action_str = ""
                 else:
                     ihost_action_str += "-"
 
-                    if (task_str.startswith(constants.FORCE_LOCKING) or
-                            task_str.startswith(constants.LOCKING)):
+                    if (task_str.startswith(constants.UNSAFELY_FORCE_LOCKING) or
+                            task_str.startswith(constants.FORCE_LOCKING) or
+                            task_str.startswith(constants.LOCKING)):
                         val = {'task': "",
                                'ihost_action': ihost_action_str,
                                'vim_progress_status': ""}
@@ -6319,8 +6335,9 @@ class ConductorManager(service.PeriodicService):
                                'vim_progress_status': ""}
             else:
                 ihost_action_str += "-"
-                if (task_str.startswith(constants.FORCE_LOCKING) or
-                        task_str.startswith(constants.LOCKING)):
+                if (task_str.startswith(constants.UNSAFELY_FORCE_LOCKING) or
+                        task_str.startswith(constants.FORCE_LOCKING) or
+                        task_str.startswith(constants.LOCKING)):
                     task_str += "-"
                     val = {'task': task_str,
                            'ihost_action': ihost_action_str}
@@ -6330,8 +6347,9 @@ class ConductorManager(service.PeriodicService):
             self.dbapi.ihost_update(ihost.uuid, val)
         else:  # Administrative locked already
             task_str = ihost.task or ""
-            if (task_str.startswith(constants.FORCE_LOCKING) or
-                    task_str.startswith(constants.LOCKING)):
+            if (task_str.startswith(constants.UNSAFELY_FORCE_LOCKING) or
+                    task_str.startswith(constants.FORCE_LOCKING) or
+                    task_str.startswith(constants.LOCKING)):
                 val = {'task': ""}
                 self.dbapi.ihost_update(ihost.uuid, val)
diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py
index 29ccf0a8d1..211b75f91e 100644
--- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py
+++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py
@@ -2510,6 +2510,47 @@ class TestPatch(TestHost):
         # Verify that the apps reapply was called
         self.fake_conductor_api.evaluate_apps_reapply.assert_called_once()
 
+    def test_unsafe_force_lock_action_controller(self):
+        # Create controller-0
+        self._create_controller_0(
+            invprovision=constants.PROVISIONED,
+            administrative=constants.ADMIN_UNLOCKED,
+            operational=constants.OPERATIONAL_ENABLED,
+            availability=constants.AVAILABILITY_ONLINE)
+
+        # Create controller-1
+        c1_host = self._create_controller_1(
+            invprovision=constants.PROVISIONED,
+            administrative=constants.ADMIN_UNLOCKED,
+            operational=constants.OPERATIONAL_ENABLED,
+            availability=constants.AVAILABILITY_ONLINE)
+
+        # Lock host
+        response = self._patch_host_action(c1_host['hostname'],
+                                           constants.FORCE_UNSAFE_LOCK_ACTION,
+                                           'sysinv-test')
+        self.assertEqual(response.content_type, 'application/json')
+        self.assertEqual(response.status_code, http_client.OK)
+
+        # Verify that the SM lock pre check was not done
+        self.mock_sm_api_lock_pre_check.assert_not_called()
+        # Verify that the force lock was sent to the VIM
+        self.mock_vim_api_host_action.assert_called_with(
+            mock.ANY,
+            c1_host['uuid'],
+            c1_host['hostname'],
+            constants.FORCE_UNSAFE_LOCK_ACTION,
+            mock.ANY)
+        # Verify that the host was not configured
+        self.fake_conductor_api.configure_ihost.assert_not_called()
+        # Verify that the host was not modified in maintenance
+        self.mock_mtce_api_host_modify.assert_not_called()
+        # Verify that the host action was cleared
+        result = self.get_json('/ihosts/%s' % c1_host['hostname'])
+        self.assertEqual(constants.NONE_ACTION, result['action'])
+        # Verify that the apps reapply was called
+        self.fake_conductor_api.evaluate_apps_reapply.assert_called_once()
+
     def test_unlock_action_controller_while_upgrading_kubelet(self):
         # Create controller-0
         c0_host = self._create_controller_0(