From 4438b8fd5579f20ece09e670b712e3afd691bd79 Mon Sep 17 00:00:00 2001 From: Hugo Brito Date: Mon, 28 Aug 2023 19:35:23 -0300 Subject: [PATCH] Update tox pylint/pep8 for dcmanager This commit enables the check of new pylint/pep8 violations. PYLINT - All convention related checks, except: - missing-class-docstring - missing-function-docstring - missing-module-docstring - consider-using-f-string - invalid-name - import-outside-toplevel - too-many-lines - consider-iterating-dictionary - unnecessary-lambda-assignment PEP8: - E117: over-indented - E123: closing bracket does not match indentation of opening bracket's line - E125: continuation line with the same indent as the next logical line - E305: expected 2 blank lines after class or function definition - E402: module level import not at top of file - E501: line too long - H216: flag use of third party mock Test Plan: 1. Perform `tox` command - Pass in py39, pylint, pep8 Closes-bug: 2033294 Change-Id: I635df8e809905cff582bd9d5eb57b91133560cf9 Signed-off-by: Hugo Brito --- distributedcloud/.pylintrc | 151 +- distributedcloud/dcmanager/api/app.py | 4 +- .../dcmanager/api/controllers/root.py | 12 +- .../api/controllers/v1/alarm_manager.py | 8 +- .../api/controllers/v1/notifications.py | 24 +- .../controllers/v1/peer_group_association.py | 6 +- .../controllers/v1/phased_subcloud_deploy.py | 25 +- .../api/controllers/v1/subcloud_backup.py | 29 +- .../api/controllers/v1/subcloud_deploy.py | 38 +- .../api/controllers/v1/subcloud_group.py | 5 +- .../api/controllers/v1/subcloud_peer_group.py | 59 +- .../dcmanager/api/controllers/v1/subclouds.py | 108 +- .../api/controllers/v1/system_peers.py | 25 +- .../dcmanager/api/policies/alarm_manager.py | 5 +- .../api/policies/peer_group_association.py | 5 +- .../api/policies/phased_subcloud_deploy.py | 5 +- .../dcmanager/api/policies/subcloud_backup.py | 5 +- .../dcmanager/api/policies/subcloud_deploy.py | 5 +- .../dcmanager/api/policies/subcloud_group.py | 5 +- 
.../api/policies/subcloud_peer_group.py | 5 +- .../dcmanager/api/policies/subclouds.py | 5 +- .../api/policies/sw_update_options.py | 5 +- .../api/policies/sw_update_strategy.py | 5 +- .../dcmanager/api/policies/system_peers.py | 5 +- distributedcloud/dcmanager/api/policy.py | 5 +- .../dcmanager/audit/alarm_aggregation.py | 30 +- distributedcloud/dcmanager/audit/auditor.py | 7 +- .../dcmanager/audit/firmware_audit.py | 71 +- .../audit/kube_rootca_update_audit.py | 4 +- .../dcmanager/audit/patch_audit.py | 35 +- .../dcmanager/audit/subcloud_audit_manager.py | 71 +- .../audit/subcloud_audit_worker_manager.py | 29 +- distributedcloud/dcmanager/audit/utils.py | 47 +- distributedcloud/dcmanager/cmd/api.py | 25 +- distributedcloud/dcmanager/cmd/audit.py | 20 +- .../dcmanager/cmd/audit_worker.py | 20 +- distributedcloud/dcmanager/cmd/manage.py | 22 +- distributedcloud/dcmanager/cmd/manager.py | 29 +- .../dcmanager/cmd/orchestrator.py | 20 +- distributedcloud/dcmanager/cmd/state.py | 19 +- distributedcloud/dcmanager/common/consts.py | 27 +- distributedcloud/dcmanager/common/context.py | 34 +- .../dcmanager/common/exceptions.py | 7 +- distributedcloud/dcmanager/common/manager.py | 6 +- .../common/phased_subcloud_deploy.py | 18 +- .../dcmanager/common/serializer.py | 23 +- distributedcloud/dcmanager/common/utils.py | 116 +- distributedcloud/dcmanager/db/api.py | 62 +- .../dcmanager/db/sqlalchemy/api.py | 34 +- .../versions/001_first_version.py | 21 +- .../versions/008_add_subcloud_audits_table.py | 23 +- .../dcmanager/db/sqlalchemy/models.py | 26 +- .../manager/peer_group_audit_manager.py | 9 +- .../dcmanager/manager/peer_monitor_manager.py | 20 +- distributedcloud/dcmanager/manager/service.py | 44 +- .../dcmanager/manager/subcloud_manager.py | 175 +- .../dcmanager/manager/system_peer_manager.py | 9 +- .../dcmanager/orchestrator/orch_thread.py | 37 +- .../orchestrator/patch_orch_thread.py | 25 +- .../dcmanager/orchestrator/service.py | 27 +- 
.../states/creating_vim_strategy.py | 7 +- .../states/firmware/applying_vim_strategy.py | 6 +- .../states/firmware/creating_vim_strategy.py | 6 +- .../states/firmware/finishing_fw_update.py | 6 +- .../states/firmware/importing_firmware.py | 10 +- .../orchestrator/states/firmware/utils.py | 13 +- .../creating_vim_kube_upgrade_strategy.py | 11 +- .../orchestrator/states/kube/pre_check.py | 30 +- .../orchestrator/states/patch/job_data.py | 5 +- .../orchestrator/states/patch/pre_check.py | 4 +- .../states/software/deploy_start.py | 4 +- .../states/software/finish_strategy.py | 13 +- .../orchestrator/states/software/upload.py | 23 +- .../orchestrator/states/swact_host.py | 12 +- .../orchestrator/states/unlock_host.py | 6 +- .../orchestrator/states/upgrade/activating.py | 6 +- .../orchestrator/states/upgrade/completing.py | 7 +- .../states/upgrade/deleting_load.py | 6 +- .../upgrade/finishing_patch_strategy.py | 13 +- .../states/upgrade/importing_load.py | 72 +- .../states/upgrade/installing_license.py | 10 +- .../states/upgrade/migrating_data.py | 14 +- .../orchestrator/states/upgrade/pre_check.py | 190 +- .../states/upgrade/starting_upgrade.py | 12 +- .../states/upgrade/transfer_ca_certificate.py | 9 +- .../states/upgrade/updating_patches.py | 8 +- .../states/upgrade/upgrading_duplex.py | 27 +- .../states/upgrade/upgrading_simplex.py | 41 +- .../orchestrator/sw_update_manager.py | 61 +- .../orchestrator/sw_upgrade_orch_thread.py | 26 +- distributedcloud/dcmanager/rpc/client.py | 65 +- distributedcloud/dcmanager/state/service.py | 13 +- .../dcmanager/state/subcloud_state_manager.py | 53 +- distributedcloud/dcmanager/tests/base.py | 14 +- .../test_peer_group_association.py | 12 +- .../test_phased_subcloud_deploy.py | 65 +- .../v1/controllers/test_subcloud_backup.py | 1870 ++++++----- .../v1/controllers/test_subcloud_deploy.py | 235 +- .../controllers/test_subcloud_peer_group.py | 49 +- .../unit/api/v1/controllers/test_subclouds.py | 2773 ++++++++++------- 
.../api/v1/controllers/test_system_peer.py | 6 +- .../unit/audit/test_firmware_audit_manager.py | 26 +- .../unit/audit/test_kube_audit_manager.py | 24 +- .../unit/audit/test_patch_audit_manager.py | 31 +- .../tests/unit/audit/test_service.py | 23 +- .../unit/audit/test_subcloud_audit_manager.py | 30 +- .../test_subcloud_audit_worker_manager.py | 228 +- .../common/test_phased_subcloud_deploy.py | 17 +- .../tests/unit/db/test_subcloud_alarms.py | 23 +- .../tests/unit/db/test_subcloud_audits.py | 165 +- .../tests/unit/db/test_subcloud_db_api.py | 435 +-- .../tests/unit/manager/test_service.py | 34 +- .../unit/manager/test_subcloud_manager.py | 507 +-- .../unit/manager/test_system_peer_manager.py | 9 +- .../dcmanager/tests/unit/objects/test_base.py | 5 +- .../tests/unit/orchestrator/states/fakes.py | 7 +- .../firmware/test_finishing_vim_strategy.py | 17 +- .../firmware/test_importing_firmware.py | 123 +- ...test_creating_vim_kube_upgrade_strategy.py | 9 +- .../states/kube/test_pre_check.py | 42 +- .../patch/test_creating_vim_patch_strategy.py | 6 +- .../patch/test_finishing_patch_strategy.py | 6 +- .../states/patch/test_pre_check.py | 6 +- .../states/patch/test_updating_patches.py | 6 +- .../states/prestage/test_states.py | 153 +- .../orchestrator/states/software/test_base.py | 5 +- .../states/software/test_lock_controller.py | 18 +- .../states/software/test_upload.py | 56 +- .../states/test_applying_vim_strategy.py | 108 +- .../states/upgrade/test_activating_upgrade.py | 23 +- .../states/upgrade/test_completing_upgrade.py | 19 +- .../states/upgrade/test_deleting_load.py | 19 +- .../upgrade/test_finishing_patch_strategy.py | 7 +- .../states/upgrade/test_importing_load.py | 11 +- .../states/upgrade/test_installing_license.py | 11 +- .../states/upgrade/test_lock_controller.py | 34 +- .../states/upgrade/test_migrating_data.py | 27 +- .../states/upgrade/test_pre_check.py | 786 +++-- .../states/upgrade/test_starting_upgrade.py | 14 +- 
.../states/upgrade/test_swact_controller.py | 13 +- .../states/upgrade/test_updating_patches.py | 14 +- .../states/upgrade/test_upgrading_duplex.py | 10 +- .../states/upgrade/test_upgrading_simplex.py | 11 +- .../tests/unit/orchestrator/test_base.py | 22 +- .../orchestrator/test_sw_update_manager.py | 1581 ++++++---- distributedcloud/dcmanager/tests/utils.py | 8 +- distributedcloud/tox.ini | 12 +- 147 files changed, 6979 insertions(+), 5321 deletions(-) diff --git a/distributedcloud/.pylintrc b/distributedcloud/.pylintrc index 658b61b7d..b4834b8be 100644 --- a/distributedcloud/.pylintrc +++ b/distributedcloud/.pylintrc @@ -22,127 +22,50 @@ load-plugins= [MESSAGES CONTROL] -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. -# -# Python3 checker: -# -# E1601: print-statement -# E1602: parameter-unpacking -# E1603: unpacking-in-except -# E1604: old-raise-syntax -# E1605: backtick -# E1609: import-star-module-level -# W1601: apply-builtin -# W1602: basestring-builtin -# W1603: buffer-builtin -# W1604: cmp-builtin -# W1605: coerce-builtin -# W1606: execfile-builtin -# W1607: file-builtin -# W1608: long-builtin -# W1609: raw_input-builtin -# W1610: reduce-builtin -# W1611: standarderror-builtin -# W1612: unicode-builtin -# W1613: xrange-builtin -# W1614: coerce-method -# W1615: delslice-method -# W1616: getslice-method -# W1617: setslice-method -# W1618: no-absolute-import -# W1619: old-division -# W1620: dict-iter-method -# W1621: dict-view-method -# W1622: next-method-called -# W1623: metaclass-assignment -# W1624: indexing-exception -# W1625: raising-string -# W1626: reload-builtin -# W1627: oct-method -# W1628: hex-method -# W1629: nonzero-method -# W1630: cmp-method -# W1632: input-builtin -# W1633: round-builtin -# W1634: intern-builtin -# W1635: unichr-builtin -# W1636: map-builtin-not-iterating -# W1637: zip-builtin-not-iterating -# W1638: 
range-builtin-not-iterating -# W1639: filter-builtin-not-iterating -# W1640: using-cmp-argument -# W1642: div-method -# W1643: idiv-method -# W1644: rdiv-method -# W1645: exception-message-attribute -# W1646: invalid-str-codec -# W1647: sys-max-int -# W1648: bad-python3-import -# W1649: deprecated-string-function -# W1650: deprecated-str-translate-call -# W1651: deprecated-itertools-function -# W1652: deprecated-types-field -# W1653: next-method-defined -# W1654: dict-items-not-iterating -# W1655: dict-keys-not-iterating -# W1656: dict-values-not-iterating -# W1657: deprecated-operator-function -# W1658: deprecated-urllib-function -# W1659: xreadlines-attribute -# W1660: deprecated-sys-function -# W1661: exception-escape -# W1662: comprehension-escape -enable=E1603,E1609,E1602,E1605,E1604,E1601,W1652,W1651,W1649,W1657,W1660,W1658, - W1659,W1623,W1622,W1620,W1621,W1645,W1624,W1648,W1625,W1611,W1662,W1661, - W1650,W1640,W1630,W1614,W1615,W1642,W1616,W1628,W1643,W1629,W1627,W1644, - W1617,W1601,W1602,W1603,W1604,W1605,W1654,W1655,W1656,W1619,W1606,W1607, - W1639,W1618,W1632,W1634,W1608,W1636,W1653,W1646,W1638,W1609,W1610,W1626, - W1633,W1647,W1635,W1612,W1613,W1637 - # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). 
# https://pylint.readthedocs.io/en/latest/user_guide/output.html#source-code-analysis-section # R detect Refactor for a "good practice" metric violation -# C detect Convention for coding standard violation # W detect Warning for stylistic problems, or minor programming issues -# W0102: dangerous-default-value -# W0105: pointless-string-statement -# W0107: unnecessary-pass -# W0123: eval-used -# W0201: attribute-defined-outside-init -# W0211: bad-staticmethod-argument -# W0212: protected-access -# W0221: arguments-differ -# W0223: abstract-method -# W0231: super-init-not-called -# W0235: useless-super-delegation -# W0311: bad-indentation -# W0402: deprecated-module -# W0603: global-statement -# W0612: unused-variable -# W0613: unused-argument -# W0621: redefined-outer-name -# W0622: redefined-builtin -# W0631: undefined-loop-variable -# W0703: broad-except -# W0706: try-except-raise -# W0707: raise-missing-from -# W1113: keyword-arg-before-vararg -# W1201: logging-not-lazy -# W1401: anomalous-backslash-in-string -# W1406: redundant-u-string-prefix -# W1514: unspecified-encoding -# W1618: no-absolute-import -disable=C,R,fixme, - W0102,W0105,W0107,W0123,W0201,W0211,W0212,W0221, - W0223,W0231,W0235,W0311,W0402,W0603,W0612,W0613, - W0621,W0622,W0631,W0703,W0706,W0707,W1113,W1201, - W1401,W1406,W1514,W1618 - +disable=R,fixme, + dangerous-default-value, + pointless-string-statement, + unnecessary-pass, + eval-used, + attribute-defined-outside-init, + bad-staticmethod-argument, + protected-access, + arguments-differ, + abstract-method, + super-init-not-called, + useless-super-delegation, + deprecated-module, + global-statement, + unused-variable, + unused-argument, + redefined-outer-name, + redefined-builtin, + undefined-loop-variable, + broad-except, + try-except-raise, + raise-missing-from, + keyword-arg-before-vararg, + logging-not-lazy, + anomalous-backslash-in-string, + redundant-u-string-prefix, + unspecified-encoding, + no-absolute-import, + missing-class-docstring, 
+ missing-function-docstring, + missing-module-docstring, + consider-using-f-string, + invalid-name, + import-outside-toplevel, + too-many-lines, + consider-iterating-dictionary, + unnecessary-lambda-assignment [REPORTS] # Set the output format. Available formats are text, parseable, colorized, msvs diff --git a/distributedcloud/dcmanager/api/app.py b/distributedcloud/dcmanager/api/app.py index d2982006d..3798016ad 100644 --- a/distributedcloud/dcmanager/api/app.py +++ b/distributedcloud/dcmanager/api/app.py @@ -1,5 +1,5 @@ # Copyright (c) 2015 Huawei, Tech. Co,. Ltd. -# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc. +# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -42,9 +42,9 @@ def setup_app(*args, **kwargs): 'errors': { 400: '/error', '__force_dict__': True - } } } + } pecan_config = pecan.configuration.conf_from_dict(config) diff --git a/distributedcloud/dcmanager/api/controllers/root.py b/distributedcloud/dcmanager/api/controllers/root.py index 633a94a84..a0ef7e37e 100644 --- a/distributedcloud/dcmanager/api/controllers/root.py +++ b/distributedcloud/dcmanager/api/controllers/root.py @@ -1,5 +1,5 @@ # Copyright (c) 2015 Huawei Tech. Co., Ltd. -# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc. +# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -42,13 +42,13 @@ class RootController(object): { "rel": "self", "href": pecan.request.application_url + "/v1.0/" - } - ], + } + ], "id": "v1.0", "updated": "2017-10-2" - } - ] - } + } + ] + } @index.when(method='POST') @index.when(method='PUT') diff --git a/distributedcloud/dcmanager/api/controllers/v1/alarm_manager.py b/distributedcloud/dcmanager/api/controllers/v1/alarm_manager.py index 691276988..e880c47f8 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/alarm_manager.py +++ b/distributedcloud/dcmanager/api/controllers/v1/alarm_manager.py @@ -1,5 +1,5 @@ # Copyright (c) 2017 Ericsson AB. -# Copyright (c) 2017-2022 Wind River Systems, Inc. +# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,15 +15,15 @@ # under the License. # +from oslo_log import log as logging +from pecan import expose + from dcmanager.api.controllers import restcomm from dcmanager.api.policies import alarm_manager as alarm_manager_policy from dcmanager.api import policy from dcmanager.common import consts from dcmanager.db import api as db_api -from oslo_log import log as logging -from pecan import expose - LOG = logging.getLogger(__name__) diff --git a/distributedcloud/dcmanager/api/controllers/v1/notifications.py b/distributedcloud/dcmanager/api/controllers/v1/notifications.py index 65a3ea719..6ddcc1383 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/notifications.py +++ b/distributedcloud/dcmanager/api/controllers/v1/notifications.py @@ -1,21 +1,22 @@ -# Copyright (c) 2021 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # +import http.client as httpclient from oslo_config import cfg from oslo_log import log as logging - -import http.client as httpclient import pecan from pecan import expose from pecan import request @@ -23,7 +24,6 @@ from pecan import request from dcmanager.api.controllers import restcomm from dcmanager.audit import rpcapi as audit_rpc_client - CONF = cfg.CONF LOG = logging.getLogger(__name__) diff --git a/distributedcloud/dcmanager/api/controllers/v1/peer_group_association.py b/distributedcloud/dcmanager/api/controllers/v1/peer_group_association.py index 5c7143085..f4c849234 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/peer_group_association.py +++ b/distributedcloud/dcmanager/api/controllers/v1/peer_group_association.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -150,7 +150,7 @@ class PeerGroupAssociationsController(restcomm.GenericPathController): return False # Less than min or greater than max priority is not supported. 
if val < MIN_PEER_GROUP_ASSOCIATION_PRIORITY or \ - val > MAX_PEER_GROUP_ASSOCIATION_PRIORITY: + val > MAX_PEER_GROUP_ASSOCIATION_PRIORITY: LOG.debug("Invalid Peer Group Priority out of support range: %s" % peer_group_priority) return False @@ -187,7 +187,7 @@ class PeerGroupAssociationsController(restcomm.GenericPathController): peer_group = db_api.subcloud_peer_group_get(context, peer_group_id) if peer_group_priority is not None and not \ - self._validate_peer_group_priority(peer_group_priority): + self._validate_peer_group_priority(peer_group_priority): pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer_group_priority')) diff --git a/distributedcloud/dcmanager/api/controllers/v1/phased_subcloud_deploy.py b/distributedcloud/dcmanager/api/controllers/v1/phased_subcloud_deploy.py index 42bf2ade8..82e0b0420 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/phased_subcloud_deploy.py +++ b/distributedcloud/dcmanager/api/controllers/v1/phased_subcloud_deploy.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -278,9 +278,12 @@ class PhasedSubcloudDeployController(object): utils.get_management_gateway_address(payload) subcloud_dict['management-start-ip'] = \ utils.get_management_start_address(payload) - subcloud_dict['management-end-ip'] = utils.get_management_end_address(payload) + subcloud_dict['management-end-ip'] = \ + utils.get_management_end_address(payload) subcloud_dict['systemcontroller-gateway-ip'] = payload.get( - "systemcontroller_gateway_address", subcloud.systemcontroller_gateway_ip) + "systemcontroller_gateway_address", + subcloud.systemcontroller_gateway_ip + ) return subcloud_dict except RemoteError as e: @@ -425,10 +428,13 @@ class PhasedSubcloudDeployController(object): # Consider the incoming release parameter only if install is one # of the pending deploy states if INSTALL in deploy_states_to_run: - unvalidated_sw_version = payload.get('release', subcloud.software_version) + unvalidated_sw_version = \ + payload.get('release', subcloud.software_version) else: - LOG.debug('Disregarding release parameter for %s as installation is complete.' - % subcloud.name) + LOG.debug( + 'Disregarding release parameter for %s as installation is complete.' 
+ % subcloud.name + ) unvalidated_sw_version = subcloud.software_version # get_sw_version will simply return back the passed @@ -474,9 +480,12 @@ class PhasedSubcloudDeployController(object): utils.get_management_gateway_address(payload) subcloud_dict['management-start-ip'] = \ utils.get_management_start_address(payload) - subcloud_dict['management-end-ip'] = utils.get_management_end_address(payload) + subcloud_dict['management-end-ip'] = \ + utils.get_management_end_address(payload) subcloud_dict['systemcontroller-gateway-ip'] = payload.get( - "systemcontroller_gateway_address", subcloud.systemcontroller_gateway_ip) + "systemcontroller_gateway_address", + subcloud.systemcontroller_gateway_ip + ) return subcloud_dict except RemoteError as e: pecan.abort(422, e.value) diff --git a/distributedcloud/dcmanager/api/controllers/v1/subcloud_backup.py b/distributedcloud/dcmanager/api/controllers/v1/subcloud_backup.py index 3162ae0e5..f03d84ba9 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/subcloud_backup.py +++ b/distributedcloud/dcmanager/api/controllers/v1/subcloud_backup.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2022-2023 Wind River Systems, Inc. +# Copyright (c) 2022-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -98,7 +98,8 @@ class SubcloudBackupController(object): if param in request.POST: file_item = request.POST[param] file_item.file.seek(0, os.SEEK_SET) - data = utils.yaml_safe_load(file_item.file.read().decode('utf8'), param) + data = \ + utils.yaml_safe_load(file_item.file.read().decode('utf8'), param) payload.update({param: data}) del request.POST[param] @@ -176,7 +177,8 @@ class SubcloudBackupController(object): operation (string): Subcloud backup operation """ subclouds = request_entity.subclouds - error_msg = _('Subcloud(s) must be in a valid state for backup %s.' % operation) + error_msg = _( + 'Subcloud(s) must be in a valid state for backup %s.' 
% operation) has_valid_subclouds = False valid_subclouds = list() for subcloud in subclouds: @@ -361,8 +363,10 @@ class SubcloudBackupController(object): payload.get('restore_values', {}).get('bootstrap_address', {}) if not isinstance(bootstrap_address_dict, dict): - pecan.abort(400, _('The bootstrap_address provided in restore_values ' - 'is in invalid format.')) + pecan.abort( + 400, _('The bootstrap_address provided in restore_values ' + 'is in invalid format.') + ) restore_subclouds = self._validate_subclouds( request_entity, verb, bootstrap_address_dict) @@ -376,13 +380,16 @@ class SubcloudBackupController(object): ] if subclouds_without_install_values: subclouds_str = ', '.join(subclouds_without_install_values) - pecan.abort(400, _('The restore operation was requested with_install, ' - 'but the following subcloud(s) does not contain ' - 'install values: %s' % subclouds_str)) + pecan.abort( + 400, _('The restore operation was requested with_install, ' + 'but the following subcloud(s) does not contain ' + 'install values: %s' % subclouds_str) + ) # Confirm the requested or active load is still in dc-vault payload['software_version'] = utils.get_sw_version( payload.get('release')) - matching_iso, err_msg = utils.get_matching_iso(payload['software_version']) + matching_iso, err_msg = \ + utils.get_matching_iso(payload['software_version']) if err_msg: LOG.exception(err_msg) pecan.abort(400, _(err_msg)) @@ -391,8 +398,10 @@ class SubcloudBackupController(object): try: # local update to deploy_status - this is just for CLI response + # pylint: disable-next=consider-using-enumerate for i in range(len(restore_subclouds)): - restore_subclouds[i].deploy_status = consts.DEPLOY_STATE_PRE_RESTORE + restore_subclouds[i].deploy_status = ( + consts.DEPLOY_STATE_PRE_RESTORE) message = self.dcmanager_rpc_client.restore_subcloud_backups( context, payload) return utils.subcloud_db_list_to_dict(restore_subclouds) diff --git 
a/distributedcloud/dcmanager/api/controllers/v1/subcloud_deploy.py b/distributedcloud/dcmanager/api/controllers/v1/subcloud_deploy.py index 78382111a..1e74ce011 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/subcloud_deploy.py +++ b/distributedcloud/dcmanager/api/controllers/v1/subcloud_deploy.py @@ -12,17 +12,16 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # import os +import http.client as httpclient from oslo_config import cfg from oslo_log import log as logging - -import http.client as httpclient import pecan from pecan import expose from pecan import request @@ -35,7 +34,6 @@ from dcmanager.common import consts from dcmanager.common.i18n import _ from dcmanager.common import utils - CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -96,16 +94,19 @@ class SubcloudDeployController(object): if len(missing_options) > 0: if ((consts.DEPLOY_PRESTAGE in missing_options and size != 1) or (consts.DEPLOY_PRESTAGE not in missing_options and size != 3)): - missing_str = str() - for missing in missing_options: - if missing is not consts.DEPLOY_PRESTAGE: - missing_str += '--%s ' % missing - error_msg = "error: argument %s is required" % missing_str.rstrip() - pecan.abort(httpclient.BAD_REQUEST, error_msg) + missing_str = str() + for missing in missing_options: + if missing is not consts.DEPLOY_PRESTAGE: + missing_str += '--%s ' % missing + error_msg = "error: argument %s is required" % missing_str.rstrip() + pecan.abort(httpclient.BAD_REQUEST, error_msg) - deploy_dicts['software_version'] = utils.get_sw_version(request.POST.get('release')) + deploy_dicts['software_version'] = \ + utils.get_sw_version(request.POST.get('release')) - dir_path = os.path.join(dccommon_consts.DEPLOY_DIR, deploy_dicts['software_version']) + dir_path = os.path.join( + 
dccommon_consts.DEPLOY_DIR, deploy_dicts['software_version'] + ) for f in consts.DEPLOY_COMMON_FILE_OPTIONS: if f not in request.POST: continue @@ -139,7 +140,9 @@ class SubcloudDeployController(object): restcomm.extract_credentials_for_policy()) deploy_dicts = dict() deploy_dicts['software_version'] = utils.get_sw_version(release) - dir_path = os.path.join(dccommon_consts.DEPLOY_DIR, deploy_dicts['software_version']) + dir_path = os.path.join( + dccommon_consts.DEPLOY_DIR, deploy_dicts['software_version'] + ) for f in consts.DEPLOY_COMMON_FILE_OPTIONS: filename = None if os.path.isdir(dir_path): @@ -159,10 +162,13 @@ class SubcloudDeployController(object): policy.authorize(subcloud_deploy_policy.POLICY_ROOT % "delete", {}, restcomm.extract_credentials_for_policy()) - is_prestage_images = request.params.get('prestage_images', '').lower() == 'true' - is_deployment_files = request.params.get('deployment_files', '').lower() == 'true' + is_prestage_images = \ + request.params.get('prestage_images', '').lower() == 'true' + is_deployment_files = \ + request.params.get('deployment_files', '').lower() == 'true' - dir_path = os.path.join(dccommon_consts.DEPLOY_DIR, utils.get_sw_version(release)) + dir_path = \ + os.path.join(dccommon_consts.DEPLOY_DIR, utils.get_sw_version(release)) if not os.path.isdir(dir_path): pecan.abort(httpclient.NOT_FOUND, _("Directory not found: %s" % dir_path)) diff --git a/distributedcloud/dcmanager/api/controllers/v1/subcloud_group.py b/distributedcloud/dcmanager/api/controllers/v1/subcloud_group.py index b1b5d9c35..f69dff628 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/subcloud_group.py +++ b/distributedcloud/dcmanager/api/controllers/v1/subcloud_group.py @@ -1,5 +1,5 @@ # Copyright (c) 2017 Ericsson AB. -# Copyright (c) 2020-2022 Wind River Systems, Inc. +# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,12 +15,11 @@ # under the License. # +import http.client as httpclient from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_messaging import RemoteError - -import http.client as httpclient import pecan from pecan import expose from pecan import request diff --git a/distributedcloud/dcmanager/api/controllers/v1/subcloud_peer_group.py b/distributedcloud/dcmanager/api/controllers/v1/subcloud_peer_group.py index de51d0789..1299612a6 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/subcloud_peer_group.py +++ b/distributedcloud/dcmanager/api/controllers/v1/subcloud_peer_group.py @@ -1,19 +1,21 @@ -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # +import json +import uuid + +import http.client as httpclient + from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_messaging import RemoteError -import http.client as httpclient -import json import pecan from pecan import expose from pecan import request -import uuid from dccommon import consts as dccommon_consts from dccommon.drivers.openstack.sdk_platform import OpenStackDriver @@ -140,7 +142,8 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController): if group is None: pecan.abort(httpclient.NOT_FOUND, _("Subcloud Peer Group not found")) if verb is None: - subcloud_peer_group_dict = db_api.subcloud_peer_group_db_model_to_dict(group) + subcloud_peer_group_dict = \ + db_api.subcloud_peer_group_db_model_to_dict(group) return subcloud_peer_group_dict elif verb == 'subclouds': # Return only the subclouds for this subcloud peer group @@ -285,29 +288,31 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController): if (peer_group_name is not None and not utils.validate_name(peer_group_name, 
prohibited_name_list=['none'])): - pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer-group-name')) + pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer-group-name')) if (group_priority is not None and not self._validate_group_priority(group_priority)): - pecan.abort(httpclient.BAD_REQUEST, _('Invalid group-priority')) + pecan.abort(httpclient.BAD_REQUEST, _('Invalid group-priority')) if group_state and not self._validate_group_state(group_state): - pecan.abort(httpclient.BAD_REQUEST, - _('Invalid group-state')) + pecan.abort(httpclient.BAD_REQUEST, + _('Invalid group-state')) if (max_subcloud_rehoming is not None and not self._validate_max_subcloud_rehoming(max_subcloud_rehoming)): pecan.abort(httpclient.BAD_REQUEST, _('Invalid max-subcloud-rehoming')) if (system_leader_id and - not self._validate_system_leader_id(system_leader_id)): - pecan.abort(httpclient.BAD_REQUEST, - _('Invalid system-leader-id')) + not self._validate_system_leader_id(system_leader_id)): + pecan.abort(httpclient.BAD_REQUEST, + _('Invalid system-leader-id')) if (system_leader_name is not None and - not utils.validate_name(system_leader_name)): - pecan.abort(httpclient.BAD_REQUEST, - _('Invalid system-leader-name')) + not utils.validate_name(system_leader_name)): + pecan.abort(httpclient.BAD_REQUEST, + _('Invalid system-leader-name')) if (migration_status and - migration_status.lower() not in [consts.PEER_GROUP_MIGRATING, - consts.PEER_GROUP_MIGRATION_COMPLETE, - consts.PEER_GROUP_MIGRATION_NONE]): + migration_status.lower() not in [ + consts.PEER_GROUP_MIGRATING, + consts.PEER_GROUP_MIGRATION_COMPLETE, + consts.PEER_GROUP_MIGRATION_NONE + ]): pecan.abort(httpclient.BAD_REQUEST, _('Invalid migration_status')) @@ -322,7 +327,9 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController): system_leader_id=system_leader_id, system_leader_name=system_leader_name, migration_status=migration_status) - return db_api.subcloud_peer_group_db_model_to_dict(updated_peer_group) + return 
db_api.subcloud_peer_group_db_model_to_dict( + updated_peer_group + ) except RemoteError as e: pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value) except Exception as e: @@ -427,31 +434,31 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController): payload = json.loads(request.body) if 'peer_uuid' not in payload: pecan.abort(400, _('Unable to audit peer group ' - '%s, missing peer_uuid' % + '%s, missing peer_uuid' % group.peer_group_name)) if 'peer_group_name' not in payload: pecan.abort(400, _('Unable to audit peer group ' - '%s, missing peer_group_name' % + '%s, missing peer_group_name' % group.peer_group_name)) if 'group_priority' not in payload: pecan.abort(400, _('Unable to audit peer group ' - '%s, missing group_priority' % + '%s, missing group_priority' % group.peer_group_name)) if 'group_state' not in payload: pecan.abort(400, _('Unable to audit peer group ' - '%s, missing group_state' % + '%s, missing group_state' % group.peer_group_name)) if 'system_leader_id' not in payload: pecan.abort(400, _('Unable to audit peer group ' - '%s, missing system_leader_id' % + '%s, missing system_leader_id' % group.peer_group_name)) if 'system_leader_name' not in payload: pecan.abort(400, _('Unable to audit peer group ' - '%s, missing system_leader_name' % + '%s, missing system_leader_name' % group.peer_group_name)) if 'migration_status' not in payload: pecan.abort(400, _('Unable to audit peer group ' - '%s, missing migration_status' % + '%s, missing migration_status' % group.peer_group_name)) try: msg = self.rpc_client.peer_group_audit_notify( diff --git a/distributedcloud/dcmanager/api/controllers/v1/subclouds.py b/distributedcloud/dcmanager/api/controllers/v1/subclouds.py index 7192e3bc2..ec024e73e 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/subclouds.py +++ b/distributedcloud/dcmanager/api/controllers/v1/subclouds.py @@ -17,29 +17,32 @@ # SPDX-License-Identifier: Apache-2.0 # - -from requests_toolbelt.multipart import decoder - import 
base64 import json -import keyring import os +import re + +import keyring + from oslo_config import cfg from oslo_log import log as logging from oslo_messaging import RemoteError -import re + +from requests_toolbelt.multipart import decoder import pecan from pecan import expose from pecan import request +from fm_api.constants import FM_ALARM_ID_UNSYNCHRONIZED_RESOURCE + +from keystoneauth1 import exceptions as keystone_exceptions + from dccommon import consts as dccommon_consts from dccommon.drivers.openstack.fm import FmClient from dccommon.drivers.openstack.sysinv_v1 import SysinvClient from dccommon import exceptions as dccommon_exceptions -from keystoneauth1 import exceptions as keystone_exceptions - from dcmanager.api.controllers import restcomm from dcmanager.api.policies import subclouds as subclouds_policy from dcmanager.api import policy @@ -52,8 +55,6 @@ from dcmanager.common import utils from dcmanager.db import api as db_api from dcmanager.rpc import client as rpc_client -from fm_api.constants import FM_ALARM_ID_UNSYNCHRONIZED_RESOURCE - CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -263,7 +264,9 @@ class SubcloudsController(object): LOG.error(message) return None - def _get_deploy_config_sync_status(self, context, subcloud_name, keystone_client): + def _get_deploy_config_sync_status( + self, context, subcloud_name, keystone_client + ): """Get the deploy configuration insync status of the subcloud """ detected_alarms = None try: @@ -371,7 +374,7 @@ class SubcloudsController(object): if subcloud_status: subcloud_status_list.append( - db_api.subcloud_endpoint_status_db_model_to_dict( # noqa + db_api.subcloud_endpoint_status_db_model_to_dict( subcloud_status)) subcloud_list[-1][ consts.ENDPOINT_SYNC_STATUS] = subcloud_status_list @@ -380,7 +383,7 @@ class SubcloudsController(object): subcloud_status_list = [] if subcloud_status: subcloud_status_list.append( - db_api.subcloud_endpoint_status_db_model_to_dict( # noqa + 
db_api.subcloud_endpoint_status_db_model_to_dict( subcloud_status)) subcloud_list.append(subcloud_dict) @@ -457,13 +460,15 @@ class SubcloudsController(object): if detail is not None: oam_floating_ip = "unavailable" deploy_config_sync_status = "unknown" - if subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE: + if (subcloud.availability_status == + dccommon_consts.AVAILABILITY_ONLINE): # Get the keystone client that will be used # for _get_deploy_config_sync_status and _get_oam_addresses sc_ks_client = psd_common.get_ks_client(subcloud_region) - oam_addresses = self._get_oam_addresses(context, - subcloud_region, sc_ks_client) + oam_addresses = self._get_oam_addresses( + context, subcloud_region, sc_ks_client + ) if oam_addresses is not None: oam_floating_ip = oam_addresses.oam_floating_ip @@ -472,9 +477,11 @@ class SubcloudsController(object): if deploy_config_state is not None: deploy_config_sync_status = deploy_config_state - extra_details = {"oam_floating_ip": oam_floating_ip, - "deploy_config_sync_status": deploy_config_sync_status, - "region_name": subcloud_region} + extra_details = { + "oam_floating_ip": oam_floating_ip, + "deploy_config_sync_status": deploy_config_sync_status, + "region_name": subcloud_region + } subcloud_dict.update(extra_details) return subcloud_dict @@ -594,26 +601,39 @@ class SubcloudsController(object): # Rename the subcloud new_subcloud_name = payload.get('name') if new_subcloud_name is not None: - # To be renamed the subcloud must be in unmanaged and valid deploy state - if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED \ - or subcloud.deploy_status not in consts.STATES_FOR_SUBCLOUD_RENAME: - msg = ('Subcloud %s must be unmanaged and in a valid deploy state ' - 'for the subcloud rename operation.' 
% subcloud.name) + # To be renamed the subcloud must be in unmanaged and valid deploy + # state + if (subcloud.management_state != + dccommon_consts.MANAGEMENT_UNMANAGED or + subcloud.deploy_status not in + consts.STATES_FOR_SUBCLOUD_RENAME): + msg = ( + 'Subcloud %s must be unmanaged and in a valid deploy state ' + 'for the subcloud rename operation.' % + subcloud.name + ) pecan.abort(400, msg) # Validates new name if not utils.is_subcloud_name_format_valid(new_subcloud_name): - pecan.abort(400, _("new name must contain alphabetic characters")) + pecan.abort( + 400, _("new name must contain alphabetic characters") + ) # Checks if new subcloud name is the same as the current subcloud if new_subcloud_name == subcloud.name: - pecan.abort(400, _('Provided subcloud name %s is the same as the ' - 'current subcloud %s. A different name is ' - 'required to rename the subcloud' % - (new_subcloud_name, subcloud.name))) + pecan.abort( + 400, _('Provided subcloud name %s is the same as the ' + 'current subcloud %s. 
A different name is ' + 'required to rename the subcloud' % + (new_subcloud_name, subcloud.name)) + ) + + error_msg = ( + 'Unable to rename subcloud %s with their region %s to %s' % + (subcloud.name, subcloud.region_name, new_subcloud_name) + ) - error_msg = ('Unable to rename subcloud %s with their region %s to %s' % - (subcloud.name, subcloud.region_name, new_subcloud_name)) try: LOG.info("Renaming subcloud %s to: %s\n" % (subcloud.name, new_subcloud_name)) @@ -720,12 +740,14 @@ class SubcloudsController(object): if pgrp.group_priority > 0: pecan.abort(400, _("Cannot set the subcloud to a peer" " group with non-zero priority.")) - elif not (subcloud.deploy_status in [consts.DEPLOY_STATE_DONE, - consts.PRESTAGE_STATE_COMPLETE] - and subcloud.management_state == - dccommon_consts.MANAGEMENT_MANAGED - and subcloud.availability_status == - dccommon_consts.AVAILABILITY_ONLINE): + elif not ( + subcloud.deploy_status in [ + consts.DEPLOY_STATE_DONE, + consts.PRESTAGE_STATE_COMPLETE + ] and subcloud.management_state == + dccommon_consts.MANAGEMENT_MANAGED + and subcloud.availability_status == + dccommon_consts.AVAILABILITY_ONLINE): pecan.abort(400, _("Only subclouds that are " "managed and online can be " "added to a peer group.")) @@ -770,13 +792,15 @@ class SubcloudsController(object): payload = psd_common.get_request_data( request, subcloud, SUBCLOUD_REDEPLOY_GET_FILE_CONTENTS) - if (subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE or - subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED): + if (subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE + or subcloud.management_state == + dccommon_consts.MANAGEMENT_MANAGED): msg = _('Cannot re-deploy an online and/or managed subcloud') LOG.warning(msg) pecan.abort(400, msg) - payload['software_version'] = utils.get_sw_version(payload.get('release')) + payload['software_version'] = \ + utils.get_sw_version(payload.get('release')) # Don't load previously stored bootstrap_values if 
they are present in # the request, as this would override the already loaded values from it. @@ -831,8 +855,10 @@ class SubcloudsController(object): 'Please use /v1.0/subcloud-backup/restore')) elif verb == "reconfigure": - pecan.abort(410, _('This API is deprecated. ' - 'Please use /v1.0/phased-subcloud-deploy/{subcloud}/configure')) + pecan.abort( + 410, _('This API is deprecated. Please use ' + '/v1.0/phased-subcloud-deploy/{subcloud}/configure') + ) elif verb == "reinstall": pecan.abort(410, _('This API is deprecated. ' diff --git a/distributedcloud/dcmanager/api/controllers/v1/system_peers.py b/distributedcloud/dcmanager/api/controllers/v1/system_peers.py index 55034365b..c365152b0 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/system_peers.py +++ b/distributedcloud/dcmanager/api/controllers/v1/system_peers.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -132,7 +132,7 @@ class SystemPeersController(restcomm.GenericPathController): def _validate_manager_endpoint(self, endpoint): if not endpoint or len(endpoint) >= MAX_SYSTEM_PEER_MANAGER_ENDPOINT_LEN or \ - not endpoint.startswith(("http", "https")): + not endpoint.startswith(("http", "https")): LOG.debug("Invalid manager_endpoint: %s" % endpoint) return False return True @@ -176,7 +176,7 @@ class SystemPeersController(restcomm.GenericPathController): # We do not support less than min or greater than max if val < MIN_SYSTEM_PEER_HEARTBEAT_INTERVAL or \ - val > MAX_SYSTEM_PEER_HEARTBEAT_INTERVAL: + val > MAX_SYSTEM_PEER_HEARTBEAT_INTERVAL: LOG.debug("Invalid heartbeat_interval: %s" % heartbeat_interval) return False return True @@ -193,15 +193,14 @@ class SystemPeersController(restcomm.GenericPathController): # We do not support less than min or greater than max if val < MIN_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD or \ - val > MAX_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD: + val > 
MAX_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD: LOG.debug("Invalid heartbeat_failure_threshold: %s" % heartbeat_failure_threshold) return False return True def _validate_heartbeat_failure_policy(self, heartbeat_failure_policy): - if heartbeat_failure_policy not in \ - SYSTEM_PEER_HEARTBEAT_FAILURE_POLICY_LIST: + if heartbeat_failure_policy not in SYSTEM_PEER_HEARTBEAT_FAILURE_POLICY_LIST: LOG.debug("Invalid heartbeat_failure_policy: %s" % heartbeat_failure_policy) return False @@ -219,7 +218,7 @@ class SystemPeersController(restcomm.GenericPathController): # We do not support less than min or greater than max if val < MIN_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT or \ - val > MAX_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT: + val > MAX_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT: LOG.debug("Invalid heartbeat_maintenance_timeout: %s" % heartbeat_maintenance_timeout) return False @@ -287,7 +286,7 @@ class SystemPeersController(restcomm.GenericPathController): payload.get('heartbeat_failure_threshold') if heartbeat_failure_threshold is not None: if not self._validate_heartbeat_failure_threshold( - heartbeat_failure_threshold): + heartbeat_failure_threshold): pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer heartbeat_failure_threshold')) kwargs['heartbeat_failure_threshold'] = heartbeat_failure_threshold @@ -295,7 +294,7 @@ class SystemPeersController(restcomm.GenericPathController): heartbeat_failure_policy = payload.get('heartbeat_failure_policy') if heartbeat_failure_policy: if not self._validate_heartbeat_failure_policy( - heartbeat_failure_policy): + heartbeat_failure_policy): pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer heartbeat_failure_policy')) kwargs['heartbeat_failure_policy'] = heartbeat_failure_policy @@ -304,7 +303,7 @@ class SystemPeersController(restcomm.GenericPathController): payload.get('heartbeat_maintenance_timeout') if heartbeat_maintenance_timeout is not None: if not self._validate_heartbeat_maintenance_timeout( - heartbeat_maintenance_timeout): 
+ heartbeat_maintenance_timeout): pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer heartbeat_maintenance_timeout')) kwargs['heartbeat_maintenance_timeout'] = \ @@ -419,19 +418,19 @@ class SystemPeersController(restcomm.GenericPathController): if heartbeat_failure_threshold: if not self._validate_heartbeat_failure_threshold( - heartbeat_failure_threshold): + heartbeat_failure_threshold): pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer heartbeat_failure_threshold')) if heartbeat_failure_policy: if not self._validate_heartbeat_failure_policy( - heartbeat_failure_policy): + heartbeat_failure_policy): pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer heartbeat_failure_policy')) if heartbeat_maintenance_timeout: if not self._validate_heartbeat_maintenance_timeout( - heartbeat_maintenance_timeout): + heartbeat_maintenance_timeout): pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer heartbeat_maintenance_timeout')) diff --git a/distributedcloud/dcmanager/api/policies/alarm_manager.py b/distributedcloud/dcmanager/api/policies/alarm_manager.py index 45247f858..d23524e67 100644 --- a/distributedcloud/dcmanager/api/policies/alarm_manager.py +++ b/distributedcloud/dcmanager/api/policies/alarm_manager.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2022 Wind River Systems, Inc. +# Copyright (c) 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:alarm_manager:%s' diff --git a/distributedcloud/dcmanager/api/policies/peer_group_association.py b/distributedcloud/dcmanager/api/policies/peer_group_association.py index bf610fd88..8480873e6 100644 --- a/distributedcloud/dcmanager/api/policies/peer_group_association.py +++ b/distributedcloud/dcmanager/api/policies/peer_group_association.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:peer_group_associations:%s' diff --git a/distributedcloud/dcmanager/api/policies/phased_subcloud_deploy.py b/distributedcloud/dcmanager/api/policies/phased_subcloud_deploy.py index 3c6c0a142..408e1122e 100644 --- a/distributedcloud/dcmanager/api/policies/phased_subcloud_deploy.py +++ b/distributedcloud/dcmanager/api/policies/phased_subcloud_deploy.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:phased_subcloud_deploy:%s' diff --git a/distributedcloud/dcmanager/api/policies/subcloud_backup.py b/distributedcloud/dcmanager/api/policies/subcloud_backup.py index 307811bf4..53ff1d6e5 100644 --- a/distributedcloud/dcmanager/api/policies/subcloud_backup.py +++ b/distributedcloud/dcmanager/api/policies/subcloud_backup.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2022 Wind River Systems, Inc. +# Copyright (c) 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:subcloud_backup:%s' diff --git a/distributedcloud/dcmanager/api/policies/subcloud_deploy.py b/distributedcloud/dcmanager/api/policies/subcloud_deploy.py index f96fc93f5..14b6f7558 100644 --- a/distributedcloud/dcmanager/api/policies/subcloud_deploy.py +++ b/distributedcloud/dcmanager/api/policies/subcloud_deploy.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2022-2023 Wind River Systems, Inc. +# Copyright (c) 2022-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:subcloud_deploy:%s' diff --git a/distributedcloud/dcmanager/api/policies/subcloud_group.py b/distributedcloud/dcmanager/api/policies/subcloud_group.py index b54582ee4..3b941fcc4 100644 --- a/distributedcloud/dcmanager/api/policies/subcloud_group.py +++ b/distributedcloud/dcmanager/api/policies/subcloud_group.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2022 Wind River Systems, Inc. +# Copyright (c) 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:subcloud_groups:%s' diff --git a/distributedcloud/dcmanager/api/policies/subcloud_peer_group.py b/distributedcloud/dcmanager/api/policies/subcloud_peer_group.py index 14eda80c9..6f73fe4cb 100644 --- a/distributedcloud/dcmanager/api/policies/subcloud_peer_group.py +++ b/distributedcloud/dcmanager/api/policies/subcloud_peer_group.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:subcloud_peer_groups:%s' diff --git a/distributedcloud/dcmanager/api/policies/subclouds.py b/distributedcloud/dcmanager/api/policies/subclouds.py index e8e7b68e8..7206a9967 100644 --- a/distributedcloud/dcmanager/api/policies/subclouds.py +++ b/distributedcloud/dcmanager/api/policies/subclouds.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2022-2024 Wind River Systems, Inc. +# Copyright (c) 2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:subclouds:%s' diff --git a/distributedcloud/dcmanager/api/policies/sw_update_options.py b/distributedcloud/dcmanager/api/policies/sw_update_options.py index 85bd7b064..2d2237328 100644 --- a/distributedcloud/dcmanager/api/policies/sw_update_options.py +++ b/distributedcloud/dcmanager/api/policies/sw_update_options.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2022 Wind River Systems, Inc. +# Copyright (c) 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:sw_update_options:%s' diff --git a/distributedcloud/dcmanager/api/policies/sw_update_strategy.py b/distributedcloud/dcmanager/api/policies/sw_update_strategy.py index a1391dbcf..6ddea8ee9 100644 --- a/distributedcloud/dcmanager/api/policies/sw_update_strategy.py +++ b/distributedcloud/dcmanager/api/policies/sw_update_strategy.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2022 Wind River Systems, Inc. +# Copyright (c) 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:sw_update_strategy:%s' diff --git a/distributedcloud/dcmanager/api/policies/system_peers.py b/distributedcloud/dcmanager/api/policies/system_peers.py index 78ef06130..f9a9464cd 100755 --- a/distributedcloud/dcmanager/api/policies/system_peers.py +++ b/distributedcloud/dcmanager/api/policies/system_peers.py @@ -1,11 +1,12 @@ -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # -from dcmanager.api.policies import base from oslo_policy import policy +from dcmanager.api.policies import base + POLICY_ROOT = 'dc_api:system_peers:%s' diff --git a/distributedcloud/dcmanager/api/policy.py b/distributedcloud/dcmanager/api/policy.py index 919608934..a4408f53f 100644 --- a/distributedcloud/dcmanager/api/policy.py +++ b/distributedcloud/dcmanager/api/policy.py @@ -13,17 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2022 Wind River Systems, Inc. +# Copyright (c) 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + """Policy Engine For DC.""" -from dcmanager.api import policies as controller_policies from oslo_config import cfg from oslo_policy import policy from webob import exc +from dcmanager.api import policies as controller_policies CONF = cfg.CONF _ENFORCER = None diff --git a/distributedcloud/dcmanager/audit/alarm_aggregation.py b/distributedcloud/dcmanager/audit/alarm_aggregation.py index 71aca18b3..239b996e7 100644 --- a/distributedcloud/dcmanager/audit/alarm_aggregation.py +++ b/distributedcloud/dcmanager/audit/alarm_aggregation.py @@ -1,22 +1,24 @@ -# Copyright (c) 2020-2021 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # +from oslo_log import log as logging + from dcmanager.common import consts from dcmanager.db import api as db_api -from oslo_log import log as logging - LOG = logging.getLogger(__name__) @@ -40,10 +42,10 @@ class AlarmAggregation(object): LOG.error('Failed to update alarms for %s error: %s' % (name, e)) def _set_cloud_status(self, alarm_dict): - if (alarm_dict.get('critical_alarms') > 0): + if alarm_dict.get('critical_alarms') > 0: status = consts.ALARM_CRITICAL_STATUS - elif (alarm_dict.get('major_alarms') > 0) or\ - (alarm_dict.get('minor_alarms') > 0): + elif (alarm_dict.get('major_alarms') > 0) or \ + (alarm_dict.get('minor_alarms') > 0): status = consts.ALARM_DEGRADED_STATUS else: status = consts.ALARM_OK_STATUS diff --git a/distributedcloud/dcmanager/audit/auditor.py b/distributedcloud/dcmanager/audit/auditor.py index 28191944a..bcfbc3583 100644 --- a/distributedcloud/dcmanager/audit/auditor.py +++ b/distributedcloud/dcmanager/audit/auditor.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2021-2023 Wind River Systems, Inc. +# Copyright (c) 2021-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import abc import six @@ -31,7 +32,9 @@ class Auditor(object): def set_subcloud_endpoint_in_sync(self, sc_name, sc_region): """Set the endpoint sync status of this subcloud to be in sync""" - self._set_subcloud_sync_status(sc_name, sc_region, dccommon_consts.SYNC_STATUS_IN_SYNC) + self._set_subcloud_sync_status( + sc_name, sc_region, dccommon_consts.SYNC_STATUS_IN_SYNC + ) def set_subcloud_endpoint_out_of_sync(self, sc_name, sc_region): """Set the endpoint sync status of this subcloud to be out of sync""" diff --git a/distributedcloud/dcmanager/audit/firmware_audit.py b/distributedcloud/dcmanager/audit/firmware_audit.py index 401fb8717..b3319ee2c 100644 --- a/distributedcloud/dcmanager/audit/firmware_audit.py +++ b/distributedcloud/dcmanager/audit/firmware_audit.py @@ -1,5 +1,5 @@ # Copyright 2017 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -112,16 +112,12 @@ class FirmwareAudit(object): # Filter images which have been applied on RegionOne for image in local_device_images: if image.applied: - filtered_images.append(FirmwareAuditData(image.bitstream_type, - image.bitstream_id, - image.bmc, - image.retimer_included, - image.key_signature, - image.revoke_key_id, - image.applied, - image.pci_vendor, - image.pci_device, - image.applied_labels)) + filtered_images.append(FirmwareAuditData( + image.bitstream_type, image.bitstream_id, image.bmc, + image.retimer_included, image.key_signature, + image.revoke_key_id, image.applied, image.pci_vendor, + image.pci_device, image.applied_labels + )) LOG.debug("RegionOne applied_images: %s" % filtered_images) except Exception: LOG.exception('Cannot retrieve device images for RegionOne, ' @@ -133,24 +129,33 @@ class FirmwareAudit(object): label_key, label_value): for device_label in subcloud_host_device_label_list: if device_label.pcidevice_uuid and \ - device_uuid == device_label.pcidevice_uuid and \ - label_key == device_label.label_key and \ - label_value == device_label.label_value: + device_uuid == device_label.pcidevice_uuid and \ + label_key == device_label.label_key and \ + label_value == device_label.label_value: return True return False - def _check_image_match(self, - subcloud_image, - system_controller_image): - if ((system_controller_image.bitstream_type == consts.BITSTREAM_TYPE_ROOT_KEY and - system_controller_image.key_signature == subcloud_image.key_signature) or - (system_controller_image.bitstream_type == consts.BITSTREAM_TYPE_FUNCTIONAL and - system_controller_image.bitstream_id == subcloud_image.bitstream_id and - system_controller_image.bmc == subcloud_image.bmc and - system_controller_image.retimer_included == subcloud_image.retimer_included) or - (system_controller_image.bitstream_type == consts.BITSTREAM_TYPE_KEY_REVOCATION and - system_controller_image.revoke_key_id == subcloud_image.revoke_key_id)): - return True + def 
_check_image_match(self, subcloud_image, system_controller_image): + if ( + ( + system_controller_image.bitstream_type == + consts.BITSTREAM_TYPE_ROOT_KEY and + system_controller_image.key_signature == subcloud_image.key_signature + ) or ( + system_controller_image.bitstream_type == + consts.BITSTREAM_TYPE_FUNCTIONAL and + system_controller_image.bitstream_id == + subcloud_image.bitstream_id and + system_controller_image.bmc == subcloud_image.bmc and + system_controller_image.retimer_included == + subcloud_image.retimer_included + ) or ( + system_controller_image.bitstream_type == + consts.BITSTREAM_TYPE_KEY_REVOCATION and + system_controller_image.revoke_key_id == subcloud_image.revoke_key_id + ) + ): + return True return False def _check_subcloud_device_has_image(self, @@ -197,7 +202,7 @@ class FirmwareAudit(object): continue if image.pci_vendor == device.pvendor_id and \ - image.pci_device == device.pdevice_id: + image.pci_device == device.pdevice_id: device_image_state = None subcloud_image = None for device_image_state_obj in subcloud_device_image_states: @@ -220,7 +225,7 @@ class FirmwareAudit(object): return False if device_image_state and \ - device_image_state.status != "completed": + device_image_state.status != "completed": # If device image state is not completed it means # that the image has not been written to the device yet return False @@ -303,12 +308,10 @@ class FirmwareAudit(object): for image in audit_data: # audit_data will be a dict from passing through RPC, so objectify image = FirmwareAuditData.from_dict(image) - proceed = self._check_subcloud_device_has_image(subcloud_name, - sysinv_client, - image, - enabled_host_device_list, - subcloud_device_image_states, - subcloud_device_label_list) + proceed = self._check_subcloud_device_has_image( + subcloud_name, sysinv_client, image, enabled_host_device_list, + subcloud_device_image_states, subcloud_device_label_list + ) if not proceed: out_of_sync = True break diff --git 
a/distributedcloud/dcmanager/audit/kube_rootca_update_audit.py b/distributedcloud/dcmanager/audit/kube_rootca_update_audit.py index 93f039ef8..ad25c8376 100644 --- a/distributedcloud/dcmanager/audit/kube_rootca_update_audit.py +++ b/distributedcloud/dcmanager/audit/kube_rootca_update_audit.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2021-2023 Wind River Systems, Inc. +# Copyright (c) 2021-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -114,7 +114,7 @@ class KubeRootcaUpdateAudit(Auditor): # the distributed cloud and the subcloud running on old software # version that cannot search for the k8s root CA cert id. if dccommon_utils.is_centos(subcloud.software_version) or \ - not subcloud.rehomed: + not subcloud.rehomed: self.subcloud_audit_alarm_based(subcloud_name, subcloud_region, session) return diff --git a/distributedcloud/dcmanager/audit/patch_audit.py b/distributedcloud/dcmanager/audit/patch_audit.py index b0c0ae8b3..40057abd7 100644 --- a/distributedcloud/dcmanager/audit/patch_audit.py +++ b/distributedcloud/dcmanager/audit/patch_audit.py @@ -1,18 +1,17 @@ # Copyright 2017 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # from keystoneauth1 import exceptions as keystone_exceptions @@ -25,7 +24,6 @@ from dccommon.drivers.openstack.sdk_platform import OpenStackDriver from dccommon.drivers.openstack import software_v1 from dccommon.drivers.openstack.software_v1 import SoftwareClient from dccommon.drivers.openstack.sysinv_v1 import SysinvClient - from dcmanager.common import utils LOG = logging.getLogger(__name__) @@ -155,7 +153,8 @@ class PatchAudit(object): m_os_ks_client = OpenStackDriver( region_name=dccommon_consts.DEFAULT_REGION_NAME, region_clients=None).keystone_client - patching_endpoint = m_os_ks_client.endpoint_cache.get_endpoint('patching') + patching_endpoint = m_os_ks_client.endpoint_cache.get_endpoint( + 'patching') sysinv_endpoint = m_os_ks_client.endpoint_cache.get_endpoint('sysinv') patching_client = PatchingClient( dccommon_consts.DEFAULT_REGION_NAME, m_os_ks_client.session, @@ -195,10 +194,14 @@ class PatchAudit(object): return PatchAuditData(regionone_patches, applied_patch_ids, committed_patch_ids, regionone_software_version) - def subcloud_audit(self, subcloud_name, subcloud_region, audit_data, software_audit_data, - do_load_audit): + def subcloud_audit( + self, subcloud_name, subcloud_region, audit_data, software_audit_data, + do_load_audit + ): if software_audit_data: - self.subcloud_software_audit(subcloud_name, subcloud_region, software_audit_data) + self.subcloud_software_audit( + subcloud_name, subcloud_region, software_audit_data + ) else: self.subcloud_patch_audit(subcloud_name, subcloud_region, audit_data, do_load_audit) diff --git 
a/distributedcloud/dcmanager/audit/subcloud_audit_manager.py b/distributedcloud/dcmanager/audit/subcloud_audit_manager.py index 3a8b0c19a..3c6197095 100644 --- a/distributedcloud/dcmanager/audit/subcloud_audit_manager.py +++ b/distributedcloud/dcmanager/audit/subcloud_audit_manager.py @@ -1,32 +1,30 @@ # Copyright 2017 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# import datetime -import eventlet import os import time -from tsconfig.tsconfig import CONFIG_PATH +import eventlet from oslo_config import cfg from oslo_log import log as logging +from tsconfig.tsconfig import CONFIG_PATH from dccommon import consts as dccommon_consts from dccommon.drivers.openstack import sysinv_v1 - from dcmanager.audit import firmware_audit from dcmanager.audit import kube_rootca_update_audit from dcmanager.audit import kubernetes_audit @@ -81,7 +79,8 @@ class SubcloudAuditManager(manager.Manager): super(SubcloudAuditManager, self).__init__( service_name="subcloud_audit_manager") self.context = context.get_admin_context() - self.audit_worker_rpc_client = dcmanager_audit_rpc_client.ManagerAuditWorkerClient() + self.audit_worker_rpc_client = ( + dcmanager_audit_rpc_client.ManagerAuditWorkerClient()) # Number of audits since last subcloud state update self.audit_count = SUBCLOUD_STATE_UPDATE_ITERATIONS - 2 # Number of patch audits @@ -100,11 +99,13 @@ class SubcloudAuditManager(manager.Manager): def _add_missing_endpoints(self): # Update this flag file based on the most recent new endpoint file_path_list = [] - file_path_list.append(os.path.join(CONFIG_PATH, - '.kube_rootca_update_endpoint_added')) + file_path_list.append(os.path.join( + CONFIG_PATH, '.kube_rootca_update_endpoint_added') + ) if cfg.CONF.use_usm: - file_path_list.append(os.path.join(CONFIG_PATH, - '.usm_endpoint_added')) + file_path_list.append(os.path.join( + CONFIG_PATH, '.usm_endpoint_added') + ) for file_path in file_path_list: # If file exists on the controller, all the endpoints have been # added to DB since last time an endpoint was added @@ -118,7 +119,9 @@ class SubcloudAuditManager(manager.Manager): subcloud.id) # Use set difference to find missing endpoints if cfg.CONF.use_usm: - endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST_USM) + endpoint_type_set = set( + dccommon_consts.ENDPOINT_TYPES_LIST_USM + ) else: endpoint_type_set = 
set(dccommon_consts.ENDPOINT_TYPES_LIST) subcloud_set = set() @@ -265,17 +268,18 @@ class SubcloudAuditManager(manager.Manager): SubcloudAuditManager.force_patch_audit): LOG.info("Trigger load audit") audit_load = True - if (self.patch_audit_count % 4 == 1): + if self.patch_audit_count % 4 == 1: LOG.info("Trigger firmware audit") audit_firmware = True # Reset force_firmware_audit only when firmware audit has been fired SubcloudAuditManager.reset_force_firmware_audit() - if (self.patch_audit_count % KUBERNETES_AUDIT_RATE == 1): + if self.patch_audit_count % KUBERNETES_AUDIT_RATE == 1: LOG.info("Trigger kubernetes audit") audit_kubernetes = True - # Reset force_kubernetes_audit only when kubernetes audit has been fired + # Reset force_kubernetes_audit only when kubernetes audit has been + # fired SubcloudAuditManager.reset_force_kubernetes_audit() - if (self.patch_audit_count % KUBE_ROOTCA_UPDATE_AUDIT_RATE == 1): + if self.patch_audit_count % KUBE_ROOTCA_UPDATE_AUDIT_RATE == 1: LOG.info("Trigger kube rootca update audit") audit_kube_rootca_updates = True # Reset force_kube_rootca_update_audit only if audit is fired @@ -319,7 +323,8 @@ class SubcloudAuditManager(manager.Manager): if audit_patch: if cfg.CONF.use_usm: # Query RegionOne releases - software_audit_data = self.patch_audit.get_software_regionone_audit_data() + software_audit_data = \ + self.patch_audit.get_software_regionone_audit_data() else: # Query RegionOne patches and software version patch_audit_data = self.patch_audit.get_regionone_audit_data() @@ -396,7 +401,8 @@ class SubcloudAuditManager(manager.Manager): self.context, last_audit_fixup_threshold) end = datetime.datetime.utcnow() if num_fixed > 0: - LOG.info('Fixed up subcloud audit timestamp for %s subclouds.' % num_fixed) + LOG.info( + 'Fixed up subcloud audit timestamp for %s subclouds.' 
% num_fixed) LOG.info('Fixup took %s seconds' % (end - start)) subcloud_ids = [] @@ -452,7 +458,8 @@ class SubcloudAuditManager(manager.Manager): kube_rootca_update_audit_data)) # We want a chunksize of at least 1 so add the number of workers. - chunksize = (len(subcloud_audits) + CONF.audit_worker_workers) // CONF.audit_worker_workers + chunksize = (len(subcloud_audits) + CONF.audit_worker_workers) // ( + CONF.audit_worker_workers) for audit in subcloud_audits: subcloud_ids.append(audit.subcloud_id) if len(subcloud_ids) == chunksize: @@ -466,7 +473,10 @@ class SubcloudAuditManager(manager.Manager): do_openstack_audit, kube_rootca_update_audit_data, software_audit_data) - LOG.debug('Sent subcloud audit request message for subclouds: %s' % subcloud_ids) + LOG.debug( + 'Sent subcloud audit request message for subclouds: %s' % + subcloud_ids + ) subcloud_ids = [] if len(subcloud_ids) > 0: # We've got a partial batch...send it off for processing. @@ -479,6 +489,9 @@ class SubcloudAuditManager(manager.Manager): do_openstack_audit, kube_rootca_update_audit_data, software_audit_data) - LOG.debug('Sent final subcloud audit request message for subclouds: %s' % subcloud_ids) + LOG.debug( + 'Sent final subcloud audit request message for subclouds: %s' % + subcloud_ids + ) else: LOG.debug('Done sending audit request messages.') diff --git a/distributedcloud/dcmanager/audit/subcloud_audit_worker_manager.py b/distributedcloud/dcmanager/audit/subcloud_audit_worker_manager.py index 7d36d1228..59fc94693 100644 --- a/distributedcloud/dcmanager/audit/subcloud_audit_worker_manager.py +++ b/distributedcloud/dcmanager/audit/subcloud_audit_worker_manager.py @@ -1,18 +1,17 @@ # Copyright 2017 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # import os @@ -23,7 +22,6 @@ from oslo_log import log as logging from dccommon import consts as dccommon_consts from dccommon.drivers.openstack.sdk_platform import OpenStackDriver - from dcmanager.audit import alarm_aggregation from dcmanager.audit import firmware_audit from dcmanager.audit import kube_rootca_update_audit @@ -131,8 +129,9 @@ class SubcloudAuditWorkerManager(manager.Manager): subcloud.deploy_status)) or ( (subcloud.deploy_status in [ consts.DEPLOY_STATE_INSTALLING, - consts.DEPLOY_STATE_REHOME_PENDING]) and - subcloud.availability_status == dccommon_consts.AVAILABILITY_OFFLINE): + consts.DEPLOY_STATE_REHOME_PENDING]) + and subcloud.availability_status == + dccommon_consts.AVAILABILITY_OFFLINE): LOG.debug("Skip subcloud %s audit, deploy_status: %s" % (subcloud.name, subcloud.deploy_status)) # This DB API call will set the "audit_finished_at" timestamp @@ -399,7 +398,7 @@ class SubcloudAuditWorkerManager(manager.Manager): except keystone_exceptions.NotFound: if subcloud.first_identity_sync_complete \ - and 
avail_status_current == dccommon_consts.AVAILABILITY_ONLINE: + and avail_status_current == dccommon_consts.AVAILABILITY_ONLINE: # The first identity sync is already complete # Therefore this is an error LOG.error("Identity or Platform endpoint for online " diff --git a/distributedcloud/dcmanager/audit/utils.py b/distributedcloud/dcmanager/audit/utils.py index bc97c3454..427ba7110 100644 --- a/distributedcloud/dcmanager/audit/utils.py +++ b/distributedcloud/dcmanager/audit/utils.py @@ -1,15 +1,16 @@ -# Copyright (c) 2021 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2021, 2024 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# # # The right to copy, distribute, modify, or otherwise make use @@ -26,17 +27,17 @@ def request_subcloud_audits(context, audit_firmware=False, audit_kubernetes=False, audit_kube_rootca=False): - values = {} - if update_subcloud_state: - values['state_update_requested'] = True - if audit_patch: - values['patch_audit_requested'] = True - if audit_load: - values['load_audit_requested'] = True - if audit_firmware: - values['firmware_audit_requested'] = True - if audit_kubernetes: - values['kubernetes_audit_requested'] = True - if audit_kube_rootca: - values['kube_rootca_update_audit_requested'] = True - db_api.subcloud_audits_update_all(context, values) + values = {} + if update_subcloud_state: + values['state_update_requested'] = True + if audit_patch: + values['patch_audit_requested'] = True + if audit_load: + values['load_audit_requested'] = True + if audit_firmware: + values['firmware_audit_requested'] = True + if audit_kubernetes: + values['kubernetes_audit_requested'] = True + if audit_kube_rootca: + values['kube_rootca_update_audit_requested'] = True + db_api.subcloud_audits_update_all(context, values) diff --git a/distributedcloud/dcmanager/cmd/api.py b/distributedcloud/dcmanager/cmd/api.py index 460fb007d..5b203d999 100644 --- a/distributedcloud/dcmanager/cmd/api.py +++ b/distributedcloud/dcmanager/cmd/api.py @@ -1,5 +1,5 @@ # Copyright 2015 Huawei Technologies Co., Ltd. -# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc. +# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -19,24 +19,25 @@ # see http://git.openstack.org/cgit/openstack/ironic/tree/ironic/cmd/api.py +import logging as std_logging import sys import eventlet eventlet.monkey_patch(os=False) -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import systemd -from oslo_service import wsgi +# pylint: disable=wrong-import-position +from oslo_config import cfg # noqa: E402 +from oslo_log import log as logging # noqa: E402 +from oslo_service import systemd # noqa: E402 +from oslo_service import wsgi # noqa: E402 -import logging as std_logging +from dcmanager.api import api_config # noqa: E402 +from dcmanager.api import app # noqa: E402 +from dcmanager.common import config # noqa: E402 +from dcmanager.common import messaging # noqa: E402 +from dcorch.common import messaging as dcorch_messaging # noqa: E402 +# pylint: enable=wrong-import-position -from dcmanager.api import api_config -from dcmanager.api import app - -from dcmanager.common import config -from dcmanager.common import messaging -from dcorch.common import messaging as dcorch_messaging CONF = cfg.CONF config.register_options() LOG = logging.getLogger('dcmanager.api') diff --git a/distributedcloud/dcmanager/cmd/audit.py b/distributedcloud/dcmanager/cmd/audit.py index e5ef11643..1d863aef9 100644 --- a/distributedcloud/dcmanager/cmd/audit.py +++ b/distributedcloud/dcmanager/cmd/audit.py @@ -1,4 +1,6 @@ -# Copyright (c) 2021 Wind River Systems, Inc. +# Copyright (c) 2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -19,14 +21,15 @@ DC Manager Audit Service. 
import eventlet eventlet.monkey_patch() -from oslo_config import cfg -from oslo_i18n import _lazy -from oslo_log import log as logging -from oslo_service import service - -from dcmanager.common import config -from dcmanager.common import messaging +# pylint: disable=wrong-import-position +from oslo_config import cfg # noqa: E402 +from oslo_i18n import _lazy # noqa: E402 +from oslo_log import log as logging # noqa: E402 +from oslo_service import service # noqa: E402 +from dcmanager.common import config # noqa: E402 +from dcmanager.common import messaging # noqa: E402 +# pylint: enable=wrong-import-position _lazy.enable_lazy() config.register_options() @@ -55,5 +58,6 @@ def main(): launcher.wait() + if __name__ == '__main__': main() diff --git a/distributedcloud/dcmanager/cmd/audit_worker.py b/distributedcloud/dcmanager/cmd/audit_worker.py index 3af29f55c..01c24a287 100644 --- a/distributedcloud/dcmanager/cmd/audit_worker.py +++ b/distributedcloud/dcmanager/cmd/audit_worker.py @@ -1,4 +1,6 @@ -# Copyright (c) 2021 Wind River Systems, Inc. +# Copyright (c) 2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -19,14 +21,15 @@ DC Manager Audit Worker Service. 
import eventlet eventlet.monkey_patch() -from oslo_config import cfg -from oslo_i18n import _lazy -from oslo_log import log as logging -from oslo_service import service - -from dcmanager.common import config -from dcmanager.common import messaging +# pylint: disable=wrong-import-position +from oslo_config import cfg # noqa: E402 +from oslo_i18n import _lazy # noqa: E402 +from oslo_log import log as logging # noqa: E402 +from oslo_service import service # noqa: E402 +from dcmanager.common import config # noqa: E402 +from dcmanager.common import messaging # noqa: E402 +# pylint: enable=wrong-import-position _lazy.enable_lazy() config.register_options() @@ -55,5 +58,6 @@ def main(): launcher.wait() + if __name__ == '__main__': main() diff --git a/distributedcloud/dcmanager/cmd/manage.py b/distributedcloud/dcmanager/cmd/manage.py index 0b185370b..605e32da7 100644 --- a/distributedcloud/dcmanager/cmd/manage.py +++ b/distributedcloud/dcmanager/cmd/manage.py @@ -1,15 +1,17 @@ -# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # """ @@ -51,6 +53,7 @@ def add_command_parsers(subparsers): parser.add_argument('version', nargs='?') parser.add_argument('current_version', nargs='?') + command_opt = cfg.SubCommandOpt('command', title='Commands', help='Show available commands.', @@ -76,5 +79,6 @@ def main(): except Exception as e: sys.exit("ERROR: %s" % e) + if __name__ == '__main__': main() diff --git a/distributedcloud/dcmanager/cmd/manager.py b/distributedcloud/dcmanager/cmd/manager.py index 80524bf94..9873906e5 100644 --- a/distributedcloud/dcmanager/cmd/manager.py +++ b/distributedcloud/dcmanager/cmd/manager.py @@ -1,5 +1,7 @@ #!/usr/bin/env python -# Copyright (c) 2017-2021 Wind River Systems, Inc. +# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -18,18 +20,20 @@ DC Manager Engine Server. 
""" import eventlet + +# pylint: disable=wrong-import-position +from oslo_config import cfg # noqa: E402 +from oslo_i18n import _lazy # noqa: E402 +from oslo_log import log as logging # noqa: E402 +from oslo_service import service # noqa: E402 + +from dcmanager.common import config # noqa: E402 +from dcmanager.common import consts # noqa: E402 +from dcmanager.common import messaging # noqa: E402 +from dcorch.common import messaging as dcorch_messaging # noqa: E402 +# pylint: enable=wrong-import-position + eventlet.monkey_patch() - -from oslo_config import cfg -from oslo_i18n import _lazy -from oslo_log import log as logging -from oslo_service import service - -from dcmanager.common import config -from dcmanager.common import consts -from dcmanager.common import messaging -from dcorch.common import messaging as dcorch_messaging - _lazy.enable_lazy() config.register_options() config.register_keystone_options() @@ -59,5 +63,6 @@ def main(): # srv.create_periodic_tasks() launcher.wait() + if __name__ == '__main__': main() diff --git a/distributedcloud/dcmanager/cmd/orchestrator.py b/distributedcloud/dcmanager/cmd/orchestrator.py index 9bde434a1..f17640dcd 100644 --- a/distributedcloud/dcmanager/cmd/orchestrator.py +++ b/distributedcloud/dcmanager/cmd/orchestrator.py @@ -1,4 +1,6 @@ -# Copyright (c) 2020-2021 Wind River Systems, Inc. +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -19,14 +21,15 @@ DC Manager Orchestrator Service. 
import eventlet eventlet.monkey_patch() -from oslo_config import cfg -from oslo_i18n import _lazy -from oslo_log import log as logging -from oslo_service import service - -from dcmanager.common import config -from dcmanager.common import messaging +# pylint: disable=wrong-import-position +from oslo_config import cfg # noqa: E402 +from oslo_i18n import _lazy # noqa: E402 +from oslo_log import log as logging # noqa: E402 +from oslo_service import service # noqa: E402 +from dcmanager.common import config # noqa: E402 +from dcmanager.common import messaging # noqa: E402 +# pylint: enable=wrong-import-position CONF = cfg.CONF LOG = logging.getLogger('dcmanager.orchestrator') @@ -54,5 +57,6 @@ def main(): launcher.wait() + if __name__ == '__main__': main() diff --git a/distributedcloud/dcmanager/cmd/state.py b/distributedcloud/dcmanager/cmd/state.py index d2d236d59..f8d3cac91 100644 --- a/distributedcloud/dcmanager/cmd/state.py +++ b/distributedcloud/dcmanager/cmd/state.py @@ -12,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2022 Wind River Systems, Inc. +# Copyright (c) 2022, 2024 Wind River Systems, Inc. # # The right to copy, distribute, modify, or otherwise make use # of this software may be licensed only pursuant to the terms @@ -26,14 +26,16 @@ DC Manager State Engine Server. 
import eventlet eventlet.monkey_patch() -from oslo_config import cfg -from oslo_i18n import _lazy -from oslo_log import log as logging -from oslo_service import service +# pylint: disable=wrong-import-position +from oslo_config import cfg # noqa: E402 +from oslo_i18n import _lazy # noqa: E402 +from oslo_log import log as logging # noqa: E402 +from oslo_service import service # noqa: E402 -from dcmanager.common import config -from dcmanager.common import messaging -from dcorch.common import messaging as dcorch_messaging +from dcmanager.common import config # noqa: E402 +from dcmanager.common import messaging # noqa: E402 +from dcorch.common import messaging as dcorch_messaging # noqa: E402 +# pylint: enable=wrong-import-position _lazy.enable_lazy() config.register_options() @@ -65,5 +67,6 @@ def main(): launcher = service.launch(cfg.CONF, srv, workers=cfg.CONF.state_workers) launcher.wait() + if __name__ == '__main__': main() diff --git a/distributedcloud/dcmanager/common/consts.py b/distributedcloud/dcmanager/common/consts.py index 828ec4574..8c1c3ce0f 100644 --- a/distributedcloud/dcmanager/common/consts.py +++ b/distributedcloud/dcmanager/common/consts.py @@ -1,16 +1,18 @@ # Copyright (c) 2016 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # RPC_API_VERSION = "1.0" @@ -397,9 +399,10 @@ BITSTREAM_TYPE_KEY_REVOCATION = 'key-revocation' # Platform Backup size default in MB DEFAULT_PERSISTENT_SIZE = 30000 -# Retry values to be used when platform requests fail due to temporary unavailability, which -# may occur during some orchestration steps. The sleep duration and number of retries are shorter, -# since these should only occur if a service is being restarted +# Retry values to be used when platform requests fail due to temporary +# unavailability, which may occur during some orchestration steps. The sleep +# duration and number of retries are shorter, since these should only occur if a +# service is being restarted PLATFORM_RETRY_MAX_ATTEMPTS = 5 PLATFORM_RETRY_SLEEP_MILLIS = 5000 diff --git a/distributedcloud/dcmanager/common/context.py b/distributedcloud/dcmanager/common/context.py index 150debffc..328327b1b 100644 --- a/distributedcloud/dcmanager/common/context.py +++ b/distributedcloud/dcmanager/common/context.py @@ -1,27 +1,29 @@ -# Copyright (c) 2017-2022 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # -import pecan -from pecan import hooks import re -from six.moves.urllib.parse import urlparse import time from oslo_context import context as base_context from oslo_log import log from oslo_utils import encodeutils from oslo_utils import uuidutils +import pecan +from pecan import hooks +from six.moves.urllib.parse import urlparse from dcmanager.api.policies import base as base_policy from dcmanager.api import policy @@ -52,7 +54,6 @@ class RequestContext(base_context.RequestContext): user_domain_name=None, project_domain_name=None, auth_token_info=None, region_name=None, roles=None, password=None, **kwargs): - """Initializer of request context.""" # We still have 'tenant' param because oslo_context still use it. 
# pylint: disable=E1123 @@ -244,8 +245,8 @@ class AuditLoggingHook(hooks.PecanHook): # [req-088ed3b6-a2c9-483e-b2ad-f1b2d03e06e6 # 3d76d3c1376744e8ad9916a6c3be3e5f # ca53e70c76d847fd860693f8eb301546] - # When the ctx is defined, the formatter (defined in common/log.py) requires that keys - # request_id, user, tenant be defined within the ctx + # When the ctx is defined, the formatter (defined in common/log.py) requires + # that keys request_id, user, tenant be defined within the ctx ctx = {'request_id': request_id, 'user': user_id, 'tenant': tenant_id} @@ -261,4 +262,5 @@ class AuditLoggingHook(hooks.PecanHook): auditLOG.exception("Exception in AuditLoggingHook on event 'after'") def on_error(self, state, e): - auditLOG.exception("Exception in AuditLoggingHook passed to event 'on_error': " + str(e)) + auditLOG.exception("Exception in AuditLoggingHook passed to event " + "'on_error': " + str(e)) diff --git a/distributedcloud/dcmanager/common/exceptions.py b/distributedcloud/dcmanager/common/exceptions.py index 5ff852cd7..7e7bf4f41 100644 --- a/distributedcloud/dcmanager/common/exceptions.py +++ b/distributedcloud/dcmanager/common/exceptions.py @@ -1,6 +1,6 @@ # Copyright 2015 Huawei Technologies Co., Ltd. # Copyright 2015 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -39,14 +39,14 @@ class DCManagerException(Exception): def __init__(self, **kwargs): try: - super(DCManagerException, self).__init__(self.message % kwargs) # pylint: disable=W1645 + super(DCManagerException, self).__init__(self.message % kwargs) self.msg = self.message % kwargs # pylint: disable=W1645 except Exception: with excutils.save_and_reraise_exception() as ctxt: if not self.use_fatal_exceptions(): ctxt.reraise = False # at least get the core message out if something happened - super(DCManagerException, self).__init__(self.message) # pylint: disable=W1645 + super(DCManagerException, self).__init__(self.message) if six.PY2: def __unicode__(self): @@ -259,6 +259,7 @@ class PrestagePreCheckFailedException(DCManagerException): the subcloud can be skipped during orchestrated prestage operations. """ + def __init__(self, subcloud, details, orch_skip=False): self.orch_skip = orch_skip # Subcloud can be none if we are failing diff --git a/distributedcloud/dcmanager/common/manager.py b/distributedcloud/dcmanager/common/manager.py index 6fe89d98c..954b74462 100644 --- a/distributedcloud/dcmanager/common/manager.py +++ b/distributedcloud/dcmanager/common/manager.py @@ -1,6 +1,6 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. -# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc. +# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -72,7 +72,6 @@ class Manager(PeriodicTasks): return self.run_periodic_tasks(context, raise_on_error=raise_on_error) def init_host(self): - """init_host Hook to do additional manager initialization when one requests @@ -84,7 +83,6 @@ class Manager(PeriodicTasks): pass def cleanup_host(self): - """cleanup_host Hook to do cleanup work when the service shuts down. 
@@ -94,7 +92,6 @@ class Manager(PeriodicTasks): pass def pre_start_hook(self): - """pre_start_hook Hook to provide the manager the ability to do additional @@ -107,7 +104,6 @@ class Manager(PeriodicTasks): pass def post_start_hook(self): - """post_start_hook Hook to provide the manager the ability to do additional diff --git a/distributedcloud/dcmanager/common/phased_subcloud_deploy.py b/distributedcloud/dcmanager/common/phased_subcloud_deploy.py index 1a0d54e47..ed97cb488 100644 --- a/distributedcloud/dcmanager/common/phased_subcloud_deploy.py +++ b/distributedcloud/dcmanager/common/phased_subcloud_deploy.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -241,13 +241,13 @@ def validate_subcloud_config(context, payload, operation=None, LOG.exception(e) pecan.abort(400, _("management_end_address invalid: %s") % e) - if not management_start_ip < management_end_ip: + if management_start_ip >= management_end_ip: pecan.abort( 400, _("management_start_address not less than " "management_end_address")) - if not len(netaddr.IPRange(management_start_ip, management_end_ip)) >= \ + if len(netaddr.IPRange(management_start_ip, management_end_ip)) < \ MIN_MANAGEMENT_ADDRESSES: pecan.abort( 400, @@ -379,13 +379,13 @@ def validate_admin_network_config(admin_subnet_str, LOG.exception(e) pecan.abort(400, _("admin_end_address invalid: %s") % e) - if not admin_start_ip < admin_end_ip: + if admin_start_ip >= admin_end_ip: pecan.abort( 400, _("admin_start_address not less than " "admin_end_address")) - if not len(netaddr.IPRange(admin_start_ip, admin_end_ip)) >= \ + if len(netaddr.IPRange(admin_start_ip, admin_end_ip)) < \ MIN_ADMIN_ADDRESSES: pecan.abort( 400, @@ -975,7 +975,9 @@ def populate_payload_with_pre_existing_data(payload: dict, msg = _("Required %s file was not provided and it
was not " "previously available.") % value pecan.abort(400, msg) - payload.update(dict(list(existing_values.items()) + list(payload.items()))) + payload.update( + dict(list(existing_values.items()) + list(payload.items())) + ) elif value == consts.DEPLOY_CONFIG: if not payload.get(consts.DEPLOY_CONFIG): fn = get_config_file_path(subcloud.name, value) diff --git a/distributedcloud/dcmanager/common/serializer.py b/distributedcloud/dcmanager/common/serializer.py index 9c58b94fd..5a443b0d4 100644 --- a/distributedcloud/dcmanager/common/serializer.py +++ b/distributedcloud/dcmanager/common/serializer.py @@ -1,18 +1,18 @@ # Copyright 2015 Huawei Technologies Co., Ltd. -# Copyright (c) 2017, 2019, 2021, 2022 Wind River Systems, Inc. +# Copyright (c) 2017, 2019, 2021, 2022, 2024 Wind River Systems, Inc. +# All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# import oslo_messaging @@ -27,6 +27,7 @@ class Mapping(object): for key, value in mapping.items(): self.reverse_mapping[value] = key + _SINGLETON_MAPPING = Mapping({ ATTR_NOT_SPECIFIED: "@@**ATTR_NOT_SPECIFIED**@@", }) diff --git a/distributedcloud/dcmanager/common/utils.py b/distributedcloud/dcmanager/common/utils.py index 34d3087ec..8310d9741 100644 --- a/distributedcloud/dcmanager/common/utils.py +++ b/distributedcloud/dcmanager/common/utils.py @@ -1,43 +1,46 @@ # Copyright 2015 Huawei Technologies Co., Ltd. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# import datetime import grp import itertools import json -import netaddr import os -import pecan import pwd import re -import resource as sys_resource -import six.moves import string import subprocess -import tsconfig.tsconfig as tsc import uuid + +import resource as sys_resource import xml.etree.ElementTree as ElementTree + import yaml +import pecan + from keystoneauth1 import exceptions as keystone_exceptions +import netaddr from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import base64 +import six.moves +import tsconfig.tsconfig as tsc from dccommon import consts as dccommon_consts from dccommon.drivers.openstack.sdk_platform import OpenStackDriver @@ -249,43 +252,41 @@ def get_sw_update_strategy_extra_args(context, update_type=None): return {} -def get_sw_update_opts(context, - for_sw_update=False, subcloud_id=None): - """Get sw update options for a subcloud +def get_sw_update_opts(context, for_sw_update=False, subcloud_id=None): + """Get sw update options for a subcloud - :param context: request context object. - :param for_sw_update: return the default options if subcloud options - are empty. Useful for retrieving sw update - options on application of patch strategy. - :param subcloud_id: id of subcloud. + :param context: request context object. + :param for_sw_update: return the default options if subcloud options + are empty. Useful for retrieving sw update + options on application of patch strategy. + :param subcloud_id: id of subcloud. - """ + """ - if subcloud_id is None: - # Requesting defaults. Return constants if no entry in db. + if subcloud_id is None: + # Requesting defaults. Return constants if no entry in db. 
+ sw_update_opts_ref = db_api.sw_update_opts_default_get(context) + if not sw_update_opts_ref: + sw_update_opts_dict = vim.SW_UPDATE_OPTS_CONST_DEFAULT + return sw_update_opts_dict + else: + # requesting subcloud options + sw_update_opts_ref = db_api.sw_update_opts_get(context, subcloud_id) + if sw_update_opts_ref: + subcloud_name = db_api.subcloud_get(context, subcloud_id).name + return db_api.sw_update_opts_w_name_db_model_to_dict( + sw_update_opts_ref, subcloud_name) + elif for_sw_update: sw_update_opts_ref = db_api.sw_update_opts_default_get(context) if not sw_update_opts_ref: sw_update_opts_dict = vim.SW_UPDATE_OPTS_CONST_DEFAULT return sw_update_opts_dict else: - # requesting subcloud options - sw_update_opts_ref = db_api.sw_update_opts_get(context, - subcloud_id) - if sw_update_opts_ref: - subcloud_name = db_api.subcloud_get(context, subcloud_id).name - return db_api.sw_update_opts_w_name_db_model_to_dict( - sw_update_opts_ref, subcloud_name) - elif for_sw_update: - sw_update_opts_ref = db_api.sw_update_opts_default_get(context) - if not sw_update_opts_ref: - sw_update_opts_dict = vim.SW_UPDATE_OPTS_CONST_DEFAULT - return sw_update_opts_dict - else: - raise exceptions.SubcloudPatchOptsNotFound( - subcloud_id=subcloud_id) + raise exceptions.SubcloudPatchOptsNotFound( + subcloud_id=subcloud_id) - return db_api.sw_update_opts_w_name_db_model_to_dict( - sw_update_opts_ref, dccommon_consts.SW_UPDATE_DEFAULT_TITLE) + return db_api.sw_update_opts_w_name_db_model_to_dict( + sw_update_opts_ref, dccommon_consts.SW_UPDATE_DEFAULT_TITLE) def ensure_lock_path(): @@ -618,8 +619,8 @@ def subcloud_peer_group_get_by_ref(context, group_ref): def subcloud_db_list_to_dict(subclouds): - return {'subclouds': [db_api.subcloud_db_model_to_dict(subcloud) - for subcloud in subclouds]} + return {'subclouds': + [db_api.subcloud_db_model_to_dict(subcloud) for subcloud in subclouds]} def get_oam_addresses(subcloud, sc_ks_client): @@ -811,7 +812,7 @@ def 
find_ansible_error_msg(subcloud_name, log_file, stage=None): else: files_for_search.append(log_file) - if (len(files_for_search) < 2): + if len(files_for_search) < 2: cmd_list = ([cmd_1, cmd_2, files_for_search[0]]) else: cmd_list = ([cmd_1, cmd_2, files_for_search[0], files_for_search[1]]) @@ -858,7 +859,6 @@ def get_failed_task(files): Returns a string with the task and date """ - cmd_1 = 'awk' # awk command to get the information about last failed task. # Match expression starting with 'TASK [' and ending with @@ -873,7 +873,7 @@ def get_failed_task(files): ''') # necessary check since is possible to have # the error in rotated ansible log - if (len(files) < 2): + if len(files) < 2: awk_cmd = ([cmd_1, cmd_2, files[0]]) else: awk_cmd = ([cmd_1, cmd_2, files[0], files[1]]) @@ -903,9 +903,7 @@ def summarize_message(error_msg): Returns a brief message. """ - list_of_strings_to_search_for = [ - 'msg:', 'fail', 'error', 'cmd', 'stderr' - ] + list_of_strings_to_search_for = ['msg:', 'fail', 'error', 'cmd', 'stderr'] brief_message = [] for line in error_msg: for s in list_of_strings_to_search_for: @@ -933,10 +931,9 @@ def is_valid_for_backup_operation(operation, subcloud, bootstrap_address_dict=No def _is_valid_for_backup_create(subcloud): - if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE \ - or subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED \ - or subcloud.deploy_status not in consts.VALID_DEPLOY_STATES_FOR_BACKUP: + or subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED \ + or subcloud.deploy_status not in consts.VALID_DEPLOY_STATES_FOR_BACKUP: msg = ('Subcloud %s must be online, managed and have valid ' 'deploy-status for the subcloud-backup ' 'create operation.' 
% subcloud.name) @@ -946,9 +943,8 @@ def _is_valid_for_backup_delete(subcloud): - if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE \ - or subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED: + or subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED: msg = ('Subcloud %s must be online and managed for the subcloud-backup' ' delete operation with --local-only option.' % subcloud.name) raise exceptions.ValidateFail(msg) @@ -967,7 +963,7 @@ def _is_valid_for_backup_restore(subcloud, bootstrap_address_dict=None): has_inventory_file = os.path.exists(ansible_subcloud_inventory_file) if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED \ - or subcloud.deploy_status in consts.INVALID_DEPLOY_STATES_FOR_RESTORE: + or subcloud.deploy_status in consts.INVALID_DEPLOY_STATES_FOR_RESTORE: msg = ('Subcloud %s must be unmanaged and in a valid deploy state ' 'for the subcloud-backup restore operation.' % subcloud.name) elif not (has_bootstrap_address or has_install_values or has_inventory_file): @@ -1401,7 +1397,7 @@ def get_sw_version(release=None): def validate_release_version_supported(release_version_to_check): - """Given a release version, check whether it's supported by the current active version. + """Check if a release version is supported by the current active version. :param release_version_to_check: version string to validate diff --git a/distributedcloud/dcmanager/db/api.py b/distributedcloud/dcmanager/db/api.py index 4d651b81d..06a4f50bd 100644 --- a/distributedcloud/dcmanager/db/api.py +++ b/distributedcloud/dcmanager/db/api.py @@ -1,5 +1,5 @@ # Copyright (c) 2015 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. # + """ Interface for database access. @@ -136,13 +137,12 @@ def subcloud_create(context, name, description, location, software_version, systemcontroller_gateway_ip, deploy_status, error_description, region_name, openstack_installed, group_id, data_install=None): """Create a subcloud.""" - return IMPL.subcloud_create(context, name, description, location, - software_version, - management_subnet, management_gateway_ip, - management_start_ip, management_end_ip, - systemcontroller_gateway_ip, deploy_status, - error_description, region_name, openstack_installed, group_id, - data_install) + return IMPL.subcloud_create( + context, name, description, location, software_version, management_subnet, + management_gateway_ip, management_start_ip, management_end_ip, + systemcontroller_gateway_ip, deploy_status, error_description, region_name, + openstack_installed, group_id, data_install + ) def subcloud_get(context, subcloud_id): @@ -185,29 +185,26 @@ def subcloud_get_all_with_status(context): return IMPL.subcloud_get_all_with_status(context) -def subcloud_update(context, subcloud_id, management_state=None, - availability_status=None, software_version=None, name=None, - description=None, management_subnet=None, management_gateway_ip=None, - management_start_ip=None, management_end_ip=None, - location=None, audit_fail_count=None, - deploy_status=None, backup_status=None, - backup_datetime=None, error_description=None, - openstack_installed=None, group_id=None, - data_install=None, data_upgrade=None, - first_identity_sync_complete=None, - systemcontroller_gateway_ip=None, - peer_group_id=None, rehome_data=None, rehomed=None): +def subcloud_update( + context, subcloud_id, management_state=None, availability_status=None, + software_version=None, name=None, description=None, 
management_subnet=None, + management_gateway_ip=None, management_start_ip=None, management_end_ip=None, + location=None, audit_fail_count=None, deploy_status=None, backup_status=None, + backup_datetime=None, error_description=None, openstack_installed=None, + group_id=None, data_install=None, data_upgrade=None, + first_identity_sync_complete=None, systemcontroller_gateway_ip=None, + peer_group_id=None, rehome_data=None, rehomed=None +): """Update a subcloud or raise if it does not exist.""" - return IMPL.subcloud_update(context, subcloud_id, management_state, - availability_status, software_version, name, - description, management_subnet, management_gateway_ip, - management_start_ip, management_end_ip, location, - audit_fail_count, deploy_status, backup_status, - backup_datetime, error_description, openstack_installed, - group_id, data_install, data_upgrade, - first_identity_sync_complete, - systemcontroller_gateway_ip, peer_group_id, - rehome_data, rehomed) + return IMPL.subcloud_update( + context, subcloud_id, management_state, availability_status, + software_version, name, description, management_subnet, + management_gateway_ip, management_start_ip, management_end_ip, location, + audit_fail_count, deploy_status, backup_status, backup_datetime, + error_description, openstack_installed, group_id, data_install, data_upgrade, + first_identity_sync_complete, systemcontroller_gateway_ip, peer_group_id, + rehome_data, rehomed + ) def subcloud_bulk_update_by_ids(context, subcloud_ids, update_form): @@ -220,8 +217,6 @@ def subcloud_destroy(context, subcloud_id): return IMPL.subcloud_destroy(context, subcloud_id) -################### - def subcloud_status_create(context, subcloud_id, endpoint_type): """Create a subcloud status for an endpoint_type.""" return IMPL.subcloud_status_create(context, subcloud_id, endpoint_type) @@ -261,7 +256,6 @@ def subcloud_endpoint_status_db_model_to_dict(subcloud_status): def subcloud_status_get(context, subcloud_id, endpoint_type): - 
"""Retrieve the subcloud status for an endpoint Will raise if subcloud does not exist. @@ -766,7 +760,6 @@ def sw_update_opts_update(context, subcloud_id, max_parallel_workers=None, alarm_restriction_type=None, default_instance_action=None): - """Update sw update options or raise if it does not exist.""" return IMPL.sw_update_opts_update(context, subcloud_id, storage_apply_type, @@ -806,7 +799,6 @@ def sw_update_opts_default_update(context, max_parallel_workers=None, alarm_restriction_type=None, default_instance_action=None): - """Update default sw update options.""" return IMPL.sw_update_opts_default_update(context, storage_apply_type, diff --git a/distributedcloud/dcmanager/db/sqlalchemy/api.py b/distributedcloud/dcmanager/db/sqlalchemy/api.py index f98676985..0f27bf37e 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/api.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/api.py @@ -1,18 +1,18 @@ # Copyright (c) 2015 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # """ @@ -20,7 +20,6 @@ Implementation of SQLAlchemy backend. """ import datetime -import sqlalchemy import sys import threading @@ -31,7 +30,7 @@ from oslo_db.sqlalchemy import enginefacade from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import uuidutils - +import sqlalchemy from sqlalchemy import desc from sqlalchemy import or_ from sqlalchemy.orm.exc import MultipleResultsFound @@ -198,12 +197,14 @@ def subcloud_audits_get_all_need_audit(context, last_audit_threshold): with read_session() as session: result = session.query(models.SubcloudAudits).\ filter_by(deleted=0).\ - filter(models.SubcloudAudits.audit_started_at <= models.SubcloudAudits.audit_finished_at).\ + filter(models.SubcloudAudits.audit_started_at <= + models.SubcloudAudits.audit_finished_at).\ filter((models.SubcloudAudits.audit_finished_at < last_audit_threshold) | (models.SubcloudAudits.patch_audit_requested == true()) | (models.SubcloudAudits.firmware_audit_requested == true()) | (models.SubcloudAudits.load_audit_requested == true()) | - (models.SubcloudAudits.kube_rootca_update_audit_requested == true()) | + (models.SubcloudAudits.kube_rootca_update_audit_requested == + true()) | (models.SubcloudAudits.kubernetes_audit_requested == true())).\ all() return result @@ -334,10 +335,9 @@ def subcloud_get_by_region_name(context, region_name): @require_context def subcloud_get_by_name_or_region_name(context, name): - result = model_query(context, models.Subcloud). \ - filter_by(deleted=0). \ - filter(or_(models.Subcloud.name == name, models.Subcloud.region_name == name)). 
\ - first() + result = model_query(context, models.Subcloud).filter_by(deleted=0).filter( + or_(models.Subcloud.name == name, models.Subcloud.region_name == name) + ).first() if not result: raise exception.SubcloudNameOrRegionNameNotFound(name=name) diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py index e5ec47235..0e5256358 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py @@ -1,23 +1,24 @@ # Copyright (c) 2015 Ericsson AB. -# Copyright (c) 2017-2021 Wind River Systems, Inc. +# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc. # All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# -from dccommon.drivers.openstack import vim import sqlalchemy +from dccommon.drivers.openstack import vim + def upgrade(migrate_engine): meta = sqlalchemy.MetaData() diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/008_add_subcloud_audits_table.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/008_add_subcloud_audits_table.py index 60268f5ea..55c4900c6 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/008_add_subcloud_audits_table.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/008_add_subcloud_audits_table.py @@ -1,15 +1,17 @@ -# Copyright (c) 2021 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# import datetime @@ -58,7 +60,8 @@ def upgrade(migrate_engine): subcloud_list = list(subclouds.select().where(subclouds.c.deleted == 0) .order_by(subclouds.c.id).execute()) for subcloud in subcloud_list: - subcloud_audits.insert().execute({'subcloud_id': subcloud['id']}) # pylint: disable=no-value-for-parameter + # pylint: disable-next=no-value-for-parameter + subcloud_audits.insert().execute({'subcloud_id': subcloud['id']}) def downgrade(migrate_engine): diff --git a/distributedcloud/dcmanager/db/sqlalchemy/models.py b/distributedcloud/dcmanager/db/sqlalchemy/models.py index e637b31ed..9a4fc35a4 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/models.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/models.py @@ -1,5 +1,5 @@ # Copyright (c) 2015 Ericsson AB -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. # + """ SQLAlchemy models for dcmanager data. 
""" @@ -22,26 +23,20 @@ import datetime import json from oslo_db.sqlalchemy import models - -from sqlalchemy.orm import backref -from sqlalchemy.orm import relationship -from sqlalchemy.orm import session as orm_session - from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime +from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import ForeignKey from sqlalchemy import Integer +from sqlalchemy.orm import backref +from sqlalchemy.orm import relationship +from sqlalchemy.orm import session as orm_session from sqlalchemy import String from sqlalchemy import Text - -from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.types import TypeDecorator from sqlalchemy.types import VARCHAR - -# from dcmanager.common import consts - BASE = declarative_base() @@ -209,14 +204,17 @@ class SubcloudAudits(BASE, DCManagerBase): subcloud_id = Column(Integer, ForeignKey('subclouds.id', ondelete='CASCADE'), unique=True) - audit_started_at = Column(DateTime(timezone=False), default=datetime.datetime.min) - audit_finished_at = Column(DateTime(timezone=False), default=datetime.datetime.min) + audit_started_at = Column(DateTime(timezone=False), + default=datetime.datetime.min) + audit_finished_at = Column(DateTime(timezone=False), + default=datetime.datetime.min) state_update_requested = Column(Boolean, nullable=False, default=False) patch_audit_requested = Column(Boolean, nullable=False, default=False) load_audit_requested = Column(Boolean, nullable=False, default=False) firmware_audit_requested = Column(Boolean, nullable=False, default=False) kubernetes_audit_requested = Column(Boolean, nullable=False, default=False) - kube_rootca_update_audit_requested = Column(Boolean, nullable=False, default=False) + kube_rootca_update_audit_requested = Column(Boolean, nullable=False, + default=False) spare_audit_requested = Column(Boolean, nullable=False, default=False) spare2_audit_requested = Column(Boolean, nullable=False, 
default=False) reserved = Column(Text) diff --git a/distributedcloud/dcmanager/manager/peer_group_audit_manager.py b/distributedcloud/dcmanager/manager/peer_group_audit_manager.py index 3db16dc7a..882f91bfd 100644 --- a/distributedcloud/dcmanager/manager/peer_group_audit_manager.py +++ b/distributedcloud/dcmanager/manager/peer_group_audit_manager.py @@ -29,6 +29,7 @@ LOG = logging.getLogger(__name__) class PeerGroupAuditManager(manager.Manager): """Manages audit related tasks.""" + def __init__(self, subcloud_manager, peer_group_id, *args, **kwargs): LOG.debug(_('PeerGroupAuditManager initialization...')) super().__init__(service_name="peer_group_audit_manager", @@ -117,7 +118,7 @@ class PeerGroupAuditManager(manager.Manager): # deploy status to consts.DEPLOY_STATE_REHOME_PENDING to stop cert-mon # audits. if remote_peer_group.get("migration_status") == \ - consts.PEER_GROUP_MIGRATING: + consts.PEER_GROUP_MIGRATING: # Unmanaged all local subclouds of peer group LOG.info("Unmanaged all local subclouds of peer group %s " "since remote is in migrating state" % @@ -130,7 +131,7 @@ class PeerGroupAuditManager(manager.Manager): # an already unmanaged subcloud, so the deploy status # update must be done separately if subcloud.management_state != \ - dccommon_consts.MANAGEMENT_UNMANAGED: + dccommon_consts.MANAGEMENT_UNMANAGED: # Unmanage and update the deploy-status LOG.info("Unmanaging and setting the local subcloud " f"{subcloud.name} deploy status to " @@ -160,7 +161,7 @@ class PeerGroupAuditManager(manager.Manager): # get remote subclouds. 
For 'managed+online' subclouds, # set 'unmanaged+secondary' to local on same subclouds elif remote_peer_group.get("migration_status") == \ - consts.PEER_GROUP_MIGRATION_COMPLETE: + consts.PEER_GROUP_MIGRATION_COMPLETE: remote_subclouds = \ self._get_subclouds_by_peer_group_from_system_peer( system_peer, @@ -182,7 +183,7 @@ class PeerGroupAuditManager(manager.Manager): # There will be an exception when unmanage # a subcloud in 'unamaged' state. if subcloud.management_state != \ - dccommon_consts.MANAGEMENT_UNMANAGED: + dccommon_consts.MANAGEMENT_UNMANAGED: self.subcloud_manager.update_subcloud( self.context, subcloud.id, diff --git a/distributedcloud/dcmanager/manager/peer_monitor_manager.py b/distributedcloud/dcmanager/manager/peer_monitor_manager.py index 0f836280c..57de89db1 100644 --- a/distributedcloud/dcmanager/manager/peer_monitor_manager.py +++ b/distributedcloud/dcmanager/manager/peer_monitor_manager.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -131,17 +131,21 @@ class PeerMonitor(object): self._raise_failure() db_api.system_peer_update( self.context, self.peer.id, - availability_state=consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE) + availability_state= # noqa: E251 + consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE + ) failure_count = 0 self._set_require_audit_flag_to_associated_peer_groups() else: failure_count = 0 self._audit_local_peer_groups(remote_pg_list) if self.peer.availability_state != \ - consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE: + consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE: db_api.system_peer_update( self.context, self.peer.id, - availability_state=consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE) + availability_state= # noqa: E251 + consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE + ) LOG.info("DC %s back online, clear alarm" % self.peer.peer_name) self._clear_failure() @@ -167,9 +171,8 @@ class PeerMonitor(object): # Audit for require_audit_flag is True or # Remote peer group is in 'complete' state. 
if (pgam_obj.require_audit_flag - or remote_peer_group.get("migration_status") - == consts.PEER_GROUP_MIGRATION_COMPLETE - ): + or remote_peer_group.get("migration_status") == + consts.PEER_GROUP_MIGRATION_COMPLETE): pgam_obj.audit_peer_group_from_system( self.peer, remote_peer_group, peer_group) else: @@ -191,7 +194,7 @@ class PeerMonitor(object): return msg def _clean_peer_group_audit_threads(self): - for peer_group_id in self.peer_group_audit_obj_map: + for peer_group_id, _ in self.peer_group_audit_obj_map.items(): pgam_obj = \ self.peer_group_audit_obj_map[peer_group_id] pgam_obj.stop() @@ -235,6 +238,7 @@ class PeerMonitor(object): class PeerMonitorManager(manager.Manager): """Manages tasks related to peer monitor.""" + def __init__(self, subcloud_manager): LOG.debug('PeerMonitorManager initialization...') diff --git a/distributedcloud/dcmanager/manager/service.py b/distributedcloud/dcmanager/manager/service.py index fec561762..83fc82d82 100644 --- a/distributedcloud/dcmanager/manager/service.py +++ b/distributedcloud/dcmanager/manager/service.py @@ -1,22 +1,25 @@ # Copyright (c) 2017-2024 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # +import functools import os -import six import threading -import functools +import six + from oslo_config import cfg from oslo_log import log as logging import oslo_messaging @@ -159,16 +162,18 @@ class DCManagerService(service.Service): # get subcloud by region name LOG.debug("Handling get_subcloud_name_by_region_name request for " "region: %s" % subcloud_region) - subcloud = self.subcloud_manager.get_subcloud_name_by_region_name(context, - subcloud_region) + subcloud = self.subcloud_manager.get_subcloud_name_by_region_name( + context, subcloud_region + ) return subcloud @request_context - def update_subcloud(self, context, subcloud_id, management_state=None, - description=None, location=None, - group_id=None, data_install=None, force=None, - deploy_status=None, - peer_group_id=None, bootstrap_values=None, bootstrap_address=None): + def update_subcloud( + self, context, subcloud_id, management_state=None, description=None, + location=None, group_id=None, data_install=None, force=None, + deploy_status=None, peer_group_id=None, bootstrap_values=None, + bootstrap_address=None + ): # Updates a subcloud LOG.info("Handling update_subcloud request for: %s" % subcloud_id) subcloud = self.subcloud_manager.update_subcloud(context, subcloud_id, @@ -188,9 +193,8 @@ class DCManagerService(service.Service): def update_subcloud_with_network_reconfig(self, context, subcloud_id, payload): LOG.info("Handling update_subcloud_with_network_reconfig request for: %s", subcloud_id) - return self.subcloud_manager.update_subcloud_with_network_reconfig(context, - subcloud_id, - payload) + return self.subcloud_manager.update_subcloud_with_network_reconfig( + context, 
subcloud_id, payload) @run_in_thread @request_context diff --git a/distributedcloud/dcmanager/manager/subcloud_manager.py b/distributedcloud/dcmanager/manager/subcloud_manager.py index e8f2e74af..66147f15b 100644 --- a/distributedcloud/dcmanager/manager/subcloud_manager.py +++ b/distributedcloud/dcmanager/manager/subcloud_manager.py @@ -1,19 +1,20 @@ # Copyright 2017 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# + from __future__ import division import base64 @@ -95,7 +96,8 @@ ANSIBLE_SUBCLOUD_UPDATE_PLAYBOOK = \ # the support of rehoming a subcloud with a software version below 22.12 ANSIBLE_VALIDATE_KEYSTONE_PASSWORD_SCRIPT = \ consts.ANSIBLE_CURRENT_VERSION_BASE_PATH + \ - '/roles/rehome-subcloud/update-keystone-data/files/validate_keystone_passwords.sh' + '/roles/rehome-subcloud/update-keystone-data/files/' + \ + 'validate_keystone_passwords.sh' USERS_TO_REPLICATE = [ 'sysinv', @@ -344,14 +346,15 @@ class SubcloudManager(manager.Manager): software_version if software_version else SW_VERSION] return bootstrap_command - def compose_config_command(self, subcloud_name, ansible_subcloud_inventory_file, payload): + def compose_config_command( + self, subcloud_name, ansible_subcloud_inventory_file, payload): config_command = [ "ansible-playbook", payload[consts.DEPLOY_PLAYBOOK], "-e", "@%s" % dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + subcloud_name + '_deploy_values.yml', "-i", ansible_subcloud_inventory_file, "--limit", subcloud_name - ] + ] return config_command def compose_backup_command(self, subcloud_name, ansible_subcloud_inventory_file): @@ -359,34 +362,42 @@ class SubcloudManager(manager.Manager): "ansible-playbook", ANSIBLE_SUBCLOUD_BACKUP_CREATE_PLAYBOOK, "-i", ansible_subcloud_inventory_file, "--limit", subcloud_name, - "-e", "subcloud_bnr_overrides=%s" % dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + - subcloud_name + "_backup_create_values.yml"] - + "-e", + "subcloud_bnr_overrides=%s" % ( + dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + subcloud_name + + "_backup_create_values.yml" + ) + ] return backup_command def compose_backup_delete_command(self, subcloud_name, ansible_subcloud_inventory_file=None): backup_command = [ "ansible-playbook", ANSIBLE_SUBCLOUD_BACKUP_DELETE_PLAYBOOK, - "-e", "subcloud_bnr_overrides=%s" % dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + - subcloud_name + "_backup_delete_values.yml"] + "-e", "subcloud_bnr_overrides=%s" % + 
dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + + subcloud_name + "_backup_delete_values.yml" + ] if ansible_subcloud_inventory_file: # Backup stored in subcloud storage backup_command.extend(("-i", ansible_subcloud_inventory_file, - "--limit", subcloud_name)) + "--limit", subcloud_name)) else: # Backup stored in central storage backup_command.extend(("-e", "inventory_hostname=%s" % subcloud_name)) return backup_command - def compose_backup_restore_command(self, subcloud_name, ansible_subcloud_inventory_file): + def compose_backup_restore_command( + self, subcloud_name, ansible_subcloud_inventory_file): backup_command = [ "ansible-playbook", ANSIBLE_SUBCLOUD_BACKUP_RESTORE_PLAYBOOK, "-i", ansible_subcloud_inventory_file, "--limit", subcloud_name, - "-e", "subcloud_bnr_overrides=%s" % dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + - subcloud_name + "_backup_restore_values.yml"] - + "-e", "subcloud_bnr_overrides=%s" % ( + dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + subcloud_name + + "_backup_restore_values.yml" + ) + ] return backup_command def compose_update_command(self, subcloud_name, ansible_subcloud_inventory_file): @@ -395,8 +406,11 @@ class SubcloudManager(manager.Manager): "-i", ansible_subcloud_inventory_file, "--limit", subcloud_name, "--timeout", UPDATE_PLAYBOOK_TIMEOUT, - "-e", "subcloud_update_overrides=%s" % dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + - subcloud_name + "_update_values.yml"] + "-e", "subcloud_update_overrides=%s" % ( + dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + subcloud_name + + "_update_values.yml" + ) + ] return subcloud_update_command def compose_rehome_command(self, subcloud_name, subcloud_region, @@ -447,7 +461,7 @@ class SubcloudManager(manager.Manager): while True: offline_seconds = time.monotonic() - job_done_ts if subcloud.availability_status == \ - dccommon_consts.AVAILABILITY_OFFLINE: + dccommon_consts.AVAILABILITY_OFFLINE: if offline_seconds >= consts.BATCH_REHOME_MGMT_STATES_TIMEOUT: LOG.warning("Skip trying to 
manage subcloud: %s, " "wait online timeout [%d]" % @@ -487,7 +501,7 @@ class SubcloudManager(manager.Manager): self.context, association.system_peer_id) # Get 'available' system peer if system_peer.availability_state != \ - consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE: + consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE: LOG.warning("Peer system %s offline, skip checking" % system_peer.peer_name) continue @@ -607,16 +621,19 @@ class SubcloudManager(manager.Manager): # subcloud_ref could be int type id. subcloud = utils.subcloud_get_by_ref(context, str(subcloud_ref)) if not subcloud: - LOG.error("Failed to migrate, non-existent subcloud %s" % subcloud_ref) + LOG.error( + "Failed to migrate, non-existent subcloud %s" % subcloud_ref + ) return if 'sysadmin_password' not in payload: - LOG.error("Failed to migrate subcloud: %s, must provide sysadmin_password" % - subcloud.name) + LOG.error("Failed to migrate subcloud: %s, must provide " + "sysadmin_password" % subcloud.name) return - if subcloud.deploy_status not in [consts.DEPLOY_STATE_SECONDARY, - consts.DEPLOY_STATE_REHOME_FAILED, - consts.DEPLOY_STATE_REHOME_PREP_FAILED]: + if subcloud.deploy_status not in [ + consts.DEPLOY_STATE_SECONDARY, consts.DEPLOY_STATE_REHOME_FAILED, + consts.DEPLOY_STATE_REHOME_PREP_FAILED + ]: LOG.error("Failed to migrate subcloud: %s, " "must be in secondary or rehome failure state" % subcloud.name) @@ -628,7 +645,8 @@ class SubcloudManager(manager.Manager): rehome_data = json.loads(subcloud.rehome_data) saved_payload = rehome_data['saved_payload'] # Update sysadmin_password - sysadmin_password = base64.b64decode(payload['sysadmin_password']).decode('utf-8') + sysadmin_password = \ + base64.b64decode(payload['sysadmin_password']).decode('utf-8') saved_payload['sysadmin_password'] = sysadmin_password # Decode admin_password if 'admin_password' in saved_payload: @@ -814,7 +832,8 @@ class SubcloudManager(manager.Manager): :param subcloud_id: id of the subcloud :param payload: subcloud 
configuration """ - LOG.info(f"Adding subcloud {payload['name']} with region {payload['region_name']}.") + LOG.info(f"Adding subcloud {payload['name']} with region " + f"{payload['region_name']}.") rehoming = payload.get('migrate', '').lower() == "true" secondary = (payload.get('secondary', '').lower() == "true") @@ -1202,8 +1221,9 @@ class SubcloudManager(manager.Manager): :param payload: subcloud resume payload :param deploy_states_to_run: deploy phases pending execution """ - LOG.info("Resuming deployment of subcloud %s. Deploy phases to be executed: %s" - % (subcloud_name, ', '.join(deploy_states_to_run))) + LOG.info( + "Resuming deployment of subcloud %s. Deploy phases to be executed: %s" % + (subcloud_name, ', '.join(deploy_states_to_run))) self.run_deploy_phases(context, subcloud_id, payload, deploy_states_to_run, @@ -1294,7 +1314,8 @@ class SubcloudManager(manager.Manager): :param payload: subcloud configuration :param rehoming: flag indicating if this is part of a rehoming operation :param initial_deployment: initial_deployment flag from subcloud inventory - :param return_as_dict: converts the subcloud DB object to a dict before returning + :param return_as_dict: converts the subcloud DB object to a dict before + returning :return: resulting subcloud DB object or dictionary """ LOG.info("Creating subcloud %s." 
% payload['name']) @@ -1454,7 +1475,8 @@ class SubcloudManager(manager.Manager): if 'admin_password' in original_payload: # Encode admin_password original_payload['admin_password'] = base64.b64encode( - original_payload['admin_password'].encode("utf-8")).decode('utf-8') + original_payload['admin_password'].encode("utf-8") + ).decode('utf-8') bootstrap_info = utils.create_subcloud_rehome_data_template() bootstrap_info['saved_payload'] = original_payload rehome_data = json.dumps(bootstrap_info) @@ -1914,11 +1936,13 @@ class SubcloudManager(manager.Manager): return subcloud, success @staticmethod - def _build_subcloud_operation_notice(operation, failed_subclouds, invalid_subclouds): + def _build_subcloud_operation_notice( + operation, failed_subclouds, invalid_subclouds): invalid_subcloud_names = [subcloud.name for subcloud in invalid_subclouds] failed_subcloud_names = [subcloud.name for subcloud in failed_subclouds] - notice = "Subcloud backup %s operation completed with warnings:\n" % operation + notice = ( + "Subcloud backup %s operation completed with warnings:\n" % operation) if invalid_subclouds: notice += ("The following subclouds were skipped for local backup " "%s operation: %s." @@ -2417,15 +2441,21 @@ class SubcloudManager(manager.Manager): # both controllers. 
management_subnet = netaddr.IPNetwork(subcloud.management_subnet) endpoint = keystone_client.endpoint_cache.get_endpoint('sysinv') - sysinv_client = SysinvClient(dccommon_consts.DEFAULT_REGION_NAME, keystone_client.session, - endpoint=endpoint) - cached_regionone_data = self._get_cached_regionone_data(keystone_client, sysinv_client) + sysinv_client = SysinvClient( + dccommon_consts.DEFAULT_REGION_NAME, + keystone_client.session, + endpoint=endpoint + ) + cached_regionone_data = self._get_cached_regionone_data( + keystone_client, sysinv_client) for mgmt_if_uuid in cached_regionone_data['mgmt_interface_uuids']: - sysinv_client.delete_route(mgmt_if_uuid, - str(management_subnet.ip), - management_subnet.prefixlen, - str(netaddr.IPAddress(subcloud.systemcontroller_gateway_ip)), - 1) + sysinv_client.delete_route( + mgmt_if_uuid, + str(management_subnet.ip), + management_subnet.prefixlen, + str(netaddr.IPAddress(subcloud.systemcontroller_gateway_ip)), + 1 + ) @staticmethod def _delete_subcloud_cert(subcloud_region): @@ -2558,7 +2588,7 @@ class SubcloudManager(manager.Manager): mkey = list(data.keys())[0] if mkey in data and 'hosts' in data[mkey] and \ - cur_sc_name in data[mkey]['hosts']: + cur_sc_name in data[mkey]['hosts']: data[mkey]['hosts'][new_sc_name] = \ data[mkey]['hosts'].pop(cur_sc_name) @@ -2758,8 +2788,11 @@ class SubcloudManager(manager.Manager): # it's necessary to save it first, then put it back after # after bootstrap_values is updated. 
if 'bootstrap-address' in rehome_data_dict['saved_payload']: - _bootstrap_address = rehome_data_dict['saved_payload']['bootstrap-address'] - bootstrap_values_dict = yaml.load(bootstrap_values, Loader=yaml.SafeLoader) + _bootstrap_address = \ + rehome_data_dict['saved_payload']['bootstrap-address'] + bootstrap_values_dict = yaml.load( + bootstrap_values, Loader=yaml.SafeLoader + ) # remove sysadmin_password,ansible_ssh_pass,ansible_become_pass # encode admin_password @@ -2771,11 +2804,13 @@ class SubcloudManager(manager.Manager): del bootstrap_values_dict['ansible_become_pass'] if 'admin_password' in bootstrap_values_dict: bootstrap_values_dict['admin_password'] = base64.b64encode( - bootstrap_values_dict['admin_password'].encode("utf-8")).decode('utf-8') + bootstrap_values_dict['admin_password'].encode("utf-8") + ).decode('utf-8') rehome_data_dict['saved_payload'] = bootstrap_values_dict # put bootstrap_address back into rehome_data_dict if _bootstrap_address: - rehome_data_dict['saved_payload']['bootstrap-address'] = _bootstrap_address + rehome_data_dict['saved_payload'][ + 'bootstrap-address'] = _bootstrap_address # update bootstrap_address if bootstrap_address: @@ -2784,7 +2819,8 @@ class SubcloudManager(manager.Manager): resource='subcloud', msg='Cannot update bootstrap_address into rehome data, ' 'need to import bootstrap_values first') - rehome_data_dict['saved_payload']['bootstrap-address'] = bootstrap_address + rehome_data_dict['saved_payload'][ + 'bootstrap-address'] = bootstrap_address rehome_data = None if rehome_data_dict: @@ -3195,7 +3231,9 @@ class SubcloudManager(manager.Manager): try: subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region) except Exception: - LOG.exception("Failed to get subcloud by region name: %s" % subcloud_region) + LOG.exception( + "Failed to get subcloud by region name: %s" % subcloud_region + ) raise try: @@ -3256,9 +3294,10 @@ class SubcloudManager(manager.Manager): return prestage.prestage_subcloud(context, 
payload) @utils.synchronized("regionone-data-cache", external=False) - def _get_cached_regionone_data(self, regionone_keystone_client, regionone_sysinv_client=None): - if (not SubcloudManager.regionone_data or - SubcloudManager.regionone_data['expiry'] <= datetime.datetime.utcnow()): + def _get_cached_regionone_data( + self, regionone_keystone_client, regionone_sysinv_client=None): + if (not SubcloudManager.regionone_data or SubcloudManager.regionone_data[ + 'expiry'] <= datetime.datetime.utcnow()): user_list = regionone_keystone_client.get_enabled_users(id_only=False) for user in user_list: if user.name == dccommon_consts.ADMIN_USER_NAME: @@ -3268,15 +3307,18 @@ class SubcloudManager(manager.Manager): elif user.name == dccommon_consts.DCMANAGER_USER_NAME: SubcloudManager.regionone_data['dcmanager_user_id'] = user.id - project_list = regionone_keystone_client.get_enabled_projects(id_only=False) + project_list = regionone_keystone_client.get_enabled_projects( + id_only=False) for project in project_list: if project.name == dccommon_consts.ADMIN_PROJECT_NAME: SubcloudManager.regionone_data['admin_project_id'] = project.id elif project.name == dccommon_consts.SERVICES_USER_NAME: - SubcloudManager.regionone_data['services_project_id'] = project.id + SubcloudManager.regionone_data['services_project_id'] = \ + project.id if regionone_sysinv_client is None: - endpoint = regionone_keystone_client.endpoint_cache.get_endpoint('sysinv') + endpoint = regionone_keystone_client.endpoint_cache.get_endpoint( + 'sysinv') regionone_sysinv_client = SysinvClient( dccommon_consts.DEFAULT_REGION_NAME, regionone_keystone_client.session, @@ -3289,8 +3331,8 @@ class SubcloudManager(manager.Manager): controller.hostname) if mgmt_interface is not None: mgmt_interface_uuids.append(mgmt_interface.uuid) - SubcloudManager.regionone_data['mgmt_interface_uuids'] = mgmt_interface_uuids - + SubcloudManager.regionone_data['mgmt_interface_uuids'] = \ + mgmt_interface_uuids 
SubcloudManager.regionone_data['mgmt_pool'] = \ regionone_sysinv_client.get_management_address_pool() SubcloudManager.regionone_data['oam_addresses'] = \ @@ -3298,7 +3340,8 @@ class SubcloudManager(manager.Manager): SubcloudManager.regionone_data['expiry'] = \ datetime.datetime.utcnow() + datetime.timedelta(hours=1) - LOG.info("RegionOne cached data updated %s" % SubcloudManager.regionone_data) + LOG.info( + "RegionOne cached data updated %s" % SubcloudManager.regionone_data) cached_regionone_data = SubcloudManager.regionone_data return cached_regionone_data diff --git a/distributedcloud/dcmanager/manager/system_peer_manager.py b/distributedcloud/dcmanager/manager/system_peer_manager.py index ce3afdd13..88fbfe7be 100644 --- a/distributedcloud/dcmanager/manager/system_peer_manager.py +++ b/distributedcloud/dcmanager/manager/system_peer_manager.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -116,7 +116,8 @@ class SystemPeerManager(manager.Manager): """ if SystemPeerManager.get_subcloud_deploy_status(subcloud) not in ( consts.DEPLOY_STATE_SECONDARY_FAILED, - consts.DEPLOY_STATE_SECONDARY): + consts.DEPLOY_STATE_SECONDARY + ): return False return True @@ -305,7 +306,7 @@ class SystemPeerManager(manager.Manager): validation = self._is_valid_for_subcloud_sync(subcloud) if validation != VERIFY_SUBCLOUD_SYNC_IGNORE and \ - validation != VERIFY_SUBCLOUD_SYNC_VALID: + validation != VERIFY_SUBCLOUD_SYNC_VALID: LOG.error(validation) error_msg[subcloud_name] = validation continue @@ -675,7 +676,7 @@ class SystemPeerManager(manager.Manager): LOG.info(f"Deleted Subcloud Peer Group {peer_group_name} " f"on peer site.") except dccommon_exceptions.\ - SubcloudPeerGroupDeleteFailedAssociated: + SubcloudPeerGroupDeleteFailedAssociated: LOG.error(f"Subcloud Peer Group {peer_group_name} " "delete failed as it is associated with System " "Peer on peer site.") diff --git 
a/distributedcloud/dcmanager/orchestrator/orch_thread.py b/distributedcloud/dcmanager/orchestrator/orch_thread.py index 1d04c018d..e878540d8 100644 --- a/distributedcloud/dcmanager/orchestrator/orch_thread.py +++ b/distributedcloud/dcmanager/orchestrator/orch_thread.py @@ -1,19 +1,20 @@ # Copyright 2017 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# + import abc import datetime import threading @@ -196,7 +197,8 @@ class OrchThread(threading.Thread): return state_operator( region_name=OrchThread.get_region_name(strategy_step)) - def strategy_step_update(self, subcloud_id, state=None, details=None, stage=None): + def strategy_step_update( + self, subcloud_id, state=None, details=None, stage=None): """Update the strategy step in the DB Sets the start and finished timestamp if necessary, based on state. @@ -219,9 +221,10 @@ class OrchThread(threading.Thread): finished_at=finished_at) def _delete_subcloud_worker(self, region, subcloud_id): - db_api.strategy_step_update(self.context, - subcloud_id, - stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_PROCESSED) + db_api.strategy_step_update( + self.context, + subcloud_id, + stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_PROCESSED) if region in self.subcloud_workers: # The orchestration for this subcloud has either # completed/failed/aborted, remove it from the @@ -387,8 +390,8 @@ class OrchThread(threading.Thread): continue elif strategy_step.state == \ consts.STRATEGY_STATE_INITIAL: - if sw_update_strategy.max_parallel_subclouds > len(self.subcloud_workers) \ - and not stop: + if sw_update_strategy.max_parallel_subclouds > \ + len(self.subcloud_workers) and not stop: # Don't start upgrading this subcloud if it has been # unmanaged by the user. If orchestration was already # started, it will be allowed to complete. diff --git a/distributedcloud/dcmanager/orchestrator/patch_orch_thread.py b/distributedcloud/dcmanager/orchestrator/patch_orch_thread.py index ef9002fb9..84cc21aa7 100644 --- a/distributedcloud/dcmanager/orchestrator/patch_orch_thread.py +++ b/distributedcloud/dcmanager/orchestrator/patch_orch_thread.py @@ -1,20 +1,22 @@ # Copyright 2017 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. 
# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # +from oslo_log import log as logging + from dccommon.drivers.openstack import vim from dcmanager.common import consts from dcmanager.orchestrator.orch_thread import OrchThread @@ -28,7 +30,6 @@ from dcmanager.orchestrator.states.patch.job_data import PatchJobData from dcmanager.orchestrator.states.patch.pre_check import PreCheckState from dcmanager.orchestrator.states.patch.updating_patches import \ UpdatingPatchesState -from oslo_log import log as logging LOG = logging.getLogger(__name__) diff --git a/distributedcloud/dcmanager/orchestrator/service.py b/distributedcloud/dcmanager/orchestrator/service.py index 03f0cf667..2ab612640 100644 --- a/distributedcloud/dcmanager/orchestrator/service.py +++ b/distributedcloud/dcmanager/orchestrator/service.py @@ -1,25 +1,26 @@ -# Copyright (c) 2020-2021 Wind River Systems, Inc. 
-# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. # -# http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # - -import six import functools + from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import service +import six from dccommon.subprocess_cleanup import SubprocessCleanup from dcmanager.common import consts diff --git a/distributedcloud/dcmanager/orchestrator/states/creating_vim_strategy.py b/distributedcloud/dcmanager/orchestrator/states/creating_vim_strategy.py index e1508f9fc..339a7d549 100644 --- a/distributedcloud/dcmanager/orchestrator/states/creating_vim_strategy.py +++ b/distributedcloud/dcmanager/orchestrator/states/creating_vim_strategy.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2021 Wind River Systems, Inc. +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import time from dccommon.drivers.openstack import vim @@ -120,8 +121,8 @@ class CreatingVIMStrategyState(BaseState): raise_error_if_missing=True) # Check for skip criteria where a failed 'build' might be expected - skip_state = self.skip_check(strategy_step, # pylint: disable=assignment-from-none - subcloud_strategy) + # pylint: disable-next=assignment-from-none + skip_state = self.skip_check(strategy_step, subcloud_strategy) if skip_state is not None: self.info_log(strategy_step, "Skip forward to state:(%s)" % skip_state) diff --git a/distributedcloud/dcmanager/orchestrator/states/firmware/applying_vim_strategy.py b/distributedcloud/dcmanager/orchestrator/states/firmware/applying_vim_strategy.py index c98d8b426..8c0e549b6 100644 --- a/distributedcloud/dcmanager/orchestrator/states/firmware/applying_vim_strategy.py +++ b/distributedcloud/dcmanager/orchestrator/states/firmware/applying_vim_strategy.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020 Wind River Systems, Inc. +# Copyright (c) 2020, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import time from dccommon.drivers.openstack import vim @@ -30,7 +31,8 @@ class ApplyingVIMStrategyState(BaseState): def __init__(self, region_name): super(ApplyingVIMStrategyState, self).__init__( - next_state=consts.STRATEGY_STATE_FINISHING_FW_UPDATE, region_name=region_name) + next_state=consts.STRATEGY_STATE_FINISHING_FW_UPDATE, + region_name=region_name) self.max_failed_queries = DEFAULT_MAX_FAILED_QUERIES self.wait_attempts = DEFAULT_MAX_WAIT_ATTEMPTS self.wait_interval = WAIT_INTERVAL diff --git a/distributedcloud/dcmanager/orchestrator/states/firmware/creating_vim_strategy.py b/distributedcloud/dcmanager/orchestrator/states/firmware/creating_vim_strategy.py index 48f8c3531..5a9cfc73b 100644 --- a/distributedcloud/dcmanager/orchestrator/states/firmware/creating_vim_strategy.py +++ b/distributedcloud/dcmanager/orchestrator/states/firmware/creating_vim_strategy.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020 Wind River Systems, Inc. +# Copyright (c) 2020, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import time from dccommon.drivers.openstack import vim @@ -21,7 +22,8 @@ class CreatingVIMStrategyState(BaseState): def __init__(self, region_name): super(CreatingVIMStrategyState, self).__init__( - next_state=consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY, region_name=region_name) + next_state=consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY, + region_name=region_name) # max time to wait for the strategy to be built (in seconds) # is: sleep_duration * max_queries self.sleep_duration = DEFAULT_SLEEP_DURATION diff --git a/distributedcloud/dcmanager/orchestrator/states/firmware/finishing_fw_update.py b/distributedcloud/dcmanager/orchestrator/states/firmware/finishing_fw_update.py index ff3f7cebb..0d15995cb 100644 --- a/distributedcloud/dcmanager/orchestrator/states/firmware/finishing_fw_update.py +++ b/distributedcloud/dcmanager/orchestrator/states/firmware/finishing_fw_update.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import time from dccommon import consts as dccommon_consts @@ -107,7 +108,8 @@ class FinishingFwUpdateState(BaseState): break except Exception: if fail_counter >= self.max_failed_queries: - raise Exception("Timeout waiting to query subcloud device image info") + raise Exception( + "Timeout waiting to query subcloud device image info") fail_counter += 1 time.sleep(self.failed_sleep_duration) diff --git a/distributedcloud/dcmanager/orchestrator/states/firmware/importing_firmware.py b/distributedcloud/dcmanager/orchestrator/states/firmware/importing_firmware.py index 53522f797..97aac3a3c 100644 --- a/distributedcloud/dcmanager/orchestrator/states/firmware/importing_firmware.py +++ b/distributedcloud/dcmanager/orchestrator/states/firmware/importing_firmware.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2022 Wind River Systems, Inc. +# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import os from dccommon import consts as dccommon_consts @@ -20,7 +21,8 @@ class ImportingFirmwareState(BaseState): def __init__(self, region_name): super(ImportingFirmwareState, self).__init__( - next_state=consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY, region_name=region_name) + next_state=consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY, + region_name=region_name) def _image_in_list(self, image, image_list): # todo(abailey): FUTURE. There may be other ways that two images can @@ -139,8 +141,8 @@ class ImportingFirmwareState(BaseState): # However, it may not have been applied to this device device_image_state = None for device_image_state_obj in subcloud_device_image_states: - if device_image_state_obj.pcidevice_uuid == device.uuid\ - and device_image_state_obj.image_uuid == image.uuid: + if device_image_state_obj.pcidevice_uuid == device.uuid \ + and device_image_state_obj.image_uuid == image.uuid: device_image_state = device_image_state_obj break else: diff --git a/distributedcloud/dcmanager/orchestrator/states/firmware/utils.py b/distributedcloud/dcmanager/orchestrator/states/firmware/utils.py index f766f5c65..f0b49fe1e 100644 --- a/distributedcloud/dcmanager/orchestrator/states/firmware/utils.py +++ b/distributedcloud/dcmanager/orchestrator/states/firmware/utils.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020 Wind River Systems, Inc. +# Copyright (c) 2020, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import os # Device Image Status - duplicated from sysinv/common/device.py @@ -105,7 +106,7 @@ def determine_image_fields(image): 'bmc', 'retimer_included'] fields = dict((k, str(v)) for (k, v) in vars(image).items() - if k in field_list and not (v is None)) + if k in field_list and v) return fields @@ -116,10 +117,10 @@ def check_for_label_match(subcloud_host_device_label_list, # todo(abailey): should this compare pci_device_uuid or vendor/device for device_label in subcloud_host_device_label_list: if device_label.pcidevice_uuid and \ - device_uuid == device_label.pcidevice_uuid and \ - label_key == device_label.label_key and \ - label_value == device_label.label_value: - return True + device_uuid == device_label.pcidevice_uuid and \ + label_key == device_label.label_key and \ + label_value == device_label.label_value: + return True return False diff --git a/distributedcloud/dcmanager/orchestrator/states/kube/creating_vim_kube_upgrade_strategy.py b/distributedcloud/dcmanager/orchestrator/states/kube/creating_vim_kube_upgrade_strategy.py index b4222d422..012d83f82 100644 --- a/distributedcloud/dcmanager/orchestrator/states/kube/creating_vim_kube_upgrade_strategy.py +++ b/distributedcloud/dcmanager/orchestrator/states/kube/creating_vim_kube_upgrade_strategy.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2021 Wind River Systems, Inc. +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + from dccommon.consts import DEFAULT_REGION_NAME from dccommon.drivers.openstack import vim from dcmanager.common import consts @@ -48,7 +49,9 @@ class CreatingVIMKubeUpgradeStrategyState(CreatingVIMStrategyState): if to_version is None: sys_kube_versions = \ self.get_sysinv_client(DEFAULT_REGION_NAME).get_kube_versions() - to_version = dcmanager_utils.get_active_kube_version(sys_kube_versions) + to_version = dcmanager_utils.get_active_kube_version( + sys_kube_versions + ) if to_version is None: # No active target kube version on the system controller means # the system controller is part-way through a kube upgrade @@ -59,7 +62,9 @@ class CreatingVIMKubeUpgradeStrategyState(CreatingVIMStrategyState): kube_versions = \ self.get_sysinv_client(region).get_kube_versions() target_kube_version = \ - dcmanager_utils.select_available_kube_version(kube_versions, to_version) + dcmanager_utils.select_available_kube_version( + kube_versions, to_version + ) # Get the update options opts_dict = dcmanager_utils.get_sw_update_opts( diff --git a/distributedcloud/dcmanager/orchestrator/states/kube/pre_check.py b/distributedcloud/dcmanager/orchestrator/states/kube/pre_check.py index 1744c0b9e..df11f222f 100644 --- a/distributedcloud/dcmanager/orchestrator/states/kube/pre_check.py +++ b/distributedcloud/dcmanager/orchestrator/states/kube/pre_check.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2021-2022 Wind River Systems, Inc. +# Copyright (c) 2021-2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import re from dccommon.consts import DEFAULT_REGION_NAME @@ -14,8 +15,8 @@ from dcmanager.common import utils from dcmanager.db import api as db_api from dcmanager.orchestrator.states.base import BaseState -# These following alarms can occur during a vim orchestrated k8s upgrade on the subcloud. 
-# By ignoring the alarms, subcloud k8s upgrade can be +# These following alarms can occur during a vim orchestrated k8s upgrade on the +# subcloud. By ignoring the alarms, subcloud k8s upgrade can be # retried after a failure using DC orchestrator. ALARM_IGNORE_LIST = ['100.003', '200.001', '700.004', '750.006', '900.007', '900.401'] @@ -45,7 +46,8 @@ class KubeUpgradePreCheckState(BaseState): rather than the 'available' version in the subcloud. This allows a partially upgraded subcloud to be skipped. """ - system_health = self.get_sysinv_client(self.region_name).get_kube_upgrade_health() + system_health = self.get_sysinv_client( + self.region_name).get_kube_upgrade_health() fails = re.findall("\[Fail\]", system_health) failed_alarm_check = re.findall("No alarms: \[Fail\]", system_health) no_mgmt_alarms = re.findall("\[0\] of which are management affecting", @@ -57,17 +59,19 @@ class KubeUpgradePreCheckState(BaseState): for alarm in alarms: if alarm.alarm_id not in ALARM_IGNORE_LIST: if alarm.mgmt_affecting == "True": - error_desc_msg = ("Kubernetes upgrade health check failed due to alarm %s. " - "Kubernetes upgrade health: \n %s" % - (alarm.alarm_id, system_health)) + error_desc_msg = ( + "Kubernetes upgrade health check failed due to alarm " + "%s. Kubernetes upgrade health: \n %s" % ( + alarm.alarm_id, system_health)) db_api.subcloud_update( self.context, strategy_step.subcloud_id, error_description=error_desc_msg) self.error_log(strategy_step, "\n" + system_health) - raise Exception(("Kubernetes upgrade health check failed due to alarm %s. " - "Please run 'system health-query-kube-upgrade' " - "command on the subcloud or %s on central for details." % - (alarm.alarm_id, ERROR_DESC_CMD))) + raise Exception(( + "Kubernetes upgrade health check failed due to alarm " + "%s. Please run 'system health-query-kube-upgrade' " + "command on the subcloud or %s on central for details." 
% + (alarm.alarm_id, ERROR_DESC_CMD))) else: error_desc_msg = ("Kubernetes upgrade health check failed. \n %s" % system_health) @@ -114,7 +118,9 @@ class KubeUpgradePreCheckState(BaseState): subcloud_kube_versions = \ self.get_sysinv_client(self.region_name).get_kube_versions() target_version = \ - utils.select_available_kube_version(subcloud_kube_versions, to_version) + utils.select_available_kube_version( + subcloud_kube_versions, to_version + ) self.debug_log(strategy_step, "Pre-Check. Available Kubernetes upgrade:(%s)" % target_version) diff --git a/distributedcloud/dcmanager/orchestrator/states/patch/job_data.py b/distributedcloud/dcmanager/orchestrator/states/patch/job_data.py index e63bc3eed..1eba4486a 100644 --- a/distributedcloud/dcmanager/orchestrator/states/patch/job_data.py +++ b/distributedcloud/dcmanager/orchestrator/states/patch/job_data.py @@ -1,14 +1,15 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # +from oslo_log import log as logging + from dccommon import consts as dccommon_consts from dccommon.drivers.openstack import patching_v1 from dcmanager.common import utils from dcmanager.orchestrator.orch_thread import OrchThread -from oslo_log import log as logging LOG = logging.getLogger(__name__) diff --git a/distributedcloud/dcmanager/orchestrator/states/patch/pre_check.py b/distributedcloud/dcmanager/orchestrator/states/patch/pre_check.py index 486df0058..364126db6 100644 --- a/distributedcloud/dcmanager/orchestrator/states/patch/pre_check.py +++ b/distributedcloud/dcmanager/orchestrator/states/patch/pre_check.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -22,7 +22,7 @@ class PreCheckState(BaseState): alarms = self.get_fm_client(self.region_name).get_alarms() for alarm in alarms: if alarm.mgmt_affecting == "True" and \ - alarm.alarm_id not in ignored_alarms: + alarm.alarm_id not in ignored_alarms: return True # No management affecting alarms return False diff --git a/distributedcloud/dcmanager/orchestrator/states/software/deploy_start.py b/distributedcloud/dcmanager/orchestrator/states/software/deploy_start.py index 3b9787f2b..8dc0d88b7 100644 --- a/distributedcloud/dcmanager/orchestrator/states/software/deploy_start.py +++ b/distributedcloud/dcmanager/orchestrator/states/software/deploy_start.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -42,7 +42,7 @@ class DeployStartState(BaseState): # Find the max version deployed on the SystemController max_version = None - for release_id in deployed_releases: + for release_id, _ in deployed_releases.items(): release_sw_version = deployed_releases[release_id]['sw_version'] if max_version is None or release_sw_version > max_version: max_version = release_sw_version diff --git a/distributedcloud/dcmanager/orchestrator/states/software/finish_strategy.py b/distributedcloud/dcmanager/orchestrator/states/software/finish_strategy.py index 395704147..3cc6230f2 100644 --- a/distributedcloud/dcmanager/orchestrator/states/software/finish_strategy.py +++ b/distributedcloud/dcmanager/orchestrator/states/software/finish_strategy.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -30,8 +30,10 @@ class FinishStrategyState(BaseState): state=software_v1.COMMITTED ) - self.debug_log(strategy_step, - "regionone_committed_releases: %s" % regionone_committed_releases) + self.debug_log( + strategy_step, + "regionone_committed_releases: %s" % regionone_committed_releases + ) try: software_client = self.get_software_client(self.region_name) @@ -66,8 +68,9 @@ class FinishStrategyState(BaseState): try: software_client.delete(releases_to_delete) except Exception: - message = ("Cannot delete releases from subcloud. Please see logs for" - " details.") + message = \ + ("Cannot delete releases from subcloud. Please see logs for" + " details.") self.exception_log(strategy_step, message) raise Exception(message) diff --git a/distributedcloud/dcmanager/orchestrator/states/software/upload.py b/distributedcloud/dcmanager/orchestrator/states/software/upload.py index 6c7e4c452..ce4b74432 100644 --- a/distributedcloud/dcmanager/orchestrator/states/software/upload.py +++ b/distributedcloud/dcmanager/orchestrator/states/software/upload.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -128,14 +128,16 @@ class UploadState(BaseState): if iso_release in subcloud_releases: if potential_missing_patches: - # Retrieve patches that are present in the system controller and - # not in the subcloud after uploading load to the subcloud. + # Retrieve patches that are present in the system + # controller and not in the subcloud after uploading + # load to the subcloud. missing_patches = self. 
\ _find_missing_patches(subcloud_releases, potential_missing_patches) if missing_patches: - message = (f"Release files {missing_patches} are missing") + message = \ + (f"Release files {missing_patches} are missing") self.error_log(strategy_step, message) raise Exception(message) break @@ -148,14 +150,19 @@ class UploadState(BaseState): else: # No load was uploaded therefore the patches are really missing. if potential_missing_patches: - message = (f"Release files {potential_missing_patches} are missing") + message = \ + (f"Release files {potential_missing_patches} are missing") self.error_log(strategy_step, message) raise Exception(message) if upload_only: - self.info_log(strategy_step, - (f"{consts.EXTRA_ARGS_UPLOAD_ONLY} option enabled, skipping" - f" forward to state:({consts.STRATEGY_STATE_COMPLETE})")) + self.info_log( + strategy_step, + ( + f"{consts.EXTRA_ARGS_UPLOAD_ONLY} option enabled, skipping" + f" forward to state:({consts.STRATEGY_STATE_COMPLETE})" + ) + ) return consts.STRATEGY_STATE_COMPLETE return self.next_state diff --git a/distributedcloud/dcmanager/orchestrator/states/swact_host.py b/distributedcloud/dcmanager/orchestrator/states/swact_host.py index 99b3a4f1d..13707a5d4 100644 --- a/distributedcloud/dcmanager/orchestrator/states/swact_host.py +++ b/distributedcloud/dcmanager/orchestrator/states/swact_host.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2022 Wind River Systems, Inc. +# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import time from dcmanager.common import consts @@ -49,8 +50,10 @@ class SwactHostState(BaseState): active_host = self.get_sysinv_client(region).get_host(self.active) standby_host = self.get_sysinv_client(region).get_host(self.standby) - # if the desired active host is already the Active Controller, no need for action - if active_host.capabilities.get('Personality') == consts.PERSONALITY_CONTROLLER_ACTIVE: + # if the desired active host is already the Active Controller, no need for + # action + if active_host.capabilities.get('Personality') == \ + consts.PERSONALITY_CONTROLLER_ACTIVE: msg = "Host: %s already the active controller." % (self.active) self.info_log(strategy_step, msg) return self.next_state @@ -71,7 +74,8 @@ class SwactHostState(BaseState): try: # query the administrative state to see if it is the new state. host = self.get_sysinv_client(region).get_host(self.active) - if host.capabilities.get('Personality') == consts.PERSONALITY_CONTROLLER_ACTIVE: + if host.capabilities.get('Personality') == \ + consts.PERSONALITY_CONTROLLER_ACTIVE: msg = "Host: %s is now the active controller." % (self.active) self.info_log(strategy_step, msg) break diff --git a/distributedcloud/dcmanager/orchestrator/states/unlock_host.py b/distributedcloud/dcmanager/orchestrator/states/unlock_host.py index 17da5e234..2f978e2a3 100644 --- a/distributedcloud/dcmanager/orchestrator/states/unlock_host.py +++ b/distributedcloud/dcmanager/orchestrator/states/unlock_host.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import time import retrying @@ -113,7 +114,8 @@ class UnlockHostState(BaseState): try: # query the administrative state to see if it is the new state. 
host = self.get_sysinv_client( - strategy_step.subcloud.region_name).get_host(self.target_hostname) + strategy_step.subcloud.region_name + ).get_host(self.target_hostname) if self.check_host_ready(host): # Success. Break out of the loop. msg = "Host: %s is now: %s %s %s" % (self.target_hostname, diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/activating.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/activating.py index d5fd004f8..671106a25 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/activating.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/activating.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import time from dcmanager.common import consts @@ -29,7 +30,8 @@ class ActivatingUpgradeState(BaseState): def __init__(self, region_name): super(ActivatingUpgradeState, self).__init__( - next_state=consts.STRATEGY_STATE_COMPLETING_UPGRADE, region_name=region_name) + next_state=consts.STRATEGY_STATE_COMPLETING_UPGRADE, + region_name=region_name) # max time to wait (in seconds) is: sleep_duration * max_queries self.sleep_duration = DEFAULT_SLEEP_DURATION self.max_queries = DEFAULT_MAX_QUERIES diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/completing.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/completing.py index 665b701e7..6bef9d96d 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/completing.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/completing.py @@ -1,17 +1,18 @@ # -# Copyright (c) 2020-2022 Wind River Systems, Inc. +# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # -import retrying + import time +import retrying + from dcmanager.common import consts from dcmanager.common.exceptions import StrategyStoppedException from dcmanager.db import api as db_api from dcmanager.orchestrator.states.base import BaseState - # Max time: 10 minutes = 60 queries x 10 seconds between each query DEFAULT_MAX_QUERIES = 60 DEFAULT_SLEEP_DURATION = 10 diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/deleting_load.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/deleting_load.py index 56056276c..18f8b9f26 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/deleting_load.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/deleting_load.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import time from dcmanager.common import consts @@ -53,7 +54,8 @@ class DeletingLoadState(BaseState): # Get a sysinv client each time. It will automatically renew the # token if it is about to expire. - sysinv_client = self.get_sysinv_client(strategy_step.subcloud.region_name) + sysinv_client = \ + self.get_sysinv_client(strategy_step.subcloud.region_name) if len(sysinv_client.get_loads()) == 1: msg = "Load %s deleted." % load_version self.info_log(strategy_step, msg) diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/finishing_patch_strategy.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/finishing_patch_strategy.py index ad2be2bb7..73bde78db 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/finishing_patch_strategy.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/finishing_patch_strategy.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + from dccommon.drivers.openstack import patching_v1 from dcmanager.common import consts from dcmanager.common.exceptions import StrategyStoppedException @@ -46,8 +47,9 @@ class FinishingPatchStrategyState(BaseState): state=patching_v1.PATCH_STATE_COMMITTED ) - self.debug_log(strategy_step, - "regionone_committed_patches: %s" % regionone_committed_patches) + self.debug_log( + strategy_step, + "regionone_committed_patches: %s" % regionone_committed_patches) committed_patch_ids = list() for patch_id in regionone_committed_patches.keys(): @@ -77,8 +79,9 @@ class FinishingPatchStrategyState(BaseState): elif subcloud_patches[patch_id]['patchstate'] == \ patching_v1.PATCH_STATE_APPLIED: if patch_id in committed_patch_ids: - self.info_log(strategy_step, - "Patch %s will be committed in subcloud" % patch_id) + self.info_log( + strategy_step, + "Patch %s will be committed in subcloud" % patch_id) patches_to_commit.append(patch_id) if patches_to_delete: diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/importing_load.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/importing_load.py index 991a924f9..601941521 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/importing_load.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/importing_load.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import time from dccommon.exceptions import LoadMaxReached @@ -31,7 +32,8 @@ class ImportingLoadState(BaseState): def __init__(self, region_name): super(ImportingLoadState, self).__init__( - next_state=consts.STRATEGY_STATE_UPDATING_PATCHES, region_name=region_name) + next_state=consts.STRATEGY_STATE_UPDATING_PATCHES, + region_name=region_name) # max time to wait (in seconds) is: sleep_duration * max_queries self.sleep_duration = DEFAULT_SLEEP_DURATION self.max_queries = DEFAULT_MAX_QUERIES @@ -91,8 +93,9 @@ class ImportingLoadState(BaseState): break counter += 1 - self.debug_log(strategy_step, - "Waiting for load %s to complete, iter=%d" % (request_type, counter)) + self.debug_log( + strategy_step, + "Waiting for load %s to complete, iter=%d" % (request_type, counter)) if counter >= self.max_queries: raise Exception("Timeout waiting for %s to complete" % request_type) @@ -111,7 +114,8 @@ class ImportingLoadState(BaseState): self.info_log(strategy_step, "Load:%s already found" % target_version) return True, load_info - elif load.state == consts.IMPORTED_LOAD_STATE or load.state == consts.ERROR_LOAD_STATE: + elif load.state == consts.IMPORTED_LOAD_STATE or \ + load.state == consts.ERROR_LOAD_STATE: load_info['load_id'] = load.id load_info['load_version'] = load.software_version @@ -139,8 +143,8 @@ class ImportingLoadState(BaseState): if load_id_to_be_deleted is not None: self.info_log(strategy_step, "Deleting load %s..." 
% load_id_to_be_deleted) - self.get_sysinv_client( - strategy_step.subcloud.region_name).delete_load(load_id_to_be_deleted) + self.get_sysinv_client(strategy_step.subcloud.region_name).\ + delete_load(load_id_to_be_deleted) req_info['type'] = LOAD_DELETE_REQUEST_TYPE self._wait_for_request_to_complete(strategy_step, req_info) @@ -151,14 +155,17 @@ class ImportingLoadState(BaseState): if subcloud_type == consts.SYSTEM_MODE_SIMPLEX: # For simplex we only import the load record, not the entire ISO loads = self._read_from_cache(REGION_ONE_SYSTEM_LOAD_CACHE_TYPE) - matches = [load for load in loads if load.software_version == target_version] + matches = [ + load for load in loads if load.software_version == target_version] target_load = matches[0].to_dict() # Send only the required fields - creation_keys = ['software_version', 'compatible_version', 'required_patches'] + creation_keys = ['software_version', + 'compatible_version', + 'required_patches'] target_load = {key: target_load[key] for key in creation_keys} try: - load = self.get_sysinv_client( - strategy_step.subcloud.region_name).import_load_metadata(target_load) + load = self.get_sysinv_client(strategy_step.subcloud.region_name).\ + import_load_metadata(target_load) self.info_log(strategy_step, "Load: %s is now: %s" % ( load.software_version, load.state)) @@ -178,28 +185,34 @@ class ImportingLoadState(BaseState): load_import_retry_counter += 1 try: - # ISO and SIG files are found in the vault under a version directory + # ISO and SIG files are found in the vault under a version + # directory self.info_log(strategy_step, "Getting vault load files...") iso_path, sig_path = utils.get_vault_load_files(target_version) if not iso_path: - message = ("Failed to get upgrade load info for subcloud %s" % - strategy_step.subcloud.name) + message = ( + "Failed to get upgrade load info for subcloud %s" % + strategy_step.subcloud.name) raise Exception(message) - # Call the API. 
import_load blocks until the load state is 'importing' + # Call the API. import_load blocks until the load state is + # 'importing' self.info_log(strategy_step, "Sending load import request...") load = self.get_sysinv_client( - strategy_step.subcloud.region_name).import_load(iso_path, sig_path) + strategy_step.subcloud.region_name + ).import_load(iso_path, sig_path) break except VaultLoadMissingError: raise except LoadMaxReached: - # A prior import request may have encountered an exception but the request actually - # continued with the import operation in the subcloud. This has been observed when performing - # multiple parallel upgrade in which resource/link may be saturated. In such case allow continue - # for further checks (i.e. at wait_for_request_to_complete) + # A prior import request may have encountered an exception but + # the request actually continued with the import operation in the + # subcloud. This has been observed when performing multiple + # parallel upgrade in which resource/link may be saturated. + # In such case allow continue for further checks + # (i.e. at wait_for_request_to_complete) self.info_log(strategy_step, "Load at max number of loads") break @@ -209,13 +222,14 @@ class ImportingLoadState(BaseState): (e, load_import_retry_counter)) if load_import_retry_counter >= self.max_load_import_retries: self.error_log(strategy_step, str(e)) - raise Exception("Failed to import load. Please check sysinv.log on " - "the subcloud for details.") + raise Exception("Failed to import load. 
Please check " + "sysinv.log on the subcloud for details.") time.sleep(self.sleep_duration) if load is None: - _, load_info = self._get_subcloud_load_info(strategy_step, target_version) + _, load_info = self._get_subcloud_load_info( + strategy_step, target_version) load_id = load_info.get('load_id') software_version = load_info['load_version'] else: @@ -228,14 +242,16 @@ class ImportingLoadState(BaseState): if software_version != target_version: raise Exception("The imported load was not the expected version.") try: - self.info_log(strategy_step, - "Load import request accepted, load software version = %s" - % software_version) + self.info_log( + strategy_step, + "Load import request accepted, load software version = %s" + % software_version) req_info['load_id'] = load_id req_info['load_version'] = target_version req_info['type'] = LOAD_IMPORT_REQUEST_TYPE - self.info_log(strategy_step, - "Waiting for state to change from importing to imported...") + self.info_log( + strategy_step, + "Waiting for state to change from importing to imported...") self._wait_for_request_to_complete(strategy_step, req_info) except Exception as e: self.error_log(strategy_step, str(e)) diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/installing_license.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/installing_license.py index 6ca45c2da..a9a5b216a 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/installing_license.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/installing_license.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + from dccommon import consts as dccommon_consts from dcmanager.common import consts from dcmanager.common import exceptions @@ -52,9 +53,10 @@ class InstallingLicenseState(BaseState): return self.next_state else: # An unexpected error occurred querying the license - message = ('An unexpected error occurred querying the license %s. Detail: %s' % - (dccommon_consts.SYSTEM_CONTROLLER_NAME, - target_error)) + message = ( + 'An unexpected error occurred querying the license %s. ' + 'Detail: %s' % (dccommon_consts.SYSTEM_CONTROLLER_NAME, + target_error)) db_api.subcloud_update( self.context, strategy_step.subcloud_id, error_description=message[0:consts.ERROR_DESCRIPTION_LENGTH]) diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/migrating_data.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/migrating_data.py index 327c53837..4d9456cfa 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/migrating_data.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/migrating_data.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import os import time @@ -47,7 +48,8 @@ class MigratingDataState(BaseState): def __init__(self, region_name): super(MigratingDataState, self).__init__( - next_state=consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_0, region_name=region_name) + next_state=consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_0, + region_name=region_name) self.max_api_queries = DEFAULT_MAX_API_QUERIES self.api_sleep_duration = DEFAULT_API_SLEEP @@ -141,8 +143,9 @@ class MigratingDataState(BaseState): strategy_step.subcloud.name + consts.INVENTORY_FILE_POSTFIX) log_file = os.path.join(consts.DC_ANSIBLE_LOG_DIR, subcloud.name) + \ '_playbook_output.log' - # Send skip_patching=true to prevent the playbook from applying any patches present in the - # upgrade_data. 
All the required patches will be included in the generated install iso. + # Send skip_patching=true to prevent the playbook from applying any patches + # present in the upgrade_data. All the required patches will be included in + # the generated install iso. data_migrating_cmd = [ "ansible-playbook", ANSIBLE_UPGRADE_PLAYBOOK, "-i", ansible_subcloud_inventory_file, "-e", @@ -156,7 +159,8 @@ class MigratingDataState(BaseState): # Two error messages: one for subcloud error description and logs and # one for orchestrator strategy_step detail (shorter than the previous). msg_subcloud = utils.find_ansible_error_msg( - strategy_step.subcloud.name, log_file, consts.DEPLOY_STATE_MIGRATING_DATA) + strategy_step.subcloud.name, log_file, + consts.DEPLOY_STATE_MIGRATING_DATA) # Get script output in case it is available error_msg = utils.get_failure_msg(strategy_step.subcloud.region_name) failure = ('%s \n%s' % (error_msg, msg_subcloud)) diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/pre_check.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/pre_check.py index cda364fb2..da4277459 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/pre_check.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/pre_check.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import copy import re @@ -40,10 +41,12 @@ class PreCheckState(BaseState): def __init__(self, region_name): super(PreCheckState, self).__init__( - next_state=consts.STRATEGY_STATE_INSTALLING_LICENSE, region_name=region_name) + next_state=consts.STRATEGY_STATE_INSTALLING_LICENSE, + region_name=region_name) - def _check_health(self, strategy_step, subcloud_sysinv_client, subcloud_fm_client, - host, upgrades): + def _check_health( + self, strategy_step, subcloud_sysinv_client, subcloud_fm_client, + host, upgrades): # Check system upgrade health # @@ -112,21 +115,22 @@ class PreCheckState(BaseState): if not failed_alarm_check: # Health check failure: no alarms involved # - # These could be Kubernetes or other related failure(s) which has not been been - # converted into an alarm condition. + # These could be Kubernetes or other related failure(s) which has not + been converted into an alarm condition. error_desc_msg = ("System upgrade health check failed. \n %s" % fails) db_api.subcloud_update( self.context, strategy_step.subcloud_id, error_description=error_desc_msg[0:consts.ERROR_DESCRIPTION_LENGTH]) - details = ("System upgrade health check failed. Please run 'system health-query-upgrade' " - "command on the subcloud or %s on central for details" - % (consts.ERROR_DESC_CMD)) + details = ( + "System upgrade health check failed. 
Please run " + "'system health-query-upgrade' command on the subcloud or %s " + "on central for details" % (consts.ERROR_DESC_CMD)) self.error_log(strategy_step, "\n" + system_health) raise PreCheckFailedException( subcloud=strategy_step.subcloud.name, details=details, - ) + ) else: # Health check failure: one or more alarms if (upgrades and (len(fails) == len(alarm_ignore_list))): @@ -139,38 +143,42 @@ class PreCheckState(BaseState): for alarm in alarms: if alarm.alarm_id not in alarm_ignore_list: if alarm.mgmt_affecting == "True": - error_desc_msg = ("System upgrade health check failed due to alarm %s. " - "System upgrade health: \n %s" % - (alarm.alarm_id, system_health)) + error_desc_msg = ( + "System upgrade health check failed due to " + "alarm %s. System upgrade health: \n %s" % + (alarm.alarm_id, system_health)) db_api.subcloud_update( self.context, strategy_step.subcloud_id, - error_description=error_desc_msg[0:consts.ERROR_DESCRIPTION_LENGTH]) - details = ("System upgrade health check failed due to alarm %s. " - "Please run 'system health-query-upgrade' " - "command on the subcloud or %s on central for details." % - (alarm.alarm_id, consts.ERROR_DESC_CMD)) + error_description=error_desc_msg[ + 0:consts.ERROR_DESCRIPTION_LENGTH]) + details = ( + "System upgrade health check failed due to " + "alarm %s. Please run 'system health-query-upgrade' " + "command on the subcloud or %s on central for " + "details." % (alarm.alarm_id, consts.ERROR_DESC_CMD)) self.error_log(strategy_step, "\n" + system_health) raise PreCheckFailedException( subcloud=strategy_step.subcloud.name, details=details, - ) + ) else: # Multiple failures - error_desc_msg = ("System upgrade health check failed due to multiple failures. " - "Health: \n %s" % - (system_health)) + error_desc_msg = ( + "System upgrade health check failed due to multiple failures. 
" + "Health: \n %s" % system_health) db_api.subcloud_update( self.context, strategy_step.subcloud_id, - error_description=error_desc_msg[0:consts.ERROR_DESCRIPTION_LENGTH]) - details = ("System upgrade health check failed due to multiple failures. " - "Please run 'system health-query-upgrade' command on the " - "subcloud or %s on central for details." % - (consts.ERROR_DESC_CMD)) + error_description=error_desc_msg[ + 0:consts.ERROR_DESCRIPTION_LENGTH]) + details = ( + "System upgrade health check failed due to multiple failures. " + "Please run 'system health-query-upgrade' command on the " + "subcloud or %s on central for details." % consts.ERROR_DESC_CMD) self.error_log(strategy_step, "\n" + system_health) raise PreCheckFailedException( subcloud=strategy_step.subcloud.name, details=details, - ) + ) def _check_scratch(self, strategy_step, subcloud_sysinv_client, host): scratch_fs = subcloud_sysinv_client.get_host_filesystem( @@ -182,7 +190,7 @@ class PreCheckState(BaseState): raise PreCheckFailedException( subcloud=strategy_step.subcloud.name, details=details, - ) + ) def _perform_subcloud_online_checks(self, strategy_step, subcloud_sysinv_client, subcloud_fm_client, host, upgrades): @@ -204,8 +212,10 @@ class PreCheckState(BaseState): if subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE: subcloud_sysinv_client = None try: - subcloud_sysinv_client = self.get_sysinv_client(strategy_step.subcloud.region_name) - subcloud_fm_client = self.get_fm_client(strategy_step.subcloud.region_name) + subcloud_sysinv_client = \ + self.get_sysinv_client(strategy_step.subcloud.region_name) + subcloud_fm_client = \ + self.get_fm_client(strategy_step.subcloud.region_name) except Exception: # if getting the token times out, the orchestrator may have # restarted and subcloud may be offline; so will attempt @@ -233,14 +243,15 @@ class PreCheckState(BaseState): subcloud=strategy_step.subcloud.name, details=details) + sc_status = subcloud.deploy_status if 
(host.administrative == consts.ADMIN_LOCKED and - (subcloud.deploy_status == consts.DEPLOY_STATE_INSTALL_FAILED or - subcloud.deploy_status == consts.DEPLOY_STATE_PRE_INSTALL_FAILED)): - # If the subcloud is online but its deploy state is pre-install-failed - # or install-failed and the subcloud host is locked, the upgrading - # simplex step must have failed early in the previous upgrade attempt. - # The pre-check should transition directly to upgrading simplex step in the - # retry. + (sc_status == consts.DEPLOY_STATE_INSTALL_FAILED or + sc_status == consts.DEPLOY_STATE_PRE_INSTALL_FAILED)): + # If the subcloud is online but its deploy state is + # pre-install-failed or install-failed and the subcloud host is + # locked, the upgrading simplex step must have failed early in + # the previous upgrade attempt. The pre-check should transition + # directly to upgrading simplex step in the retry. self.override_next_state(consts.STRATEGY_STATE_UPGRADING_SIMPLEX) return self.next_state @@ -256,103 +267,122 @@ class PreCheckState(BaseState): host, upgrades) if subcloud.deploy_status == consts.DEPLOY_STATE_UPGRADE_ACTIVATED: - # If the subcloud has completed upgrade activation, advance directly - # to completing step. - self.override_next_state(consts.STRATEGY_STATE_COMPLETING_UPGRADE) - elif subcloud.deploy_status == consts.DEPLOY_STATE_DATA_MIGRATION_FAILED: + # If the subcloud has completed upgrade activation, + # advance directly to completing step. + self.override_next_state( + consts.STRATEGY_STATE_COMPLETING_UPGRADE + ) + elif subcloud.deploy_status == \ + consts.DEPLOY_STATE_DATA_MIGRATION_FAILED: # If the subcloud deploy status is data-migration-failed but - # it is online and has passed subcloud online checks, it must have - # timed out while waiting for the subcloud to unlock previously and - # has succesfully been unlocked since. Update the subcloud deploy - # status and advance to activating upgrade step. 
+ # it is online and has passed subcloud online checks, it must + # have timed out while waiting for the subcloud to unlock + # previously and has succesfully been unlocked since. Update + # the subcloud deploy status and advance to activating upgrade + # step. db_api.subcloud_update( self.context, strategy_step.subcloud_id, deploy_status=consts.DEPLOY_STATE_MIGRATED) - self.override_next_state(consts.STRATEGY_STATE_ACTIVATING_UPGRADE) + self.override_next_state( + consts.STRATEGY_STATE_ACTIVATING_UPGRADE + ) elif subcloud.deploy_status == consts.DEPLOY_STATE_MIGRATED: # If the subcloud deploy status is migrated but it is online, it # must have undergone 2 upgrade attempts: # - in 1st upgrade attempt: strategy timed out while waiting # for the subcloud to unlock - # - in 2nd upgrade attempt: the subcloud was unlocked successfully - # (with or without manual interventions) but failed to activate. + # - in 2nd upgrade attempt: the subcloud was unlocked + # successfully (with or without manual interventions) but + # failed to activate. # Advance to activating upgrade step so activation can be retried # after the manual intervention. 
- self.override_next_state(consts.STRATEGY_STATE_ACTIVATING_UPGRADE) + self.override_next_state( + consts.STRATEGY_STATE_ACTIVATING_UPGRADE + ) else: # Duplex case if upgrades: # If upgrade has started, skip subcloud online checks self.info_log(strategy_step, "Online subcloud checks skipped.") upgrade_state = upgrades[0].state - if(upgrade_state == consts.UPGRADE_STATE_DATA_MIGRATION_FAILED or - upgrade_state == consts.UPGRADE_STATE_DATA_MIGRATION): + controllers_state = consts.UPGRADE_STATE_UPGRADING_CONTROLLERS + migration_complete = consts.UPGRADE_STATE_DATA_MIGRATION_COMPLETE + + if (upgrade_state == consts.UPGRADE_STATE_DATA_MIGRATION_FAILED + or upgrade_state == consts.UPGRADE_STATE_DATA_MIGRATION): error_message = "upgrade state: %s" % upgrade_state raise ManualRecoveryRequiredException( subcloud=strategy_step.subcloud.name, error_message=error_message) - elif(upgrade_state == consts.UPGRADE_STATE_UPGRADING_CONTROLLERS or - upgrade_state == consts.UPGRADE_STATE_DATA_MIGRATION_COMPLETE): - # At this point the subcloud is duplex, deploy state is complete - # and "system upgrade-show" on the subcloud indicates that the - # upgrade state is "upgrading-controllers". + elif (upgrade_state == controllers_state or + upgrade_state == migration_complete): + # At this point the subcloud is duplex, deploy state is + # completeand "system upgrade-show" on the subcloud indicates + # that the upgrade state is "upgrading-controllers". # If controller-1 is locked then we unlock it, # if controller-0 is active we need to swact # else we can proceed to create the VIM strategy. 
- controller_1_host = subcloud_sysinv_client.get_host("controller-1") + controller_1_host = subcloud_sysinv_client.get_host( + "controller-1") if controller_1_host.administrative == consts.ADMIN_LOCKED: self.override_next_state( consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_1) - elif host.capabilities.get('Personality') == consts.PERSONALITY_CONTROLLER_ACTIVE: + elif host.capabilities.get('Personality') == \ + consts.PERSONALITY_CONTROLLER_ACTIVE: self.override_next_state( consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_1) else: self.override_next_state( consts.STRATEGY_STATE_CREATING_VIM_UPGRADE_STRATEGY) - elif (upgrade_state == consts.UPGRADE_STATE_UPGRADING_HOSTS): - # At this point the subcloud is duplex, deploy state is complete - # and "system upgrade-show" on the subcloud indicates that the - # upgrade state is "upgrading-hosts". + elif upgrade_state == consts.UPGRADE_STATE_UPGRADING_HOSTS: + # At this point the subcloud is duplex, deploy state is + # complete and "system upgrade-show" on the subcloud + # indicates that theupgrade state is "upgrading-hosts". # If both subcloud hosts are upgraded to the newer load, # we resume the state machine from activate upgrade state. # Otherwise, we resume from create the VIM strategy state. 
- - # determine the version of the system controller in region one - target_version = \ - self._read_from_cache(REGION_ONE_SYSTEM_INFO_CACHE_TYPE)\ - .software_version + # determine the version of the system controller in regionone + target_version = self._read_from_cache( + REGION_ONE_SYSTEM_INFO_CACHE_TYPE).software_version all_hosts_upgraded = True subcloud_hosts = self.get_sysinv_client( strategy_step.subcloud.region_name).get_hosts() for subcloud_host in subcloud_hosts: - if(subcloud_host.software_load != target_version or - subcloud_host.administrative == consts.ADMIN_LOCKED or - subcloud_host.operational == consts.OPERATIONAL_DISABLED): + is_locked = (subcloud_host.administrative == + consts.ADMIN_LOCKED) + is_disabled = (subcloud_host.operational == + consts.OPERATIONAL_DISABLED) + create_vim_state = \ + consts.STRATEGY_STATE_CREATING_VIM_UPGRADE_STRATEGY + if (subcloud_host.software_load != target_version or + is_locked or is_disabled): all_hosts_upgraded = False - self.override_next_state( - consts.STRATEGY_STATE_CREATING_VIM_UPGRADE_STRATEGY) + self.override_next_state(create_vim_state) if all_hosts_upgraded: - if host.capabilities.get('Personality') == consts.PERSONALITY_CONTROLLER_ACTIVE: + if host.capabilities.get('Personality') == \ + consts.PERSONALITY_CONTROLLER_ACTIVE: self.override_next_state( consts.STRATEGY_STATE_ACTIVATING_UPGRADE) else: self.override_next_state( consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_0) - elif (upgrade_state == consts.UPGRADE_STATE_ACTIVATION_FAILED): - if(host.capabilities.get('Personality') == consts.PERSONALITY_CONTROLLER_ACTIVE): + elif upgrade_state == consts.UPGRADE_STATE_ACTIVATION_FAILED: + if (host.capabilities.get('Personality') == + consts.PERSONALITY_CONTROLLER_ACTIVE): self.override_next_state( consts.STRATEGY_STATE_ACTIVATING_UPGRADE) else: self.override_next_state( consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_0) - elif (upgrade_state == consts.UPGRADE_STATE_ACTIVATION_COMPLETE): - 
self.override_next_state(consts.STRATEGY_STATE_COMPLETING_UPGRADE) + elif upgrade_state == consts.UPGRADE_STATE_ACTIVATION_COMPLETE: + self.override_next_state( + consts.STRATEGY_STATE_COMPLETING_UPGRADE) else: - # Perform subcloud online check for duplex and proceed to the next step - # (i.e. installing license) + # Perform subcloud online check for duplex and proceed to the + # next step (i.e. installing license) self._perform_subcloud_online_checks(strategy_step, subcloud_sysinv_client, subcloud_fm_client, diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/starting_upgrade.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/starting_upgrade.py index 93aa9c094..91348352d 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/starting_upgrade.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/starting_upgrade.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import time from dccommon.drivers.openstack.vim import ALARM_RESTRICTIONS_RELAXED @@ -96,10 +97,12 @@ class StartingUpgradeState(BaseState): if upgrade_state in UPGRADE_RETRY_STATES: retry_counter += 1 if retry_counter >= self.max_failed_retries: - error_msg = utils.get_failure_msg(strategy_step.subcloud.region_name) + error_msg = utils.get_failure_msg( + strategy_step.subcloud.region_name) db_api.subcloud_update( self.context, strategy_step.subcloud_id, - error_description=error_msg[0:consts.ERROR_DESCRIPTION_LENGTH]) + error_description=error_msg[ + 0:consts.ERROR_DESCRIPTION_LENGTH]) details = ("Failed to start upgrade. Please " "check sysinv.log on the subcloud or " "%s on central for details." 
% @@ -110,7 +113,8 @@ class StartingUpgradeState(BaseState): % upgrade_state) try: self.get_sysinv_client( - strategy_step.subcloud.region_name).upgrade_start(force=force_flag) + strategy_step.subcloud.region_name).upgrade_start( + force=force_flag) except Exception as exception: self.warn_log(strategy_step, "Encountered exception: %s, " diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/transfer_ca_certificate.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/transfer_ca_certificate.py index 43fc6215c..e92202ae8 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/transfer_ca_certificate.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/transfer_ca_certificate.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2022-2023 Wind River Systems, Inc. +# Copyright (c) 2022-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import time from dcmanager.common import consts @@ -48,14 +49,16 @@ class TransferCACertificateState(BaseState): retry_counter = 0 while True: try: - sysinv_client = self.get_sysinv_client(strategy_step.subcloud.region_name) + sysinv_client = \ + self.get_sysinv_client(strategy_step.subcloud.region_name) data = {'mode': 'openldap_ca'} ldap_ca_cert, ldap_ca_key = utils.get_certificate_from_secret( consts.OPENLDAP_CA_CERT_SECRET_NAME, consts.CERT_NAMESPACE_PLATFORM_CA_CERTS) - sysinv_client.update_certificate('', ldap_ca_cert + ldap_ca_key, data) + sysinv_client.update_certificate( + '', ldap_ca_cert + ldap_ca_key, data) break except Exception as e: self.warn_log(strategy_step, diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/updating_patches.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/updating_patches.py index e217a5b13..3f609b89e 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/updating_patches.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/updating_patches.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River 
Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import os import time @@ -69,7 +70,8 @@ class UpdatingPatchesState(BaseState): patching_v1.PATCH_STATE_APPLIED, patching_v1.PATCH_STATE_COMMITTED]: applied_patch_ids.append(patch_id) - self.debug_log(strategy_step, "RegionOne applied_patch_ids: %s" % applied_patch_ids) + self.debug_log(strategy_step, + "RegionOne applied_patch_ids: %s" % applied_patch_ids) region = self.get_region_name(strategy_step) # Retrieve all the patches that are present in this subcloud. @@ -96,7 +98,7 @@ class UpdatingPatchesState(BaseState): if subcloud_patches[patch_id]['repostate'] == \ patching_v1.PATCH_STATE_APPLIED: if subcloud_patches[patch_id]['patchstate'] != \ - patching_v1.PATCH_STATE_APPLIED: + patching_v1.PATCH_STATE_APPLIED: self.info_log(strategy_step, "Patch %s will be removed from subcloud" % (patch_id)) diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/upgrading_duplex.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/upgrading_duplex.py index 40a9e4673..24d1adcaa 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/upgrading_duplex.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/upgrading_duplex.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2021 Wind River Systems, Inc. +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -24,7 +24,9 @@ class UpgradingDuplexState(BaseState): def __init__(self, region_name): super(UpgradingDuplexState, self).__init__( - next_state=consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_1, region_name=region_name) + next_state=consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_1, + region_name=region_name + ) self.target_hostname = "controller-1" # max time to wait (in seconds) is: sleep_duration * max_queries self.sleep_duration = DEFAULT_SLEEP_DURATION @@ -61,8 +63,12 @@ class UpgradingDuplexState(BaseState): upgrades = self.get_sysinv_client(region).get_upgrades() if len(upgrades) != 0: - if (upgrades[0].state == consts.UPGRADE_STATE_DATA_MIGRATION_FAILED or - upgrades[0].state == consts.UPGRADE_STATE_DATA_MIGRATION_COMPLETE): + if ( + upgrades[0].state == + consts.UPGRADE_STATE_DATA_MIGRATION_FAILED or + upgrades[0].state == + consts.UPGRADE_STATE_DATA_MIGRATION_COMPLETE + ): msg = "Upgrade state is %s now" % (upgrades[0].state) self.info_log(strategy_step, msg) break @@ -79,10 +85,11 @@ class UpgradingDuplexState(BaseState): continue api_counter += 1 if api_counter >= self.max_queries: - raise Exception("Timeout waiting for update state to be updated to " - "updated to 'data-migration-failed' or 'data-migration-complete'." - "Please check sysinv.log on the subcloud " - "for details.") + raise Exception( + "Timeout waiting for update state to be updated to " + "'data-migration-failed' or 'data-migration-complete'. " + "Please check sysinv.log on the subcloud for details." + ) time.sleep(self.sleep_duration) # If the upgrade state is 'data-migration-complete' we move to the @@ -95,7 +102,9 @@ class UpgradingDuplexState(BaseState): # The list of upgrades will never contain more than one entry. 
if upgrades[0].state == consts.UPGRADE_STATE_DATA_MIGRATION_FAILED: - raise Exception("Data migration failed on host %s" % self.target_hostname) + raise Exception( + "Data migration failed on host %s" % self.target_hostname + ) # If we reach at this point, the upgrade state is 'data-migration-complete' # and we can move to the next state. diff --git a/distributedcloud/dcmanager/orchestrator/states/upgrade/upgrading_simplex.py b/distributedcloud/dcmanager/orchestrator/states/upgrade/upgrading_simplex.py index 7b9a8f5fd..44fd9607d 100644 --- a/distributedcloud/dcmanager/orchestrator/states/upgrade/upgrading_simplex.py +++ b/distributedcloud/dcmanager/orchestrator/states/upgrade/upgrading_simplex.py @@ -1,12 +1,13 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import json -import keyring import os +import keyring from oslo_serialization import base64 from tsconfig.tsconfig import SW_VERSION @@ -88,7 +89,8 @@ class UpgradingSimplexState(BaseState): subcloud data_install are obtained from: dcmanager database: - subcloud.subcloud_install_initial::for values which are persisted at subcloud_add time + subcloud.subcloud_install_initial::for values which are persisted at + subcloud_add time INSTALL: (needed for upgrade install) bootstrap_interface @@ -103,7 +105,8 @@ class UpgradingSimplexState(BaseState): # Set this options for https with self-signed certificate # no_check_certificate - # Override default filesystem device: also from host-show, but is static. + # Override default filesystem device: also from host-show, but is + static. 
# rootfs_device: "/dev/disk/by-path/pci-0000:00:1f.2-ata-1.0" # boot_device: "/dev/disk/by-path/pci-0000:00:1f.2-ata-1.0" @@ -111,22 +114,24 @@ class UpgradingSimplexState(BaseState): # rd.net.timeout.ipv6dad: 300 BOOTSTRAP: (also needed for bootstrap) - # If the subcloud's bootstrap IP interface and the system controller are not on the - # same network then the customer must configure a default route or static route - # so that the Central Cloud can login bootstrap the newly installed subcloud. - # If nexthop_gateway is specified and the network_address is not specified then a - # default route will be configured. Otherwise, if a network_address is specified - then - # a static route will be configured. + # If the subcloud's bootstrap IP interface and the system controller + # are not on the same network then the customer must configure a + # default route or static route so that the Central Cloud can login + # bootstrap the newly installed subcloud. If nexthop_gateway is + # specified and the network_address is not specified then a default + # route will be configured. Otherwise, if a network_address is + # specified then a static route will be configured. 
nexthop_gateway: default_route_address network_address: static_route_address network_mask: static_route_mask subcloud.data_upgrade - persist for upgrade duration - for values from subcloud online sysinv host-show (persist since upgrade-start) + for values from subcloud online sysinv host-show + (persist since upgrade-start) bmc_address # sysinv_v1 host-show bmc_username # sysinv_v1 host-show - for values from barbican_client (as barbican user), or from upgrade-start: + for values from barbican_client (as barbican user), + or from upgrade-start: bmc_password --- obtain from barbican_client as barbican user """ @@ -239,7 +244,7 @@ class UpgradingSimplexState(BaseState): return upgrade_data_install def _get_subcloud_upgrade_data( - self, strategy_step, subcloud_sysinv_client, subcloud_barbican_client): + self, strategy_step, subcloud_sysinv_client, subcloud_barbican_client): """Get the subcloud data required for upgrades. In case the subcloud is no longer reachable, get upgrade_data from @@ -323,8 +328,9 @@ class UpgradingSimplexState(BaseState): def perform_subcloud_install(self, strategy_step, install_values): - log_file = os.path.join(consts.DC_ANSIBLE_LOG_DIR, strategy_step.subcloud.name) + \ - '_playbook_output.log' + log_file = os.path.join( + consts.DC_ANSIBLE_LOG_DIR, + strategy_step.subcloud.name) + '_playbook_output.log' db_api.subcloud_update( self.context, strategy_step.subcloud_id, deploy_status=consts.DEPLOY_STATE_PRE_INSTALL) @@ -375,7 +381,8 @@ class UpgradingSimplexState(BaseState): # Detailed error message for subcloud error description field. # Exception message for strategy_step detail. 
msg = utils.find_ansible_error_msg( - strategy_step.subcloud.name, log_file, consts.DEPLOY_STATE_INSTALLING) + strategy_step.subcloud.name, log_file, + consts.DEPLOY_STATE_INSTALLING) db_api.subcloud_update( self.context, strategy_step.subcloud_id, deploy_status=consts.DEPLOY_STATE_INSTALL_FAILED, diff --git a/distributedcloud/dcmanager/orchestrator/sw_update_manager.py b/distributedcloud/dcmanager/orchestrator/sw_update_manager.py index fc7dd12a2..2ab15395d 100644 --- a/distributedcloud/dcmanager/orchestrator/sw_update_manager.py +++ b/distributedcloud/dcmanager/orchestrator/sw_update_manager.py @@ -1,26 +1,26 @@ # Copyright 2017 Ericsson AB. -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# + import os import shutil import threading from oslo_config import cfg from oslo_log import log as logging - from tsconfig.tsconfig import SW_VERSION from dccommon import consts as dccommon_consts @@ -137,7 +137,9 @@ class SwUpdateManager(manager.Manager): dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) elif strategy_type == consts.SW_UPDATE_TYPE_UPGRADE: # force option only has an effect in offline case for upgrade - if force and (availability_status != dccommon_consts.AVAILABILITY_ONLINE): + if force and ( + availability_status != dccommon_consts.AVAILABILITY_ONLINE + ): if cfg.CONF.use_usm: return (subcloud_status.endpoint_type == dccommon_consts.ENDPOINT_TYPE_SOFTWARE and @@ -352,14 +354,16 @@ class SwUpdateManager(manager.Manager): else: subcloud_status = db_api.subcloud_status_get( context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_LOAD) - if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC: + if subcloud_status.sync_status == \ + dccommon_consts.SYNC_STATUS_IN_SYNC: raise exceptions.BadRequest( resource='strategy', msg='Subcloud %s does not require upgrade' % cloud_name) elif strategy_type == consts.SW_UPDATE_TYPE_FIRMWARE: subcloud_status = db_api.subcloud_status_get( context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_FIRMWARE) - if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC: + if subcloud_status.sync_status == \ + dccommon_consts.SYNC_STATUS_IN_SYNC: raise exceptions.BadRequest( resource='strategy', msg='Subcloud %s does not require firmware update' @@ -372,7 +376,8 @@ class SwUpdateManager(manager.Manager): subcloud_status = db_api.subcloud_status_get( context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_KUBERNETES) - if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC: + if subcloud_status.sync_status == \ + dccommon_consts.SYNC_STATUS_IN_SYNC: raise exceptions.BadRequest( resource='strategy', msg='Subcloud %s does not require kubernetes update' @@ -385,7 +390,8 @@ class 
SwUpdateManager(manager.Manager): subcloud_status = db_api.subcloud_status_get( context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_KUBE_ROOTCA) - if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC: + if subcloud_status.sync_status == \ + dccommon_consts.SYNC_STATUS_IN_SYNC: raise exceptions.BadRequest( resource='strategy', msg='Subcloud %s does not require kube rootca update' @@ -394,7 +400,8 @@ class SwUpdateManager(manager.Manager): # Make sure subcloud requires patching subcloud_status = db_api.subcloud_status_get( context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_PATCHING) - if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC: + if subcloud_status.sync_status == \ + dccommon_consts.SYNC_STATUS_IN_SYNC: raise exceptions.BadRequest( resource='strategy', msg='Subcloud %s does not require patching' % cloud_name) @@ -469,7 +476,8 @@ class SwUpdateManager(manager.Manager): continue if strategy_type == consts.SW_UPDATE_TYPE_UPGRADE: - if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE: + if subcloud.availability_status != \ + dccommon_consts.AVAILABILITY_ONLINE: if not force: continue elif cfg.CONF.use_usm: @@ -490,7 +498,8 @@ class SwUpdateManager(manager.Manager): msg='Upgrade sync status is unknown for one or more ' 'subclouds') elif strategy_type == consts.SW_UPDATE_TYPE_PATCH: - if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE: + if subcloud.availability_status != \ + dccommon_consts.AVAILABILITY_ONLINE: continue elif (subcloud_status.endpoint_type == dccommon_consts.ENDPOINT_TYPE_PATCHING and @@ -501,7 +510,8 @@ class SwUpdateManager(manager.Manager): msg='Patching sync status is unknown for one or more ' 'subclouds') elif strategy_type == consts.SW_UPDATE_TYPE_FIRMWARE: - if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE: + if subcloud.availability_status != \ + dccommon_consts.AVAILABILITY_ONLINE: continue elif (subcloud_status.endpoint_type == 
dccommon_consts.ENDPOINT_TYPE_FIRMWARE and @@ -512,7 +522,8 @@ class SwUpdateManager(manager.Manager): msg='Firmware sync status is unknown for one or more ' 'subclouds') elif strategy_type == consts.SW_UPDATE_TYPE_KUBERNETES: - if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE: + if subcloud.availability_status != \ + dccommon_consts.AVAILABILITY_ONLINE: continue elif (subcloud_status.endpoint_type == dccommon_consts.ENDPOINT_TYPE_KUBERNETES and @@ -523,7 +534,8 @@ class SwUpdateManager(manager.Manager): msg='Kubernetes sync status is unknown for one or more ' 'subclouds') elif strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE: - if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE: + if subcloud.availability_status != \ + dccommon_consts.AVAILABILITY_ONLINE: continue elif (subcloud_status.endpoint_type == dccommon_consts.ENDPOINT_TYPE_KUBE_ROOTCA and @@ -552,7 +564,8 @@ class SwUpdateManager(manager.Manager): max_parallel_subclouds = 1 if max_parallel_subclouds is None: - max_parallel_subclouds = consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS + max_parallel_subclouds = ( + consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS) strategy_step_created = False # Create the strategy diff --git a/distributedcloud/dcmanager/orchestrator/sw_upgrade_orch_thread.py b/distributedcloud/dcmanager/orchestrator/sw_upgrade_orch_thread.py index 99813d80d..d658132ca 100644 --- a/distributedcloud/dcmanager/orchestrator/sw_upgrade_orch_thread.py +++ b/distributedcloud/dcmanager/orchestrator/sw_upgrade_orch_thread.py @@ -1,19 +1,20 @@ # Copyright 2017 Ericsson AB. -# Copyright (c) 2017-2022 Wind River Systems, Inc. +# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc. +# All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # + from dccommon.drivers.openstack import vim from dcmanager.common import consts from dcmanager.orchestrator.orch_thread import OrchThread @@ -88,7 +89,8 @@ class SwUpgradeOrchThread(OrchThread): consts.STRATEGY_STATE_FINISHING_PATCH_STRATEGY: FinishingPatchStrategyState, consts.STRATEGY_STATE_STARTING_UPGRADE: StartingUpgradeState, - consts.STRATEGY_STATE_TRANSFERRING_CA_CERTIFICATE: TransferCACertificateState, + consts.STRATEGY_STATE_TRANSFERRING_CA_CERTIFICATE: + TransferCACertificateState, consts.STRATEGY_STATE_LOCKING_CONTROLLER_0: LockSimplexState, consts.STRATEGY_STATE_LOCKING_CONTROLLER_1: LockDuplexState, consts.STRATEGY_STATE_UPGRADING_SIMPLEX: UpgradingSimplexState, diff --git a/distributedcloud/dcmanager/rpc/client.py b/distributedcloud/dcmanager/rpc/client.py index 9cb188445..88cf6d486 100644 --- a/distributedcloud/dcmanager/rpc/client.py +++ b/distributedcloud/dcmanager/rpc/client.py @@ -1,15 +1,17 @@ # Copyright (c) 2017-2024 Wind River Systems, Inc. 
-# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # """ @@ -83,12 +85,11 @@ class SubcloudStateClient(RPCClient): update_state_only=update_state_only, audit_fail_count=audit_fail_count)) - def update_subcloud_endpoint_status(self, ctxt, subcloud_name=None, - subcloud_region=None, - endpoint_type=None, - sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, - ignore_endpoints=None, - alarmable=True): + def update_subcloud_endpoint_status( + self, ctxt, subcloud_name=None, subcloud_region=None, endpoint_type=None, + sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, ignore_endpoints=None, + alarmable=True + ): # Note: This is an asynchronous operation. 
# See below for synchronous method call return self.cast(ctxt, self.make_msg('update_subcloud_endpoint_status', @@ -99,12 +100,11 @@ class SubcloudStateClient(RPCClient): ignore_endpoints=ignore_endpoints, alarmable=alarmable)) - def update_subcloud_endpoint_status_sync(self, ctxt, subcloud_name=None, - subcloud_region=None, - endpoint_type=None, - sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, - ignore_endpoints=None, - alarmable=True): + def update_subcloud_endpoint_status_sync( + self, ctxt, subcloud_name=None, subcloud_region=None, endpoint_type=None, + sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, ignore_endpoints=None, + alarmable=True + ): # Note: synchronous return self.call(ctxt, self.make_msg('update_subcloud_endpoint_status', subcloud_name=subcloud_name, @@ -144,16 +144,20 @@ class ManagerClient(RPCClient): return self.call(ctxt, self.make_msg('delete_subcloud', subcloud_id=subcloud_id)) - def rename_subcloud(self, ctxt, subcloud_id, curr_subcloud_name, new_subcloud_name=None): + def rename_subcloud( + self, ctxt, subcloud_id, curr_subcloud_name, new_subcloud_name=None + ): return self.call(ctxt, self.make_msg('rename_subcloud', subcloud_id=subcloud_id, curr_subcloud_name=curr_subcloud_name, new_subcloud_name=new_subcloud_name)) - def update_subcloud(self, ctxt, subcloud_id, management_state=None, - description=None, location=None, group_id=None, - data_install=None, force=None, - deploy_status=None, peer_group_id=None, bootstrap_values=None, bootstrap_address=None): + def update_subcloud( + self, ctxt, subcloud_id, management_state=None, description=None, + location=None, group_id=None, data_install=None, force=None, + deploy_status=None, peer_group_id=None, bootstrap_values=None, + bootstrap_address=None + ): return self.call(ctxt, self.make_msg('update_subcloud', subcloud_id=subcloud_id, management_state=management_state, @@ -242,11 +246,12 @@ class ManagerClient(RPCClient): def subcloud_deploy_resume(self, ctxt, subcloud_id, 
subcloud_name, payload, deploy_states_to_run): - return self.cast(ctxt, self.make_msg('subcloud_deploy_resume', - subcloud_id=subcloud_id, - subcloud_name=subcloud_name, - payload=payload, - deploy_states_to_run=deploy_states_to_run)) + return self.cast(ctxt, self.make_msg( + 'subcloud_deploy_resume', + subcloud_id=subcloud_id, + subcloud_name=subcloud_name, + payload=payload, + deploy_states_to_run=deploy_states_to_run)) def get_subcloud_name_by_region_name(self, ctxt, subcloud_region): return self.call(ctxt, self.make_msg('get_subcloud_name_by_region_name', diff --git a/distributedcloud/dcmanager/state/service.py b/distributedcloud/dcmanager/state/service.py index 8efe26101..9ec6a0617 100644 --- a/distributedcloud/dcmanager/state/service.py +++ b/distributedcloud/dcmanager/state/service.py @@ -10,7 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. 
# # The right to copy, distribute, modify, or otherwise make use # of this software may be licensed only pursuant to the terms @@ -112,12 +112,11 @@ class DCManagerStateService(service.Service): super(DCManagerStateService, self).stop() @request_context - def update_subcloud_endpoint_status(self, context, subcloud_name=None, - subcloud_region=None, - endpoint_type=None, - sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, - alarmable=True, - ignore_endpoints=None): + def update_subcloud_endpoint_status( + self, context, subcloud_name=None, subcloud_region=None, endpoint_type=None, + sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, alarmable=True, + ignore_endpoints=None + ): # Updates subcloud endpoint sync status LOG.info("Handling update_subcloud_endpoint_status request for " "subcloud: (%s) endpoint: (%s) status:(%s) " diff --git a/distributedcloud/dcmanager/state/subcloud_state_manager.py b/distributedcloud/dcmanager/state/subcloud_state_manager.py index 9332063e3..5fb9acfda 100644 --- a/distributedcloud/dcmanager/state/subcloud_state_manager.py +++ b/distributedcloud/dcmanager/state/subcloud_state_manager.py @@ -10,32 +10,30 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # # The right to copy, distribute, modify, or otherwise make use # of this software may be licensed only pursuant to the terms # of an applicable Wind River license agreement. 
# +from fm_api import constants as fm_const +from fm_api import fm_api from oslo_log import log as logging from dccommon import consts as dccommon_consts -from dcorch.rpc import client as dcorch_rpc_client - from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client from dcmanager.common import consts from dcmanager.common import context from dcmanager.common import exceptions from dcmanager.common import manager from dcmanager.common import utils -from dcmanager.rpc import client as rpc_client - from dcmanager.db import api as db_api - -from fm_api import constants as fm_const -from fm_api import fm_api +from dcmanager.rpc import client as rpc_client +from dcorch.rpc import client as dcorch_rpc_client LOG = logging.getLogger(__name__) +ALARM_OUT_OF_SYNC = fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC def sync_update_subcloud_endpoint_status(func): @@ -133,27 +131,31 @@ class SubcloudStateManager(manager.Manager): # Trigger subcloud audits for the subcloud after # its identity endpoint turns to other status from unknown + is_sync_unknown = sync_status != dccommon_consts.SYNC_STATUS_UNKNOWN + is_identity_unknown = ( + original_identity_status == dccommon_consts.SYNC_STATUS_UNKNOWN + ) if endpoint_type == dccommon_consts.ENDPOINT_TYPE_IDENTITY \ - and sync_status != dccommon_consts.SYNC_STATUS_UNKNOWN \ - and original_identity_status == dccommon_consts.SYNC_STATUS_UNKNOWN: + and is_sync_unknown and is_identity_unknown: if not subcloud.first_identity_sync_complete: db_api.subcloud_update(context, subcloud_id, first_identity_sync_complete=True) LOG.debug('Request for audits for %s after updating ' 'identity out of unknown' % subcloud.name) - self.audit_rpc_client.trigger_subcloud_audits(context, subcloud_id) + self.audit_rpc_client.trigger_subcloud_audits( + context, subcloud_id) entity_instance_id = "subcloud=%s.resource=%s" % \ (subcloud.name, endpoint_type) fault = self.fm_api.get_fault( - fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, + 
ALARM_OUT_OF_SYNC, entity_instance_id) if (sync_status != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) \ and fault: try: self.fm_api.clear_fault( - fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa + ALARM_OUT_OF_SYNC, entity_instance_id) except Exception as e: LOG.exception(e) @@ -162,8 +164,9 @@ class SubcloudStateManager(manager.Manager): (sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC): entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD try: + fault = fm_api.Fault( - alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa + alarm_id=ALARM_OUT_OF_SYNC, alarm_state=fm_const.FM_ALARM_STATE_SET, entity_type_id=entity_type_id, entity_instance_id=entity_instance_id, @@ -208,7 +211,7 @@ class SubcloudStateManager(manager.Manager): (subcloud.name, endpoint) fault = self.fm_api.get_fault( - fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, + ALARM_OUT_OF_SYNC, entity_instance_id) # TODO(yuxing): batch clear all the out-of-sync alarms of a @@ -219,7 +222,7 @@ class SubcloudStateManager(manager.Manager): and fault: try: self.fm_api.clear_fault( - fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa + ALARM_OUT_OF_SYNC, entity_instance_id) except Exception as e: LOG.exception(e) @@ -229,7 +232,7 @@ class SubcloudStateManager(manager.Manager): entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD try: fault = fm_api.Fault( - alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa + alarm_id=ALARM_OUT_OF_SYNC, alarm_state=fm_const.FM_ALARM_STATE_SET, entity_type_id=entity_type_id, entity_instance_id=entity_instance_id, @@ -250,9 +253,11 @@ class SubcloudStateManager(manager.Manager): if endpoint_to_update_list: try: - db_api.subcloud_status_update_endpoints(context, subcloud_id, - endpoint_to_update_list, - sync_status) + db_api.subcloud_status_update_endpoints( + context, + subcloud_id, + endpoint_to_update_list, + sync_status) except Exception as e: LOG.exception(e) @@ -441,7 +446,9 @@ class 
SubcloudStateManager(manager.Manager): try: subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region) except Exception: - LOG.exception("Failed to get subcloud by region name %s" % subcloud_region) + LOG.exception( + "Failed to get subcloud by region name %s" % subcloud_region + ) raise if update_state_only: @@ -529,7 +536,9 @@ class SubcloudStateManager(manager.Manager): try: subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region) except Exception: - LOG.exception("Failed to get subcloud by region name: %s" % subcloud_region) + LOG.exception( + "Failed to get subcloud by region name: %s" % subcloud_region + ) raise try: diff --git a/distributedcloud/dcmanager/tests/base.py b/distributedcloud/dcmanager/tests/base.py index cda1411a8..fcb310996 100644 --- a/distributedcloud/dcmanager/tests/base.py +++ b/distributedcloud/dcmanager/tests/base.py @@ -1,5 +1,5 @@ # Copyright (c) 2015 Ericsson AB -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,25 +16,23 @@ # import json -import sqlalchemy from oslo_config import cfg from oslo_db import options +from oslotest import base +import sqlalchemy +from sqlalchemy.engine import Engine +from sqlalchemy import event from dcmanager.common import consts -from dcmanager.db import api as api +from dcmanager.db import api from dcmanager.db.sqlalchemy import api as db_api - from dcmanager.tests import utils -from oslotest import base - get_engine = api.get_engine # Enable foreign key support in sqlite - see: # http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html -from sqlalchemy.engine import Engine -from sqlalchemy import event SUBCLOUD_1 = {'name': 'subcloud1', 'region_name': '2ec93dfb654846909efe61d1b39dd2ce', diff --git a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_peer_group_association.py b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_peer_group_association.py index 1fafabea2..c14c9c8b3 100644 --- a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_peer_group_association.py +++ b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_peer_group_association.py @@ -1,13 +1,15 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # -import mock -from six.moves import http_client import uuid +import mock + +from six.moves import http_client + from dcmanager.db.sqlalchemy import api as db_api from dcmanager.rpc import client as rpc_client @@ -308,7 +310,9 @@ class TestPeerGroupAssociationUpdate(testroot.DCManagerApiTest, @mock.patch.object(psd_common, 'OpenStackDriver') @mock.patch.object(peer_group_association, 'SysinvClient') @mock.patch.object(rpc_client, 'ManagerClient') - def test_sync_association(self, mock_client, mock_sysinv_client, mock_keystone_client): + def test_sync_association( + self, mock_client, mock_sysinv_client, mock_keystone_client + ): mock_client().sync_subcloud_peer_group.return_value = True mock_keystone_client().keystone_client = FakeKeystoneClient() mock_sysinv_client.return_value = FakeSysinvClient() diff --git a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_phased_subcloud_deploy.py b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_phased_subcloud_deploy.py index e8703c726..9a3509cce 100644 --- a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_phased_subcloud_deploy.py +++ b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_phased_subcloud_deploy.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -7,10 +7,10 @@ import base64 import copy import json - -import mock import os from os import path as os_path + +import mock import six from tsconfig.tsconfig import SW_VERSION import webtest @@ -172,7 +172,8 @@ class TestSubcloudDeployBootstrap(testroot.DCManagerApiTest): @mock.patch.object(dutils, 'load_yaml_file') @mock.patch.object(os_path, 'exists') def test_subcloud_bootstrap_no_bootstrap_values_on_request( - self, mock_path_exists, mock_load_yaml_file): + self, mock_path_exists, mock_load_yaml_file + ): mock_path_exists.side_effect = [False, False, False, False, True] fake_bootstrap_values = copy.copy( fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA) @@ -219,7 +220,7 @@ class TestSubcloudDeployBootstrap(testroot.DCManagerApiTest): name="existing_subcloud", deploy_status=consts.DEPLOY_STATE_DONE, **conflicting_subnet - ) + ) subcloud = fake_subcloud.create_fake_subcloud( self.ctx, @@ -264,13 +265,15 @@ class TestSubcloudDeployConfig(testroot.DCManagerApiTest): self, mock_load_yaml, mock_path_exists ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx, data_install='') - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') + fake_password = \ + (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') data = {'sysadmin_password': fake_password} self.mock_rpc_client().subcloud_deploy_config.return_value = True self.mock_get_request_data.return_value = data overrides_file = psd_common.get_config_file_path(subcloud.name) - mock_path_exists.side_effect = lambda x: True if x == overrides_file else False + mock_path_exists.side_effect = \ + lambda x: True if x == overrides_file else False mock_load_yaml.return_value = { consts.BOOTSTRAP_ADDRESS: fake_subcloud.FAKE_BOOTSTRAP_VALUE[consts.BOOTSTRAP_ADDRESS]} @@ -289,7 +292,8 @@ class TestSubcloudDeployConfig(testroot.DCManagerApiTest): subcloud = fake_subcloud.create_fake_subcloud( self.ctx, data_install=json.dumps(data_install) ) - fake_password = 
(base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') + fake_password = \ + (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') data = {'sysadmin_password': fake_password} self.mock_rpc_client().subcloud_deploy_config.return_value = True @@ -391,7 +395,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest): mock_initial_deployment.return_value = True self.mock_rpc_client().subcloud_deploy_install.return_value = True - self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + self.mock_get_vault_load_files.return_value = \ + ('iso_file_path', 'sig_file_path') response = self.app.patch_json( FAKE_URL + '/' + str(subcloud.id) + '/install', @@ -427,10 +432,12 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest): mock_initial_deployment.return_value = True self.mock_rpc_client().subcloud_deploy_install.return_value = True - self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + self.mock_get_vault_load_files.return_value = \ + ('iso_file_path', 'sig_file_path') - with mock.patch('builtins.open', - mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)): + with mock.patch('builtins.open', mock.mock_open( + read_data=fake_subcloud.FAKE_UPGRADES_METADATA + )): response = self.app.patch_json( FAKE_URL + '/' + str(subcloud.id) + '/install', headers=FAKE_HEADERS, params=install_payload) @@ -441,7 +448,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest): @mock.patch.object(psd_common, 'is_initial_deployment') def test_install_subcloud_not_initial_deployment( - self, mock_initial_deployment): + self, mock_initial_deployment + ): subcloud = fake_subcloud.create_fake_subcloud( self.ctx, @@ -463,7 +471,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest): self.mock_get_subcloud_db_install_values.return_value = install_data self.mock_rpc_client().subcloud_deploy_install.return_value = True - self.mock_get_vault_load_files.return_value = ('iso_file_path', 
'sig_file_path') + self.mock_get_vault_load_files.return_value = \ + ('iso_file_path', 'sig_file_path') mock_initial_deployment.return_value = False six.assertRaisesRegex(self, webtest.app.AppError, "400 *", @@ -489,7 +498,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest): @mock.patch.object(psd_common, 'is_initial_deployment') def test_install_subcloud_no_install_values_on_request_or_db( - self, mock_initial_deployment): + self, mock_initial_deployment + ): subcloud = fake_subcloud.create_fake_subcloud( self.ctx, @@ -513,7 +523,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest): @mock.patch.object(psd_common, 'is_initial_deployment') def test_install_subcloud_no_install_values_on_request( - self, mock_initial_deployment): + self, mock_initial_deployment + ): subcloud = fake_subcloud.create_fake_subcloud( self.ctx, @@ -533,7 +544,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest): self.mock_get_subcloud_db_install_values.return_value = install_data self.mock_rpc_client().subcloud_deploy_install.return_value = True - self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + self.mock_get_vault_load_files.return_value = \ + ('iso_file_path', 'sig_file_path') mock_initial_deployment.return_value = True response = self.app.patch_json( @@ -688,8 +700,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest): @mock.patch.object(os_path, 'isdir') @mock.patch.object(os, 'listdir') def test_resume_subcloud( - self, mock_os_listdir, mock_os_isdir, mock_initial_deployment, - mock_load_yaml): + self, mock_os_listdir, mock_os_isdir, mock_initial_deployment, mock_load_yaml + ): mock_os_isdir.return_value = True mock_os_listdir.return_value = ['deploy_chart_fake.tgz', 'deploy_overrides_fake.yaml', @@ -705,7 +717,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest): data_install=json.dumps(data_install) ) - self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + 
self.mock_get_vault_load_files.return_value = \ + ('iso_file_path', 'sig_file_path') self.mock_rpc_client().subcloud_deploy_resume.return_value = True mock_initial_deployment.return_value = True mock_load_yaml.return_value = { @@ -722,7 +735,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest): install_request = {'install_values': data_install, 'sysadmin_password': fake_sysadmin_password, 'bmc_password': fake_bmc_password} - bootstrap_request = {'bootstrap_values': fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA} + bootstrap_request = \ + {'bootstrap_values': fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA} config_request = {'deploy_config': 'deploy config values', 'sysadmin_password': fake_sysadmin_password} resume_request = {**install_request, @@ -758,7 +772,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest): deploy_status=consts.DEPLOY_STATE_CREATED, software_version=SW_VERSION) - self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + self.mock_get_vault_load_files.return_value = \ + ('iso_file_path', 'sig_file_path') self.mock_rpc_client().subcloud_deploy_resume.return_value = True mock_initial_deployment.return_value = False @@ -780,7 +795,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest): deploy_status=consts.DEPLOY_STATE_CREATED, software_version=SW_VERSION) - self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + self.mock_get_vault_load_files.return_value = \ + ('iso_file_path', 'sig_file_path') self.mock_rpc_client().subcloud_deploy_resume.return_value = True invalid_resume_states = [consts.DEPLOY_STATE_INSTALLING, consts.DEPLOY_STATE_BOOTSTRAPPING, @@ -826,7 +842,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest): mock_os_listdir.return_value = ['deploy_chart_fake.tgz', 'deploy_overrides_fake.yaml', 'deploy_playbook_fake.yaml'] - self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + self.mock_get_vault_load_files.return_value = \ + 
('iso_file_path', 'sig_file_path') self.mock_rpc_client().subcloud_deploy_resume.return_value = True mock_initial_deployment.return_value = True diff --git a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_backup.py b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_backup.py index 72fc76d8e..b0831de4b 100644 --- a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_backup.py +++ b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_backup.py @@ -1,14 +1,14 @@ # -# Copyright (c) 2022-2023 Wind River Systems, Inc. +# Copyright (c) 2022-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -from oslo_utils import timeutils - import base64 import copy + import mock +from oslo_utils import timeutils import six import webtest @@ -94,940 +94,1128 @@ FAKE_RESTORE_VALUES_VALID_IP = { class TestSubcloudCreate(testroot.DCManagerApiTest): - def setUp(self): - super(TestSubcloudCreate, self).setUp() self.ctx = utils.dummy_context() - p = mock.patch.object(rpc_client, 'SubcloudStateClient') + p = mock.patch.object(rpc_client, "SubcloudStateClient") + self.fake_password = base64.b64encode("testpass".encode("utf-8")).decode( + "ascii" + ) self.mock_rpc_state_client = p.start() self.addCleanup(p.stop) - @mock.patch('dcmanager.common.utils.OpenStackDriver') - @mock.patch('dcmanager.common.utils.SysinvClient') - @mock.patch.object(rpc_client, 'ManagerClient') - def test_backup_create_subcloud(self, mock_rpc_client, mock_sysinv, - mock_openstack): - + @mock.patch("dcmanager.common.utils.OpenStackDriver") + @mock.patch("dcmanager.common.utils.SysinvClient") + @mock.patch.object(rpc_client, "ManagerClient") + def test_backup_create_subcloud( + self, mock_rpc_client, mock_sysinv, mock_openstack + ): mock_rpc_client().backup_subclouds.return_value = True subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') 
- data = {'sysadmin_password': fake_password, - 'subcloud': '1'} - good_health_states = [FAKE_GOOD_SYSTEM_HEALTH, - FAKE_GOOD_SYSTEM_HEALTH_NO_ALARMS] + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} + good_health_states = [ + FAKE_GOOD_SYSTEM_HEALTH, + FAKE_GOOD_SYSTEM_HEALTH_NO_ALARMS, + ] for system_health in good_health_states: - mock_sysinv().get_system_health.return_value = system_health - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - response = self.app.post_json(FAKE_URL_CREATE, - headers=FAKE_HEADERS, - params=data) + response = self.app.post_json( + FAKE_URL_CREATE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) - @mock.patch('dcmanager.common.utils.OpenStackDriver') - @mock.patch('dcmanager.common.utils.SysinvClient') - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch("dcmanager.common.utils.OpenStackDriver") + @mock.patch("dcmanager.common.utils.SysinvClient") + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_subcloud_with_bad_system_health( - self, mock_rpc_client, mock_sysinv, mock_openstack): - + self, mock_rpc_client, mock_sysinv, mock_openstack + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) mock_rpc_client().backup_subclouds.return_value = True - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, 'subcloud': '1'} - bad_health_states = [FAKE_SYSTEM_HEALTH_MGMT_ALARM, FAKE_SYSTEM_HEALTH_CEPH_FAIL, - FAKE_SYSTEM_HEALTH_K8S_FAIL] + data = {"sysadmin_password": 
self.fake_password, "subcloud": "1"} + + bad_health_states = [ + FAKE_SYSTEM_HEALTH_MGMT_ALARM, + FAKE_SYSTEM_HEALTH_CEPH_FAIL, + FAKE_SYSTEM_HEALTH_K8S_FAIL, + ] for system_health in bad_health_states: - mock_sysinv().get_system_health.return_value = system_health - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - six.assertRaisesRegex(self, webtest.app.AppError, "404 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "404 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_unknown_subcloud(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '123'} + data = {"sysadmin_password": self.fake_password, "subcloud": "123"} mock_rpc_client().backup_subclouds.return_value = True - 
six.assertRaisesRegex(self, webtest.app.AppError, "404 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "404 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_offline_subcloud(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1'} + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_unmanaged_subcloud(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - 
backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1'} + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "404 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "404 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_subcloud_invalid_state(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN, - deploy_status=consts.DEPLOY_STATE_BOOTSTRAPPING) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + deploy_status=consts.DEPLOY_STATE_BOOTSTRAPPING, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1'} + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, 
FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_group(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'group': '1'} + data = {"sysadmin_password": self.fake_password, "group": "1"} mock_rpc_client().backup_subclouds.return_value = True - response = self.app.post_json(FAKE_URL_CREATE, - headers=FAKE_HEADERS, - params=data) + response = self.app.post_json( + FAKE_URL_CREATE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_unknown_group(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + 
backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'group': 'Fake'} + data = {"sysadmin_password": self.fake_password, "group": "Fake"} mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "404 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "404 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_group_not_online(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'group': '1'} + data = {"sysadmin_password": self.fake_password, "group": "1"} mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def 
test_backup_create_group_not_managed(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'group': '1'} + data = {"sysadmin_password": self.fake_password, "group": "1"} mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_group_no_valid_state(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN, - deploy_status=consts.DEPLOY_STATE_BOOTSTRAPPING) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + deploy_status=consts.DEPLOY_STATE_BOOTSTRAPPING, + ) - fake_password = 
(base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'group': '1'} + data = {"sysadmin_password": self.fake_password, "group": "1"} mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_subcloud_and_group(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'group': '1'} + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "group": "1", + } mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_no_subcloud_no_group(self, mock_rpc_client): - 
subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password} + data = {"sysadmin_password": self.fake_password} mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) - - @mock.patch('dcmanager.common.utils.OpenStackDriver') - @mock.patch('dcmanager.common.utils.SysinvClient') - @mock.patch.object(rpc_client, 'ManagerClient') - def test_backup_create_subcloud_backup_values(self, mock_rpc_client, mock_sysinv, mock_openstack): + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) + @mock.patch("dcmanager.common.utils.OpenStackDriver") + @mock.patch("dcmanager.common.utils.SysinvClient") + @mock.patch.object(rpc_client, "ManagerClient") + def test_backup_create_subcloud_backup_values( + self, mock_rpc_client, mock_sysinv, mock_openstack + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + 
management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'backup_values': 'TestFileDirectory'} + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "backup_values": "TestFileDirectory", + } mock_rpc_client().backup_subclouds.return_value = True mock_sysinv().get_system_health.return_value = FAKE_GOOD_SYSTEM_HEALTH - response = self.app.post_json(FAKE_URL_CREATE, - headers=FAKE_HEADERS, - params=data) + response = self.app.post_json( + FAKE_URL_CREATE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_no_password(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - data = {'subcloud': '1'} + data = {"subcloud": "1"} mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch('dcmanager.common.utils.OpenStackDriver') - @mock.patch('dcmanager.common.utils.SysinvClient') - 
@mock.patch.object(rpc_client, 'ManagerClient') - def test_backup_create_subcloud_local_only(self, mock_rpc_client, mock_sysinv, - mock_openstack): + @mock.patch("dcmanager.common.utils.OpenStackDriver") + @mock.patch("dcmanager.common.utils.SysinvClient") + @mock.patch.object(rpc_client, "ManagerClient") + def test_backup_create_subcloud_local_only( + self, mock_rpc_client, mock_sysinv, mock_openstack + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'local_only': 'True'} + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "local_only": "True", + } mock_rpc_client().backup_subclouds.return_value = True mock_sysinv().get_system_health.return_value = FAKE_GOOD_SYSTEM_HEALTH - response = self.app.post_json(FAKE_URL_CREATE, - headers=FAKE_HEADERS, - params=data) + response = self.app.post_json( + FAKE_URL_CREATE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) - @mock.patch('dcmanager.common.utils.OpenStackDriver') - @mock.patch('dcmanager.common.utils.SysinvClient') - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch("dcmanager.common.utils.OpenStackDriver") + @mock.patch("dcmanager.common.utils.SysinvClient") + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_subcloud_local_only_registry_images( - self, mock_rpc_client, mock_sysinv, mock_openstack): - + 
self, mock_rpc_client, mock_sysinv, mock_openstack + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'local_only': 'True', - 'registry_images': 'True'} + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "local_only": "True", + "registry_images": "True", + } mock_rpc_client().backup_subclouds.return_value = True mock_sysinv().get_system_health.return_value = FAKE_GOOD_SYSTEM_HEALTH - response = self.app.post_json(FAKE_URL_CREATE, - headers=FAKE_HEADERS, - params=data) + response = self.app.post_json( + FAKE_URL_CREATE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) - @mock.patch('dcmanager.common.utils.OpenStackDriver') - @mock.patch('dcmanager.common.utils.SysinvClient') - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch("dcmanager.common.utils.OpenStackDriver") + @mock.patch("dcmanager.common.utils.SysinvClient") + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_subcloud_no_local_only_registry_images( - self, mock_rpc_client, mock_sysinv, mock_openstack): - + self, mock_rpc_client, mock_sysinv, mock_openstack + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - 
backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'registry_images': 'True'} + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "registry_images": "True", + } mock_rpc_client().backup_subclouds.return_value = True mock_sysinv().get_system_health.return_value = FAKE_GOOD_SYSTEM_HEALTH - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_subcloud_unknown_parameter(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'unkown_variable': 'FakeValue'} + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "unkown_variable": "FakeValue", + } 
mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_create_subcloud_invalid_payload_format(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - data = 'WrongFormat' + data = "WrongFormat" mock_rpc_client().backup_subclouds.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) - - @mock.patch('dcmanager.common.utils.OpenStackDriver') - @mock.patch('dcmanager.common.utils.SysinvClient') - @mock.patch.object(rpc_client, 'ManagerClient') - def test_backup_create_subcloud_json_file(self, mock_rpc_client, mock_sysinv, - mock_openstack): + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) + @mock.patch("dcmanager.common.utils.OpenStackDriver") + @mock.patch("dcmanager.common.utils.SysinvClient") + @mock.patch.object(rpc_client, "ManagerClient") + def test_backup_create_subcloud_json_file( + self, mock_rpc_client, mock_sysinv, mock_openstack + ): subcloud = 
fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1'} + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} mock_rpc_client().backup_subclouds.return_value = True mock_sysinv().get_system_health.return_value = FAKE_GOOD_SYSTEM_HEALTH - response = self.app.post_json(FAKE_URL_CREATE, - headers=FAKE_HEADERS, - params=data) + response = self.app.post_json( + FAKE_URL_CREATE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) - @mock.patch('dcmanager.common.utils.OpenStackDriver') - @mock.patch('dcmanager.common.utils.SysinvClient') - @mock.patch.object(rpc_client, 'ManagerClient') - def test_create_concurrent_backup(self, mock_rpc_client, mock_sysinv, - mock_openstack): - + @mock.patch("dcmanager.common.utils.OpenStackDriver") + @mock.patch("dcmanager.common.utils.SysinvClient") + @mock.patch.object(rpc_client, "ManagerClient") + def test_create_concurrent_backup( + self, mock_rpc_client, mock_sysinv, mock_openstack + ): mock_sysinv().get_system_health.return_value = FAKE_GOOD_SYSTEM_HEALTH mock_rpc_client().backup_subclouds.return_value = True subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1'} - db_api.subcloud_update(self.ctx, - subcloud.id, - 
deploy_status=consts.DEPLOY_STATE_DONE, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED) - ongoing_backup_states = [consts.BACKUP_STATE_INITIAL, - consts.BACKUP_STATE_VALIDATING, - consts.BACKUP_STATE_PRE_BACKUP, - consts.BACKUP_STATE_IN_PROGRESS] + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} + db_api.subcloud_update( + self.ctx, + subcloud.id, + deploy_status=consts.DEPLOY_STATE_DONE, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + ) - final_backup_states = [consts.BACKUP_STATE_VALIDATE_FAILED, - consts.BACKUP_STATE_PREP_FAILED, - consts.BACKUP_STATE_FAILED, - consts.BACKUP_STATE_UNKNOWN, - consts.BACKUP_STATE_COMPLETE_CENTRAL, - consts.BACKUP_STATE_COMPLETE_LOCAL, - consts.BACKUP_STATE_IN_PROGRESS] + ongoing_backup_states = [ + consts.BACKUP_STATE_INITIAL, + consts.BACKUP_STATE_VALIDATING, + consts.BACKUP_STATE_PRE_BACKUP, + consts.BACKUP_STATE_IN_PROGRESS, + ] + + final_backup_states = [ + consts.BACKUP_STATE_VALIDATE_FAILED, + consts.BACKUP_STATE_PREP_FAILED, + consts.BACKUP_STATE_FAILED, + consts.BACKUP_STATE_UNKNOWN, + consts.BACKUP_STATE_COMPLETE_CENTRAL, + consts.BACKUP_STATE_COMPLETE_LOCAL, + consts.BACKUP_STATE_IN_PROGRESS, + ] for backup_state in ongoing_backup_states + final_backup_states: - db_api.subcloud_update(self.ctx, subcloud.id, - backup_status=backup_state) + db_api.subcloud_update(self.ctx, subcloud.id, backup_status=backup_state) if backup_state in ongoing_backup_states: # Expect the operation to fail - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.post_json, FAKE_URL_CREATE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post_json, + FAKE_URL_CREATE, + headers=FAKE_HEADERS, + params=data, + ) else: # Expect the operation to succeed - response = self.app.post_json(FAKE_URL_CREATE, - 
headers=FAKE_HEADERS, - params=data) + response = self.app.post_json( + FAKE_URL_CREATE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) class TestSubcloudDelete(testroot.DCManagerApiTest): - def setUp(self): - super(TestSubcloudDelete, self).setUp() self.ctx = utils.dummy_context() - p = mock.patch.object(rpc_client, 'SubcloudStateClient') + self.fake_password = base64.b64encode("testpass".encode("utf-8")).decode( + "ascii" + ) + p = mock.patch.object(rpc_client, "SubcloudStateClient") self.mock_rpc_state_client = p.start() self.addCleanup(p.stop) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_subcloud(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1'} + release_version = "22.12" + + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} mock_rpc_client().delete_subcloud_backups.return_value = True - response = self.app.patch_json(FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL_DELETE + release_version, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 207) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, 
"ManagerClient") def test_backup_delete_unknown_group(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'group': '999'} + release_version = "22.12" + + data = {"sysadmin_password": self.fake_password, "group": "999"} mock_rpc_client().delete_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "404 *", - self.app.patch_json, FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "404 *", + self.app.patch_json, + FAKE_URL_DELETE + release_version, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def _test_backup_delete_subcloud_unmanaged(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + 
backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'group': '1'} + release_version = "22.12" + + data = {"sysadmin_password": self.fake_password, "group": "1"} mock_rpc_client().delete_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_DELETE + release_version, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_group(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'group': '1'} + release_version = "22.12" + + data = {"sysadmin_password": self.fake_password, "group": "1"} mock_rpc_client().delete_subcloud_backups.return_value = True - response = self.app.patch_json(FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL_DELETE + release_version, 
headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 207) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_subcloud_and_group(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) HEADER = copy.copy(FAKE_HEADERS) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'group': '1'} + release_version = "22.12" + + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "group": "1", + } mock_rpc_client().delete_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_DELETE + release_version, - headers=HEADER, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_DELETE + release_version, + headers=HEADER, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_no_subcloud_no_group(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - 
backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password} + release_version = "22.12" + + data = {"sysadmin_password": self.fake_password} mock_rpc_client().delete_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_DELETE + release_version, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_invalid_url(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) - invalid_url = '/v1.0/subcloud-backup/fake/' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1'} + invalid_url = "/v1.0/subcloud-backup/fake/" + + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} 
mock_rpc_client().delete_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "404 *", - self.app.patch_json, invalid_url, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "404 *", + self.app.patch_json, + invalid_url, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_no_release_version(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1'} + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} mock_rpc_client().delete_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_DELETE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_DELETE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_no_content(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - 
management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1'} + release_version = "22.12" + + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} mock_rpc_client().delete_subcloud_backups.return_value = None - response = self.app.patch_json(FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL_DELETE + release_version, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 204) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_exception(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1'} + release_version = "22.12" + + data = {"sysadmin_password": self.fake_password, 
"subcloud": "1"} mock_rpc_client().delete_subcloud_backups.side_effect = Exception() - six.assertRaisesRegex(self, webtest.app.AppError, "500 *", - self.app.patch_json, FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "500 *", + self.app.patch_json, + FAKE_URL_DELETE + release_version, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_local_only(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_LOCAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_LOCAL, + ) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'local_only': 'True'} + release_version = "22.12" + + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "local_only": "True", + } mock_rpc_client().delete_subcloud_backups.return_value = True - response = self.app.patch_json(FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL_DELETE + release_version, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 207) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_central(self, mock_rpc_client): - subcloud = 
fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_CENTRAL, + ) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'local_only': 'False'} + release_version = "22.12" + + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "local_only": "False", + } mock_rpc_client().delete_subcloud_backups.return_value = True - response = self.app.patch_json(FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL_DELETE + release_version, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 207) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_invalid_local_only(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_LOCAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_LOCAL, + ) - release_version = '22.12' - fake_password 
= (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'local_only': 'Fake'} + release_version = "22.12" + + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "local_only": "Fake", + } mock_rpc_client().delete_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_DELETE + release_version, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_delete_local_only_unknown_subcloud(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=timeutils.utcnow(), - backup_status=consts.BACKUP_STATE_COMPLETE_LOCAL) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=timeutils.utcnow(), + backup_status=consts.BACKUP_STATE_COMPLETE_LOCAL, + ) - release_version = '22.12' - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '123', - 'local_only': 'True'} + release_version = "22.12" + + data = { + "sysadmin_password": self.fake_password, + "subcloud": "123", + "local_only": "True", + } mock_rpc_client().delete_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "404 *", - self.app.patch_json, FAKE_URL_DELETE + release_version, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + 
self, + webtest.app.AppError, + "404 *", + self.app.patch_json, + FAKE_URL_DELETE + release_version, + headers=FAKE_HEADERS, + params=data, + ) class TestSubcloudRestore(testroot.DCManagerApiTest): - def setUp(self): - super(TestSubcloudRestore, self).setUp() self.ctx = utils.dummy_context() - p = mock.patch.object(rpc_client, 'SubcloudStateClient') + self.fake_password = base64.b64encode("testpass".encode("utf-8")).decode( + "ascii" + ) + p = mock.patch.object(rpc_client, "SubcloudStateClient") self.mock_rpc_state_client = p.start() self.addCleanup(p.stop) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_restore_subcloud(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, 'subcloud': '1'} + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} mock_rpc_client().restore_subcloud_backups.return_value = True - response = self.app.patch_json(FAKE_URL_RESTORE, - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL_RESTORE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) @@ -1035,14 +1223,15 @@ class TestSubcloudRestore(testroot.DCManagerApiTest): def test_backup_restore_subcloud_valid_restore_values(self, mock_rpc_client): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - 
db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') + fake_password = \ + (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') data = {'sysadmin_password': fake_password, 'subcloud': '1', 'restore_values': FAKE_RESTORE_VALUES_VALID_IP} @@ -1057,14 +1246,15 @@ class TestSubcloudRestore(testroot.DCManagerApiTest): def test_backup_restore_subcloud_invalid_restore_values(self, mock_rpc_client): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') + fake_password = \ + (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') data = {'sysadmin_password': fake_password, 'subcloud': '1', 'restore_values': FAKE_RESTORE_VALUES_INVALID_IP} @@ -1075,155 +1265,195 @@ class TestSubcloudRestore(testroot.DCManagerApiTest): @mock.patch.object(rpc_client, 'ManagerClient') def test_backup_restore_unknown_subcloud(self, mock_rpc_client): - - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = 
{'sysadmin_password': fake_password, 'subcloud': '999'} + data = {"sysadmin_password": self.fake_password, "subcloud": "999"} mock_rpc_client().restore_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_RESTORE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_RESTORE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_restore_subcloud_group(self, mock_rpc_client): - test_group_id = 1 - subcloud = fake_subcloud.create_fake_subcloud(self.ctx, group_id=test_group_id) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + subcloud = fake_subcloud.create_fake_subcloud( + self.ctx, group_id=test_group_id + ) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, 'group': str(test_group_id)} + data = {"sysadmin_password": self.fake_password, "group": str(test_group_id)} mock_rpc_client().restore_subcloud_backups.return_value = True - response = self.app.patch_json(FAKE_URL_RESTORE, - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL_RESTORE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_restore_group_single_valid_subcloud(self, 
mock_rpc_client): - test_group_id = 1 - subcloud = fake_subcloud.create_fake_subcloud(self.ctx, group_id=test_group_id) - subcloud2 = fake_subcloud.create_fake_subcloud(self.ctx, group_id=test_group_id, - name=base.SUBCLOUD_2['name'], - region_name=base.SUBCLOUD_2['region_name']) + subcloud = fake_subcloud.create_fake_subcloud( + self.ctx, group_id=test_group_id + ) + subcloud2 = fake_subcloud.create_fake_subcloud( + self.ctx, group_id=test_group_id, name=base.SUBCLOUD_2['name'], + region_name=base.SUBCLOUD_2['region_name'] + ) # Valid subcloud, management state is 'unmanaged' - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) # Invalid subcloud, management state is 'managed' - db_api.subcloud_update(self.ctx, - subcloud2.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud2.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, 'group': str(test_group_id)} + data = {"sysadmin_password": self.fake_password, "group": str(test_group_id)} mock_rpc_client().restore_subcloud_backups.return_value = True - response = self.app.patch_json(FAKE_URL_RESTORE, - headers=FAKE_HEADERS, - params=data) + response = 
self.app.patch_json( + FAKE_URL_RESTORE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_restore_unknown_subcloud_group(self, mock_rpc_client): - - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, 'group': '123'} + data = {"sysadmin_password": self.fake_password, "group": "123"} mock_rpc_client().restore_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "404 *", - self.app.patch_json, FAKE_URL_RESTORE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "404 *", + self.app.patch_json, + FAKE_URL_RESTORE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_restore_no_payload(self, mock_rpc_client): - test_group_id = 1 - subcloud = fake_subcloud.create_fake_subcloud(self.ctx, group_id=test_group_id) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + subcloud = fake_subcloud.create_fake_subcloud( + self.ctx, group_id=test_group_id + ) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) mock_rpc_client().restore_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_RESTORE, - headers=FAKE_HEADERS) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_RESTORE, + 
headers=FAKE_HEADERS, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_restore_no_local_only_registry_images(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, 'subcloud': '1', - 'local_only': 'false', 'registry_images': 'true'} + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "local_only": "false", + "registry_images": "true", + } mock_rpc_client().restore_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_RESTORE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_RESTORE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_restore_subcloud_managed(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + 
management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, 'subcloud': '1'} + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} mock_rpc_client().restore_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_RESTORE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_RESTORE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_restore_subcloud_invalid_deploy_states(self, mock_rpc_client): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, 'subcloud': '1'} + + data = {"sysadmin_password": self.fake_password, "subcloud": "1"} mock_rpc_client().restore_subcloud_backups.return_value = True for status in consts.INVALID_DEPLOY_STATES_FOR_RESTORE: - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - deploy_status=status, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + deploy_status=status, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN) six.assertRaisesRegex(self, webtest.app.AppError, "400 *", self.app.patch_json, FAKE_URL_RESTORE, @@ -1231,137 +1461,169 @@ class TestSubcloudRestore(testroot.DCManagerApiTest): @mock.patch.object(rpc_client, 
'ManagerClient') def test_backup_restore_subcloud_and_group(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, 'subcloud': '1', 'group': '1'} + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "group": "1", + } mock_rpc_client().restore_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_RESTORE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_RESTORE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_restore_no_subcloud_no_group(self, mock_rpc_client): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN, + ) - fake_password = 
(base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password} + data = {"sysadmin_password": self.fake_password} mock_rpc_client().restore_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_RESTORE, - headers=FAKE_HEADERS, params=data) - - @mock.patch.object(rpc_client, 'ManagerClient') - @mock.patch('os.path.isdir') - @mock.patch('os.listdir') - def test_backup_restore_subcloud_with_install_no_release(self, - mock_listdir, - mock_isdir, - mock_rpc_client): + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_RESTORE, + headers=FAKE_HEADERS, + params=data, + ) + @mock.patch.object(rpc_client, "ManagerClient") + @mock.patch("os.path.isdir") + @mock.patch("os.listdir") + def test_backup_restore_subcloud_with_install_no_release( + self, mock_listdir, mock_isdir, mock_rpc_client + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace('\'', '"') - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - data_install=data_install) + data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace( + "'", '"' + ) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + data_install=data_install, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'with_install': 'True' - } + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "with_install": "True", + } mock_isdir.return_value = True - mock_listdir.return_value = ['test.iso', 'test.sig'] + mock_listdir.return_value = ["test.iso", 
"test.sig"] mock_rpc_client().restore_subcloud_backups.return_value = True - response = self.app.patch_json(FAKE_URL_RESTORE, - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL_RESTORE, headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) - @mock.patch.object(rpc_client, 'ManagerClient') - @mock.patch('os.path.isdir') - @mock.patch('os.listdir') - def test_backup_restore_subcloud_with_install_with_release(self, - mock_listdir, - mock_isdir, - mock_rpc_client): - + @mock.patch.object(rpc_client, "ManagerClient") + @mock.patch("os.path.isdir") + @mock.patch("os.listdir") + def test_backup_restore_subcloud_with_install_with_release( + self, mock_listdir, mock_isdir, mock_rpc_client + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace('\'', '"') - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - data_install=data_install) + data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace( + "'", '"' + ) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + data_install=data_install, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'with_install': 'True', - 'release': '22.12' - } + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "with_install": "True", + "release": "22.12", + } mock_isdir.return_value = True - mock_listdir.return_value = ['test.iso', 'test.sig'] + mock_listdir.return_value = ["test.iso", "test.sig"] mock_rpc_client().restore_subcloud_backups.return_value = True with mock.patch('builtins.open', - 
mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)): + mock.mock_open( + read_data=fake_subcloud.FAKE_UPGRADES_METADATA + )): response = self.app.patch_json(FAKE_URL_RESTORE, headers=FAKE_HEADERS, params=data) self.assertEqual(response.status_int, 200) - @mock.patch.object(rpc_client, 'ManagerClient') + @mock.patch.object(rpc_client, "ManagerClient") def test_backup_restore_subcloud_no_install_with_release(self, mock_rpc_client): - - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'release': '22.12' - } + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "release": "22.12", + } mock_rpc_client().restore_subcloud_backups.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_RESTORE, - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_RESTORE, + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(rpc_client, 'ManagerClient') - @mock.patch('dcmanager.common.utils.get_matching_iso') - def test_backup_restore_subcloud_invalid_release(self, - mock_rpc_client, - mock_matching_iso): - - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'subcloud': '1', - 'release': '00.00' - } + @mock.patch.object(rpc_client, "ManagerClient") + @mock.patch("dcmanager.common.utils.get_matching_iso") + def test_backup_restore_subcloud_invalid_release( + self, mock_rpc_client, mock_matching_iso + ): + data = { + "sysadmin_password": self.fake_password, + "subcloud": "1", + "release": "00.00", + } mock_rpc_client().restore_subcloud_backups.return_value = True mock_matching_iso.return_value = [None, True] - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL_RESTORE, - headers=FAKE_HEADERS, 
params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL_RESTORE, + headers=FAKE_HEADERS, + params=data, + ) diff --git a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_deploy.py b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_deploy.py index 2896c35e8..0a11899dc 100644 --- a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_deploy.py +++ b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_deploy.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,12 +13,14 @@ # License for the specific language governing permissions and limitations # under the License. # + import os from os import path as os_path import mock import six from six.moves import http_client +from tsconfig.tsconfig import SW_VERSION import webtest from dccommon import consts as dccommon_consts @@ -30,14 +32,16 @@ from dcmanager.tests.unit.api import test_root_controller as testroot from dcmanager.tests.unit.common import fake_subcloud from dcmanager.tests import utils -from tsconfig.tsconfig import SW_VERSION - FAKE_SOFTWARE_VERSION = '22.12' FAKE_TENANT = utils.UUID1 -FAKE_ID = '1' -FAKE_URL = '/v1.0/subcloud-deploy' -FAKE_HEADERS = {'X-Tenant-Id': FAKE_TENANT, 'X_ROLE': 'admin,member,reader', - 'X-Identity-Status': 'Confirmed', 'X-Project-Name': 'admin'} +FAKE_ID = "1" +FAKE_URL = "/v1.0/subcloud-deploy" +FAKE_HEADERS = { + "X-Tenant-Id": FAKE_TENANT, + "X_ROLE": "admin,member,reader", + "X-Identity-Status": "Confirmed", + "X-Project-Name": "admin", +} FAKE_DEPLOY_PLAYBOOK_PREFIX = consts.DEPLOY_PLAYBOOK + '_' FAKE_DEPLOY_OVERRIDES_PREFIX = consts.DEPLOY_OVERRIDES + '_' @@ -52,7 +56,8 @@ FAKE_DEPLOY_FILES = { FAKE_DEPLOY_CHART_PREFIX: FAKE_DEPLOY_CHART_FILE, } 
FAKE_DEPLOY_DELETE_FILES = { - FAKE_DEPLOY_PLAYBOOK_PREFIX: '/opt/platform/deploy/22.12/deployment-manager.yaml', + FAKE_DEPLOY_PLAYBOOK_PREFIX: + '/opt/platform/deploy/22.12/deployment-manager.yaml', FAKE_DEPLOY_OVERRIDES_PREFIX: '/opt/platform/deploy/22.12/deployment-manager-overrides-subcloud.yaml', FAKE_DEPLOY_CHART_PREFIX: '/opt/platform/deploy/22.12/deployment-manager.tgz', @@ -73,154 +78,144 @@ class TestSubcloudDeploy(testroot.DCManagerApiTest): super(TestSubcloudDeploy, self).setUp() self.ctx = utils.dummy_context() - @mock.patch.object(subcloud_deploy.SubcloudDeployController, - '_upload_files') + @mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files") def test_post_subcloud_deploy(self, mock_upload_files): - params = [('release', FAKE_SOFTWARE_VERSION)] + params = [("release", FAKE_SOFTWARE_VERSION)] fields = list() for opt in consts.DEPLOY_COMMON_FILE_OPTIONS: fake_name = opt + "_fake" - fake_content = "fake content".encode('utf-8') + fake_content = "fake content".encode("utf-8") fields.append((opt, webtest.Upload(fake_name, fake_content))) mock_upload_files.return_value = True params += fields with mock.patch('builtins.open', - mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)): + mock.mock_open( + read_data=fake_subcloud.FAKE_UPGRADES_METADATA + )): response = self.app.post(FAKE_URL, headers=FAKE_HEADERS, params=params) self.assertEqual(response.status_code, http_client.OK) - self.assertEqual(FAKE_SOFTWARE_VERSION, response.json['software_version']) + self.assertEqual(FAKE_SOFTWARE_VERSION, response.json["software_version"]) - @mock.patch.object(subcloud_deploy.SubcloudDeployController, - '_upload_files') + @mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files") def test_post_subcloud_deploy_without_release(self, mock_upload_files): fields = list() for opt in consts.DEPLOY_COMMON_FILE_OPTIONS: fake_name = opt + "_fake" - fake_content = "fake content".encode('utf-8') + fake_content = "fake 
content".encode("utf-8") fields.append((opt, fake_name, fake_content)) mock_upload_files.return_value = True - response = self.app.post(FAKE_URL, - headers=FAKE_HEADERS, - upload_files=fields) + response = self.app.post(FAKE_URL, headers=FAKE_HEADERS, upload_files=fields) self.assertEqual(response.status_code, http_client.OK) # Verify the active release will be returned if release doesn't present - self.assertEqual(SW_VERSION, response.json['software_version']) + self.assertEqual(SW_VERSION, response.json["software_version"]) - @mock.patch.object(subcloud_deploy.SubcloudDeployController, - '_upload_files') + @mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files") def test_post_subcloud_deploy_missing_chart(self, mock_upload_files): - opts = [consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES, consts.DEPLOY_PRESTAGE] + opts = [ + consts.DEPLOY_PLAYBOOK, + consts.DEPLOY_OVERRIDES, + consts.DEPLOY_PRESTAGE, + ] fields = list() for opt in opts: fake_name = opt + "_fake" - fake_content = "fake content".encode('utf-8') + fake_content = "fake content".encode("utf-8") fields.append((opt, fake_name, fake_content)) mock_upload_files.return_value = True - response = self.app.post(FAKE_URL, - headers=FAKE_HEADERS, - upload_files=fields, - expect_errors=True) + response = self.app.post( + FAKE_URL, headers=FAKE_HEADERS, upload_files=fields, expect_errors=True + ) self.assertEqual(response.status_code, http_client.BAD_REQUEST) - @mock.patch.object(subcloud_deploy.SubcloudDeployController, - '_upload_files') + @mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files") def test_post_subcloud_deploy_missing_chart_prestages(self, mock_upload_files): opts = [consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES] fields = list() for opt in opts: fake_name = opt + "_fake" - fake_content = "fake content".encode('utf-8') + fake_content = "fake content".encode("utf-8") fields.append((opt, fake_name, fake_content)) mock_upload_files.return_value = True - 
response = self.app.post(FAKE_URL, - headers=FAKE_HEADERS, - upload_files=fields, - expect_errors=True) + response = self.app.post( + FAKE_URL, headers=FAKE_HEADERS, upload_files=fields, expect_errors=True + ) self.assertEqual(response.status_code, http_client.BAD_REQUEST) - @mock.patch.object(subcloud_deploy.SubcloudDeployController, - '_upload_files') - def test_post_subcloud_deploy_missing_playbook_overrides(self, mock_upload_files): + @mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files") + def test_post_subcloud_deploy_missing_playbook_overrides( + self, mock_upload_files + ): opts = [consts.DEPLOY_CHART, consts.DEPLOY_PRESTAGE] fields = list() for opt in opts: fake_name = opt + "_fake" - fake_content = "fake content".encode('utf-8') + fake_content = "fake content".encode("utf-8") fields.append((opt, fake_name, fake_content)) mock_upload_files.return_value = True - response = self.app.post(FAKE_URL, - headers=FAKE_HEADERS, - upload_files=fields, - expect_errors=True) + response = self.app.post( + FAKE_URL, headers=FAKE_HEADERS, upload_files=fields, expect_errors=True + ) self.assertEqual(response.status_code, http_client.BAD_REQUEST) - @mock.patch.object(subcloud_deploy.SubcloudDeployController, - '_upload_files') + @mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files") def test_post_subcloud_deploy_missing_prestage(self, mock_upload_files): opts = [consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES, consts.DEPLOY_CHART] fields = list() for opt in opts: fake_name = opt + "_fake" - fake_content = "fake content".encode('utf-8') + fake_content = "fake content".encode("utf-8") fields.append((opt, fake_name, fake_content)) mock_upload_files.return_value = True - response = self.app.post(FAKE_URL, - headers=FAKE_HEADERS, - upload_files=fields) + response = self.app.post(FAKE_URL, headers=FAKE_HEADERS, upload_files=fields) self.assertEqual(response.status_code, http_client.OK) - 
@mock.patch.object(subcloud_deploy.SubcloudDeployController, - '_upload_files') + @mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files") def test_post_subcloud_deploy_all_input(self, mock_upload_files): - opts = [consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES, - consts.DEPLOY_CHART, consts.DEPLOY_PRESTAGE] + opts = [ + consts.DEPLOY_PLAYBOOK, + consts.DEPLOY_OVERRIDES, + consts.DEPLOY_CHART, + consts.DEPLOY_PRESTAGE, + ] fields = list() for opt in opts: fake_name = opt + "_fake" - fake_content = "fake content".encode('utf-8') + fake_content = "fake content".encode("utf-8") fields.append((opt, fake_name, fake_content)) mock_upload_files.return_value = True - response = self.app.post(FAKE_URL, - headers=FAKE_HEADERS, - upload_files=fields) + response = self.app.post(FAKE_URL, headers=FAKE_HEADERS, upload_files=fields) self.assertEqual(response.status_code, http_client.OK) - @mock.patch.object(subcloud_deploy.SubcloudDeployController, - '_upload_files') + @mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files") def test_post_subcloud_deploy_prestage(self, mock_upload_files): opts = [consts.DEPLOY_PRESTAGE] fields = list() for opt in opts: fake_name = opt + "_fake" - fake_content = "fake content".encode('utf-8') + fake_content = "fake content".encode("utf-8") fields.append((opt, fake_name, fake_content)) mock_upload_files.return_value = True - response = self.app.post(FAKE_URL, - headers=FAKE_HEADERS, - upload_files=fields) + response = self.app.post(FAKE_URL, headers=FAKE_HEADERS, upload_files=fields) self.assertEqual(response.status_code, http_client.OK) - @mock.patch.object(subcloud_deploy.SubcloudDeployController, - '_upload_files') + @mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files") def test_post_subcloud_deploy_missing_file_name(self, mock_upload_files): fields = list() for opt in consts.DEPLOY_COMMON_FILE_OPTIONS: - fake_content = "fake content".encode('utf-8') + fake_content = "fake 
content".encode("utf-8") fields.append((opt, "", fake_content)) mock_upload_files.return_value = True - response = self.app.post(FAKE_URL, - headers=FAKE_HEADERS, - upload_files=fields, - expect_errors=True) + response = self.app.post( + FAKE_URL, headers=FAKE_HEADERS, upload_files=fields, expect_errors=True + ) self.assertEqual(response.status_code, http_client.BAD_REQUEST) - @mock.patch.object(dutils, 'get_filename_by_prefix') + @mock.patch.object(dutils, "get_filename_by_prefix") def test_get_subcloud_deploy_with_release(self, mock_get_filename_by_prefix): - def get_filename_by_prefix_side_effect(dir_path, prefix): filename = FAKE_DEPLOY_FILES.get(prefix) if filename: @@ -234,24 +229,34 @@ class TestSubcloudDeploy(testroot.DCManagerApiTest): url = FAKE_URL + '/' + FAKE_SOFTWARE_VERSION with mock.patch('builtins.open', - mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)): + mock.mock_open( + read_data=fake_subcloud.FAKE_UPGRADES_METADATA + )): response = self.app.get(url, headers=FAKE_HEADERS) self.assertEqual(response.status_code, http_client.OK) - self.assertEqual(FAKE_SOFTWARE_VERSION, - response.json['subcloud_deploy']['software_version']) - self.assertEqual(FAKE_DEPLOY_PLAYBOOK_FILE, - response.json['subcloud_deploy'][consts.DEPLOY_PLAYBOOK]) - self.assertEqual(FAKE_DEPLOY_OVERRIDES_FILE, - response.json['subcloud_deploy'][consts.DEPLOY_OVERRIDES]) - self.assertEqual(FAKE_DEPLOY_CHART_FILE, - response.json['subcloud_deploy'][consts.DEPLOY_CHART]) - self.assertEqual(None, - response.json['subcloud_deploy'][consts.DEPLOY_PRESTAGE]) + self.assertEqual( + FAKE_SOFTWARE_VERSION, + response.json["subcloud_deploy"]["software_version"], + ) + self.assertEqual( + FAKE_DEPLOY_PLAYBOOK_FILE, + response.json["subcloud_deploy"][consts.DEPLOY_PLAYBOOK], + ) + self.assertEqual( + FAKE_DEPLOY_OVERRIDES_FILE, + response.json["subcloud_deploy"][consts.DEPLOY_OVERRIDES], + ) + self.assertEqual( + FAKE_DEPLOY_CHART_FILE, + 
response.json["subcloud_deploy"][consts.DEPLOY_CHART], + ) + self.assertEqual( + None, response.json["subcloud_deploy"][consts.DEPLOY_PRESTAGE] + ) - @mock.patch.object(dutils, 'get_filename_by_prefix') + @mock.patch.object(dutils, "get_filename_by_prefix") def test_get_subcloud_deploy_without_release(self, mock_get_filename_by_prefix): - def get_filename_by_prefix_side_effect(dir_path, prefix): filename = FAKE_DEPLOY_FILES.get(prefix) if filename: @@ -260,33 +265,47 @@ class TestSubcloudDeploy(testroot.DCManagerApiTest): return None os.path.isdir = mock.Mock(return_value=True) - mock_get_filename_by_prefix.side_effect = \ - get_filename_by_prefix_side_effect + mock_get_filename_by_prefix.side_effect = get_filename_by_prefix_side_effect response = self.app.get(FAKE_URL, headers=FAKE_HEADERS) self.assertEqual(response.status_code, http_client.OK) - self.assertEqual(SW_VERSION, - response.json['subcloud_deploy']['software_version']) - self.assertEqual(FAKE_DEPLOY_PLAYBOOK_FILE, - response.json['subcloud_deploy'][consts.DEPLOY_PLAYBOOK]) - self.assertEqual(FAKE_DEPLOY_OVERRIDES_FILE, - response.json['subcloud_deploy'][consts.DEPLOY_OVERRIDES]) - self.assertEqual(FAKE_DEPLOY_CHART_FILE, - response.json['subcloud_deploy'][consts.DEPLOY_CHART]) - self.assertEqual(None, - response.json['subcloud_deploy'][consts.DEPLOY_PRESTAGE]) + self.assertEqual( + SW_VERSION, response.json["subcloud_deploy"]["software_version"] + ) + self.assertEqual( + FAKE_DEPLOY_PLAYBOOK_FILE, + response.json["subcloud_deploy"][consts.DEPLOY_PLAYBOOK], + ) + self.assertEqual( + FAKE_DEPLOY_OVERRIDES_FILE, + response.json["subcloud_deploy"][consts.DEPLOY_OVERRIDES], + ) + self.assertEqual( + FAKE_DEPLOY_CHART_FILE, + response.json["subcloud_deploy"][consts.DEPLOY_CHART], + ) + self.assertEqual( + None, response.json["subcloud_deploy"][consts.DEPLOY_PRESTAGE] + ) def test_get_config_file_path(self): bootstrap_file = psd_common.get_config_file_path("subcloud1") - install_values = 
psd_common.get_config_file_path("subcloud1", - consts.INSTALL_VALUES) - deploy_config = psd_common.get_config_file_path("subcloud1", - consts.DEPLOY_CONFIG) - self.assertEqual(bootstrap_file, - f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1.yml') - self.assertEqual(install_values, - f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1/install_values.yml') - self.assertEqual(deploy_config, - f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_deploy_config.yml') + install_values = psd_common.get_config_file_path( + "subcloud1", consts.INSTALL_VALUES + ) + deploy_config = psd_common.get_config_file_path( + "subcloud1", consts.DEPLOY_CONFIG + ) + self.assertEqual( + bootstrap_file, f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1.yml" + ) + self.assertEqual( + install_values, + f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1/install_values.yml", + ) + self.assertEqual( + deploy_config, + f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_deploy_config.yml", + ) @mock.patch.object(os_path, 'isdir') @mock.patch.object(dutils, 'get_sw_version') diff --git a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_peer_group.py b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_peer_group.py index 43cda6322..fc4add3cb 100644 --- a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_peer_group.py +++ b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_peer_group.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -45,46 +45,41 @@ class SubcloudPeerGroupAPIMixin(APIMixin): def _get_test_subcloud_peer_group_request(self, **kw): # id should not be part of the structure group = { - 'peer-group-name': kw.get('peer_group_name', SAMPLE_SUBCLOUD_PEER_GROUP_NAME), + 'peer-group-name': kw.get( + 'peer_group_name', SAMPLE_SUBCLOUD_PEER_GROUP_NAME + ), 'system-leader-id': kw.get( - 'system_leader_id', - '62c9592d-f799-4db9-8d40-6786a74d6021'), + 'system_leader_id', '62c9592d-f799-4db9-8d40-6786a74d6021' + ), 'system-leader-name': kw.get( - 'system_leader_name', - 'dc-test'), - 'group-priority': kw.get( - 'group_priority', - '0'), - 'group-state': kw.get( - 'group_state', - 'enabled'), + 'system_leader_name', 'dc-test' + ), + 'group-priority': kw.get('group_priority', '0'), + 'group-state': kw.get('group_state', 'enabled'), 'max-subcloud-rehoming': kw.get( 'max_subcloud_rehoming', - SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING) + SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING + ) } return group def _get_test_subcloud_peer_group_dict(self, **kw): # id should not be part of the structure group = { - 'peer_group_name': kw.get('peer_group_name', SAMPLE_SUBCLOUD_PEER_GROUP_NAME), + 'peer_group_name': kw.get( + 'peer_group_name', SAMPLE_SUBCLOUD_PEER_GROUP_NAME + ), 'system_leader_id': kw.get( - 'system_leader_id', - '62c9592d-f799-4db9-8d40-6786a74d6021'), - 'system_leader_name': kw.get( - 'system_leader_name', - 'dc-test'), - 'group_priority': kw.get( - 'group_priority', - '0'), - 'group_state': kw.get( - 'group_state', - 'enabled'), + 'system_leader_id', '62c9592d-f799-4db9-8d40-6786a74d6021' + ), + 'system_leader_name': kw.get('system_leader_name', 'dc-test'), + 'group_priority': kw.get('group_priority', '0'), + 'group_state': kw.get('group_state', 'enabled'), 'max_subcloud_rehoming': kw.get( 'max_subcloud_rehoming', - SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING), + SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING + ), 
'migration_status': None - } return group diff --git a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subclouds.py b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subclouds.py index aa90cb95a..ef4cbcf71 100644 --- a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subclouds.py +++ b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subclouds.py @@ -1,5 +1,5 @@ # Copyright (c) 2017 Ericsson AB -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -45,8 +45,8 @@ from dcmanager.tests.unit.api.v1.controllers.mixins import PostMixin from dcmanager.tests.unit.common import fake_subcloud from dcmanager.tests import utils -SAMPLE_SUBCLOUD_NAME = 'SubcloudX' -SAMPLE_SUBCLOUD_DESCRIPTION = 'A Subcloud of mystery' +SAMPLE_SUBCLOUD_NAME = "SubcloudX" +SAMPLE_SUBCLOUD_DESCRIPTION = "A Subcloud of mystery" FAKE_ID = fake_subcloud.FAKE_ID FAKE_URL = fake_subcloud.FAKE_URL @@ -55,19 +55,15 @@ FAKE_HEADERS = fake_subcloud.FAKE_HEADERS FAKE_SUBCLOUD_DATA = fake_subcloud.FAKE_SUBCLOUD_DATA FAKE_BOOTSTRAP_VALUE = fake_subcloud.FAKE_BOOTSTRAP_VALUE FAKE_SUBCLOUD_INSTALL_VALUES = fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES -FAKE_SUBCLOUD_INSTALL_VALUES_WITH_PERSISTENT_SIZE = \ +FAKE_SUBCLOUD_INSTALL_VALUES_WITH_PERSISTENT_SIZE = ( fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES_WITH_PERSISTENT_SIZE +) FAKE_SUBCLOUD_BOOTSTRAP_PAYLOAD = fake_subcloud.FAKE_SUBCLOUD_BOOTSTRAP_PAYLOAD -OAM_FLOATING_IP = '10.10.10.12' +OAM_FLOATING_IP = "10.10.10.12" -FAKE_PATCH = { - "value": { - "patchstate": "Partial-Apply" - } -} +FAKE_PATCH = {"value": {"patchstate": "Partial-Apply"}} -health_report_no_alarm = \ - "System Health:\n \ +health_report_no_alarm = "System Health:\n \ All hosts are provisioned: [Fail]\n \ 1 Unprovisioned hosts\n \ All hosts are unlocked/enabled: [OK]\n \ @@ -78,59 +74,58 @@ 
health_report_no_alarm = \ All kubernetes control plane pods are ready: [OK]" -health_report_no_mgmt_alarm = \ - "System Health:\n" \ - "All hosts are provisioned: [OK]\n" \ - "All hosts are unlocked/enabled: [OK]\n" \ - "All hosts have current configurations: [OK]\n" \ - "All hosts are patch current: [OK]\n" \ - "Ceph Storage Healthy: [OK]\n" \ - "No alarms: [Fail]\n" \ - "[1] alarms found, [0] of which are management affecting\n" \ - "All kubernetes nodes are ready: [OK]\n" \ +health_report_no_mgmt_alarm = ( + "System Health:\n" + "All hosts are provisioned: [OK]\n" + "All hosts are unlocked/enabled: [OK]\n" + "All hosts have current configurations: [OK]\n" + "All hosts are patch current: [OK]\n" + "Ceph Storage Healthy: [OK]\n" + "No alarms: [Fail]\n" + "[1] alarms found, [0] of which are management affecting\n" + "All kubernetes nodes are ready: [OK]\n" "All kubernetes control plane pods are ready: [OK]" +) -health_report_mgmt_alarm = \ - "System Health:\n" \ - "All hosts are provisioned: [OK]\n" \ - "All hosts are unlocked/enabled: [OK]\n" \ - "All hosts have current configurations: [OK]\n" \ - "All hosts are patch current: [OK]\n" \ - "Ceph Storage Healthy: [OK]\n" \ - "No alarms: [Fail]\n" \ - "[1] alarms found, [1] of which are management affecting\n" \ - "All kubernetes nodes are ready: [OK]\n" \ +health_report_mgmt_alarm = ( + "System Health:\n" + "All hosts are provisioned: [OK]\n" + "All hosts are unlocked/enabled: [OK]\n" + "All hosts have current configurations: [OK]\n" + "All hosts are patch current: [OK]\n" + "Ceph Storage Healthy: [OK]\n" + "No alarms: [Fail]\n" + "[1] alarms found, [1] of which are management affecting\n" + "All kubernetes nodes are ready: [OK]\n" "All kubernetes control plane pods are ready: [OK]" +) class Subcloud(object): def __init__(self, data, is_online): - self.id = data['id'] - self.name = data['name'] - self.description = data['description'] - self.location = data['location'] + self.id = data["id"] + self.name = 
data["name"] + self.description = data["description"] + self.location = data["location"] self.management_state = dccommon_consts.MANAGEMENT_UNMANAGED if is_online: self.availability_status = dccommon_consts.AVAILABILITY_ONLINE else: self.availability_status = dccommon_consts.AVAILABILITY_OFFLINE - self.deploy_status = data['deploy_status'] - self.management_subnet = data['management_subnet'] - self.management_gateway_ip = data['management_gateway_address'] - self.management_start_ip = data['management_start_address'] - self.management_end_ip = data['management_end_address'] - self.external_oam_subnet = data['external_oam_subnet'] - self.external_oam_gateway_address = \ - data['external_oam_gateway_address'] - self.external_oam_floating_address = \ - data['external_oam_floating_address'] - self.systemcontroller_gateway_ip = \ - data['systemcontroller_gateway_address'] + self.deploy_status = data["deploy_status"] + self.management_subnet = data["management_subnet"] + self.management_gateway_ip = data["management_gateway_address"] + self.management_start_ip = data["management_start_address"] + self.management_end_ip = data["management_end_address"] + self.external_oam_subnet = data["external_oam_subnet"] + self.external_oam_gateway_address = data["external_oam_gateway_address"] + self.external_oam_floating_address = data["external_oam_floating_address"] + self.systemcontroller_gateway_ip = data["systemcontroller_gateway_address"] self.created_at = timeutils.utcnow() self.updated_at = timeutils.utcnow() - self.data_install = '' - self.data_upgrade = '' + self.data_install = "" + self.data_upgrade = "" class FakeAddressPool(object): @@ -145,10 +140,16 @@ class FakeAddressPool(object): class FakeOAMAddressPool(object): - def __init__(self, oam_subnet, oam_start_ip, - oam_end_ip, oam_c1_ip, - oam_c0_ip, oam_gateway_ip, - oam_floating_ip): + def __init__( + self, + oam_subnet, + oam_start_ip, + oam_end_ip, + oam_c1_ip, + oam_c0_ip, + oam_gateway_ip, + oam_floating_ip, + ): 
self.oam_start_ip = oam_start_ip self.oam_end_ip = oam_end_ip self.oam_c1_ip = oam_c1_ip @@ -159,16 +160,18 @@ class FakeOAMAddressPool(object): class SubcloudAPIMixin(APIMixin): - API_PREFIX = '/v1.0/subclouds' - RESULT_KEY = 'subclouds' + API_PREFIX = "/v1.0/subclouds" + RESULT_KEY = "subclouds" # todo: populate the entire expected fields - EXPECTED_FIELDS = ['id', - 'name', - 'description', - 'location', - 'management-state', - 'created-at', - 'updated-at'] + EXPECTED_FIELDS = [ + "id", + "name", + "description", + "location", + "management-state", + "created-at", + "updated-at", + ] FAKE_BOOTSTRAP_DATA = { "system_mode": "simplex", @@ -209,9 +212,8 @@ class SubcloudAPIMixin(APIMixin): def _get_test_subcloud_dict(self, **kw): # id should not be part of the structure subcloud = { - 'name': kw.get('name', SAMPLE_SUBCLOUD_NAME), - 'description': kw.get('description', - SAMPLE_SUBCLOUD_DESCRIPTION), + "name": kw.get("name", SAMPLE_SUBCLOUD_NAME), + "description": kw.get("description", SAMPLE_SUBCLOUD_DESCRIPTION), } return subcloud @@ -260,86 +262,88 @@ class SubcloudAPIMixin(APIMixin): return self._post_get_test_subcloud() def get_update_object(self): - update_object = { - 'description': 'Updated description' - } + update_object = {"description": "Updated description"} return update_object # Combine Subcloud Group API with mixins to test post, get, update and delete -class TestSubcloudPost(testroot.DCManagerApiTest, - SubcloudAPIMixin, - PostMixin): +class TestSubcloudPost(testroot.DCManagerApiTest, SubcloudAPIMixin, PostMixin): def setUp(self): super(TestSubcloudPost, self).setUp() self.list_of_post_files = psd.SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS self.bootstrap_data = copy.copy(self.FAKE_BOOTSTRAP_DATA) self.install_data = copy.copy(self.FAKE_INSTALL_DATA) - self.management_address_pool = FakeAddressPool('192.168.204.0', 24, - '192.168.204.2', - '192.168.204.100') + self.management_address_pool = FakeAddressPool( + "192.168.204.0", 24, "192.168.204.2", 
"192.168.204.100" + ) - p = mock.patch.object(psd_common, 'get_network_address_pool') + p = mock.patch.object(psd_common, "get_network_address_pool") self.mock_get_network_address_pool = p.start() - self.mock_get_network_address_pool.return_value = \ + self.mock_get_network_address_pool.return_value = ( self.management_address_pool + ) self.addCleanup(p.stop) - p = mock.patch.object(rpc_client, 'ManagerClient') + p = mock.patch.object(rpc_client, "ManagerClient") self.mock_rpc_client = p.start() self.addCleanup(p.stop) - p = mock.patch.object(psd_common, 'get_ks_client') + p = mock.patch.object(psd_common, "get_ks_client") self.mock_get_ks_client = p.start() self.addCleanup(p.stop) - p = mock.patch.object(psd_common.PatchingClient, 'query') + p = mock.patch.object(psd_common.PatchingClient, "query") self.mock_query = p.start() self.addCleanup(p.stop) - p = mock.patch.object(rpc_client, 'SubcloudStateClient') + p = mock.patch.object(rpc_client, "SubcloudStateClient") self.mock_rpc_state_client = p.start() self.addCleanup(p.stop) def _verify_post_failure(self, response, param, value): - self.assertEqual(http_client.BAD_REQUEST, - response.status_code, - message=("%s=%s returned %s instead of %s" - % (param, - value, - response.status_code, - http_client.BAD_REQUEST))) + self.assertEqual( + http_client.BAD_REQUEST, + response.status_code, + message=( + "%s=%s returned %s instead of %s" + % (param, value, response.status_code, http_client.BAD_REQUEST) + ), + ) # Note: response failures return 'text' rather than json - self.assertEqual('text/plain', response.content_type) + self.assertEqual("text/plain", response.content_type) def _verify_post_success(self, response): self.assertEqual(http_client.OK, response.status_code) - self.assertEqual('application/json', response.content_type) + self.assertEqual("application/json", response.content_type) self.assert_fields(response.json) def test_post_subcloud_wrong_url(self): """Test POST operation rejected when going to the 
wrong URL.""" params = self.get_post_params() upload_files = self.get_post_upload_files() - six.assertRaisesRegex(self, - webtest.app.AppError, - "404 *", - self.app.post, - WRONG_URL, - params=params, - upload_files=upload_files, - headers=self.get_api_headers()) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "404 *", + self.app.post, + WRONG_URL, + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + ) def test_post_no_body(self): """Test POST operation with nearly everything wrong with it.""" - six.assertRaisesRegex(self, - webtest.app.AppError, - "400 *", - self.app.post, - self.get_api_prefix(), - params={}, - headers=self.get_api_headers()) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.post, + self.get_api_prefix(), + params={}, + headers=self.get_api_headers(), + ) @mock.patch.object(cutils, 'LOG') def test_post_subcloud_boostrap_file_malformed(self, mock_logging): @@ -354,7 +358,9 @@ class TestSubcloudPost(testroot.DCManagerApiTest, corrupt_fake_content = \ (yaml.dump(self.FAKE_BOOTSTRAP_DATA) + invalid_keyval).encode("utf-8") upload_files = list() - upload_files.append((consts.BOOTSTRAP_VALUES, fake_name, corrupt_fake_content)) + upload_files.append( + (consts.BOOTSTRAP_VALUES, fake_name, corrupt_fake_content) + ) response = self.app.post(self.get_api_prefix(), params=params, upload_files=upload_files, @@ -362,11 +368,14 @@ class TestSubcloudPost(testroot.DCManagerApiTest, expect_errors=True) self.assertEqual(mock_logging.error.call_count, 1) log_string = mock_logging.error.call_args[0][0] - self.assertIn('Error: Unable to load bootstrap_values file contents', log_string) + self.assertIn( + 'Error: Unable to load bootstrap_values file contents', log_string + ) self.assertEqual(http_client.BAD_REQUEST, response.status_code) # try with valid new entry and verify it works - valid_content = (yaml.dump(self.FAKE_BOOTSTRAP_DATA) + valid_keyval).encode("utf-8") + valid_content = \ + 
(yaml.dump(self.FAKE_BOOTSTRAP_DATA) + valid_keyval).encode("utf-8") upload_files = list() upload_files.append((consts.BOOTSTRAP_VALUES, fake_name, valid_content)) response = self.app.post(self.get_api_prefix(), @@ -388,20 +397,24 @@ class TestSubcloudPost(testroot.DCManagerApiTest, self.bootstrap_data = copy.copy(self.FAKE_BOOTSTRAP_DATA) del self.bootstrap_data[key] upload_files = self.get_post_upload_files() - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers(), - expect_errors=True) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + expect_errors=True, + ) self._verify_post_failure(response, key, None) # try with nothing removed and verify it works self.bootstrap_data = copy.copy(self.FAKE_BOOTSTRAP_DATA) upload_files = self.get_post_upload_files() - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers()) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + ) self._verify_post_success(response) def _test_post_param_inputs(self, param_key, bad_values, good_value): @@ -411,19 +424,23 @@ class TestSubcloudPost(testroot.DCManagerApiTest, # Test all the bad param values for bad_value in bad_values: params[param_key] = bad_value - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers(), - expect_errors=True) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + expect_errors=True, + ) self._verify_post_failure(response, param_key, bad_value) # Test that a good value will work params[param_key] = good_value - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - 
headers=self.get_api_headers()) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + ) self._verify_post_success(response) def test_post_subcloud_bad_bootstrap_address(self): @@ -431,27 +448,25 @@ class TestSubcloudPost(testroot.DCManagerApiTest, param_key = "bootstrap-address" # bootstrap-address must be valid IP address - bad_values = ["10.10.10.wut", # including letters in the IP - "10.10.10.276" # 276 is invalid - ] + bad_values = [ + "10.10.10.wut", # including letters in the IP + "10.10.10.276", # 276 is invalid + ] good_values = "10.10.10.3" - self._test_post_param_inputs(param_key, - bad_values, - good_values) + self._test_post_param_inputs(param_key, bad_values, good_values) def test_post_subcloud_bad_IPv6_bootstrap_address(self): """Test POST operation with a bad bootstrap-address""" param_key = "bootstrap-address" # bootstrap-address must be valid IP address - bad_values = ["2620::10a:a103::1135", # more than one double colons - "2620:10a:a001:a103::wut", # invalid letter - "2620:10a:a001:a103:1135" # Incomplete IP - ] + bad_values = [ + "2620::10a:a103::1135", # more than one double colons + "2620:10a:a001:a103::wut", # invalid letter + "2620:10a:a001:a103:1135", # Incomplete IP + ] good_values = "2620:10a:a001:a103::1135" - self._test_post_param_inputs(param_key, - bad_values, - good_values) + self._test_post_param_inputs(param_key, bad_values, good_values) def test_post_subcloud_bad_gateway(self): """Test POST with an invalid gateway.""" @@ -460,31 +475,29 @@ class TestSubcloudPost(testroot.DCManagerApiTest, # systemcontroller_gateway_address must be appropriate address within # the management address pool which is # 192.168.204.0/24 greater than 100 - bad_values = ["192.168.205.101", # 205.xx not in the pool - "192.168.204.99", # 99 is reserved in the pool - "192.168.276.276", # 276 is not a valid IP address - "192.168.206.wut", # including letters in the IP - 
"192.168.204", # incomplete IP - ] + bad_values = [ + "192.168.205.101", # 205.xx not in the pool + "192.168.204.99", # 99 is reserved in the pool + "192.168.276.276", # 276 is not a valid IP address + "192.168.206.wut", # including letters in the IP + "192.168.204", # incomplete IP + ] good_value = "192.168.204.101" - self._test_post_param_inputs(param_key, - bad_values, - good_value) + self._test_post_param_inputs(param_key, bad_values, good_value) def test_post_subcloud_bad_subnet(self): """Test POST with an invalid subnet.""" param_key = "management_subnet" - bad_values = ["192.168.101.0/32", # /32 would be just one IP - "192.168.101.0/33", # /33 is an invalid CIDR - "192.168.276.0/24", # 276 makes no sense as an IP - "192.168.206.wut/24", # including letters in the IP - "192.168.204/24", # incomplete CIDR - ] + bad_values = [ + "192.168.101.0/32", # /32 would be just one IP + "192.168.101.0/33", # /33 is an invalid CIDR + "192.168.276.0/24", # 276 makes no sense as an IP + "192.168.206.wut/24", # including letters in the IP + "192.168.204/24", # incomplete CIDR + ] good_value = "192.168.101.0/24" - self._test_post_param_inputs(param_key, - bad_values, - good_value) + self._test_post_param_inputs(param_key, bad_values, good_value) def test_post_subcloud_bad_start_ip(self): """Test POST with an invalid management_start_address. 
@@ -496,17 +509,16 @@ class TestSubcloudPost(testroot.DCManagerApiTest, param_key = "management_start_address" # subnet is 192.168.101.0/24 # end address is 192.168.101.50 - bad_values = ["192.168.100.2", # xx.xx.100.xx is not in the subnet - "192.168.101.51", # start is higher than end - "192.168.101.48", # start is too close to end - "192.168.276.0", # 276 makes no sense as an IP - "192.168.206.wut", # including letters in the IP - "192.168.204", # incomplete IP - ] + bad_values = [ + "192.168.100.2", # xx.xx.100.xx is not in the subnet + "192.168.101.51", # start is higher than end + "192.168.101.48", # start is too close to end + "192.168.276.0", # 276 makes no sense as an IP + "192.168.206.wut", # including letters in the IP + "192.168.204", # incomplete IP + ] good_value = "192.168.101.2" - self._test_post_param_inputs(param_key, - bad_values, - good_value) + self._test_post_param_inputs(param_key, bad_values, good_value) def test_post_subcloud_bad_end_ip(self): """Test POST with an invalid management_end_address. 
@@ -518,23 +530,22 @@ class TestSubcloudPost(testroot.DCManagerApiTest, param_key = "management_end_address" # subnet is 192.168.101.0/24 # start address is 192.168.101.2 - bad_values = ["192.168.100.50", # xx.xx.100.xx is not in the subnet - "192.168.101.1", # end is less than start - "192.168.101.4", # end is too close to start - "192.168.276.50", # 276 makes no sense as an IP - "192.168.206.wut", # including letters in the IP - "192.168.204", # incomplete IP - ] + bad_values = [ + "192.168.100.50", # xx.xx.100.xx is not in the subnet + "192.168.101.1", # end is less than start + "192.168.101.4", # end is too close to start + "192.168.276.50", # 276 makes no sense as an IP + "192.168.206.wut", # including letters in the IP + "192.168.204", # incomplete IP + ] good_value = "192.168.101.50" - self._test_post_param_inputs(param_key, - bad_values, - good_value) + self._test_post_param_inputs(param_key, bad_values, good_value) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_values(self, mock_vault_files): """Test POST operation with install values is supported by the API.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") # pass a different "install" list of files for this POST self.set_list_of_post_files(subclouds.SUBCLOUD_ADD_GET_FILE_CONTENTS) @@ -543,20 +554,26 @@ class TestSubcloudPost(testroot.DCManagerApiTest, params = self.get_post_params() # add bmc_password to params params.update( - {'bmc_password': - base64.b64encode('fake pass'.encode("utf-8")).decode("utf-8")}) + { + "bmc_password": base64.b64encode("fake pass".encode("utf-8")).decode( + "utf-8" + ) + } + ) - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers()) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, 
+ headers=self.get_api_headers(), + ) self._verify_post_success(response) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_without_release_parameter(self, mock_vault_files): """Test POST operation without release parameter.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") self.set_list_of_post_files(subclouds.SUBCLOUD_ADD_GET_FILE_CONTENTS) upload_files = self.get_post_upload_files() @@ -564,17 +581,23 @@ class TestSubcloudPost(testroot.DCManagerApiTest, params = self.get_post_params() # add bmc_password to params params.update( - {'bmc_password': - base64.b64encode('fake pass'.encode("utf-8")).decode("utf-8")}) + { + "bmc_password": base64.b64encode("fake pass".encode("utf-8")).decode( + "utf-8" + ) + } + ) - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers()) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + ) self._verify_post_success(response) # Verify that the subcloud installed with the active release # when no release parameter provided. 
- self.assertEqual(SW_VERSION, response.json['software-version']) + self.assertEqual(SW_VERSION, response.json["software-version"]) def test_post_subcloud_release_not_match_install_values_sw(self): """Release parameter not match software_version in the install_values.""" @@ -585,12 +608,18 @@ class TestSubcloudPost(testroot.DCManagerApiTest, params = self.get_post_params() # add bmc_password and release to params params.update( - {'bmc_password': - base64.b64encode('fake pass'.encode("utf-8")).decode("utf-8"), - 'release': '21.12'}) + { + "bmc_password": base64.b64encode("fake pass".encode("utf-8")).decode( + "utf-8" + ), + "release": "21.12", + } + ) with mock.patch('builtins.open', - mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)): + mock.mock_open( + read_data=fake_subcloud.FAKE_UPGRADES_METADATA + )): response = self.app.post(self.get_api_prefix(), params=params, upload_files=upload_files, @@ -600,17 +629,18 @@ class TestSubcloudPost(testroot.DCManagerApiTest, # Verify the request was rejected self.assertEqual(response.status_code, http_client.BAD_REQUEST) - @mock.patch.object(psd_common, 'validate_k8s_version') - @mock.patch('dcmanager.common.utils.get_vault_load_files') - def test_post_subcloud_with_release_parameter(self, mock_vault_files, - mock_validate_k8s_version): + @mock.patch.object(psd_common, "validate_k8s_version") + @mock.patch("dcmanager.common.utils.get_vault_load_files") + def test_post_subcloud_with_release_parameter( + self, mock_vault_files, mock_validate_k8s_version + ): """Test POST operation with release parameter.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') - software_version = '21.12' + mock_vault_files.return_value = ("fake_iso", "fake_sig") + software_version = "21.12" # Update the software_version value to match the release parameter value, # otherwise, the request will be rejected - self.install_data['software_version'] = software_version + self.install_data["software_version"] = software_version 
self.set_list_of_post_files(subclouds.SUBCLOUD_ADD_GET_FILE_CONTENTS) upload_files = self.get_post_upload_files() @@ -618,12 +648,18 @@ class TestSubcloudPost(testroot.DCManagerApiTest, params = self.get_post_params() # add bmc_password and release to params params.update( - {'bmc_password': - base64.b64encode('fake pass'.encode("utf-8")).decode("utf-8"), - 'release': software_version}) + { + "bmc_password": base64.b64encode("fake pass".encode("utf-8")).decode( + "utf-8" + ), + "release": software_version, + } + ) with mock.patch('builtins.open', - mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)): + mock.mock_open( + read_data=fake_subcloud.FAKE_UPGRADES_METADATA + )): response = self.app.post(self.get_api_prefix(), params=params, upload_files=upload_files, @@ -631,31 +667,33 @@ class TestSubcloudPost(testroot.DCManagerApiTest, expect_errors=True) self.assertEqual(response.status_code, http_client.OK) - self.assertEqual(software_version, response.json['software-version']) + self.assertEqual(software_version, response.json["software-version"]) # Revert the software_version value - self.install_data['software_version'] = SW_VERSION + self.install_data["software_version"] = SW_VERSION - @mock.patch.object(psd_common.PatchingClient, 'query') + @mock.patch.object(psd_common.PatchingClient, "query") def test_post_subcloud_when_partial_applied_patch(self, mock_query): """Test POST operation when there is a partial-applied patch.""" upload_files = self.get_post_upload_files() params = self.get_post_params() mock_query.return_value = FAKE_PATCH - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers(), - expect_errors=True) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + expect_errors=True, + ) self.assertEqual(http_client.UNPROCESSABLE_ENTITY, response.status_code) - self.assertEqual('text/plain', 
response.content_type) + self.assertEqual("text/plain", response.content_type) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_values_no_bmc_password(self, mock_vault_files): """Test POST operation with install values is supported by the API.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") # pass a different "install" list of files for this POST self.set_list_of_post_files(subclouds.SUBCLOUD_ADD_GET_FILE_CONTENTS) @@ -664,24 +702,32 @@ class TestSubcloudPost(testroot.DCManagerApiTest, params = self.get_post_params() # for this unit test, omit adding bmc_password to params - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers(), - expect_errors=True) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + expect_errors=True, + ) self._verify_post_failure(response, "bmc_password", None) # add the bmc_password and verify that now it works params.update( - {'bmc_password': - base64.b64encode('fake pass'.encode("utf-8")).decode("utf-8")}) - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers()) + { + "bmc_password": base64.b64encode("fake pass".encode("utf-8")).decode( + "utf-8" + ) + } + ) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + ) self._verify_post_success(response) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_missing_image(self, mock_vault_files): """Test POST operation without image in install values and vault files.""" @@ -690,30 +736,40 @@ class 
TestSubcloudPost(testroot.DCManagerApiTest, params = self.get_post_params() # add bmc_password to params params.update( - {'bmc_password': - base64.b64encode('fake pass'.encode("utf-8")).decode("utf-8")}) + { + "bmc_password": base64.b64encode("fake pass".encode("utf-8")).decode( + "utf-8" + ) + } + ) self.set_list_of_post_files(subclouds.SUBCLOUD_ADD_GET_FILE_CONTENTS) self.install_data = copy.copy(self.FAKE_INSTALL_DATA) upload_files = self.get_post_upload_files() - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers(), - expect_errors=True) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + expect_errors=True, + ) self.assertEqual(response.status_code, http_client.BAD_REQUEST) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_values_missing(self, mock_vault_files): """Test POST operation with install values fails if data missing.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") params = self.get_post_params() # add bmc_password to params params.update( - {'bmc_password': - base64.b64encode('fake pass'.encode("utf-8")).decode("utf-8")}) + { + "bmc_password": base64.b64encode("fake pass".encode("utf-8")).decode( + "utf-8" + ) + } + ) self.set_list_of_post_files(subclouds.SUBCLOUD_ADD_GET_FILE_CONTENTS) # for each entry in install content, try with one key missing @@ -721,46 +777,56 @@ class TestSubcloudPost(testroot.DCManagerApiTest, self.install_data = copy.copy(self.FAKE_INSTALL_DATA) del self.install_data[key] upload_files = self.get_post_upload_files() - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers(), - expect_errors=True) + response = self.app.post( + 
self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + expect_errors=True, + ) self._verify_post_failure(response, key, None) - @mock.patch('dcmanager.common.utils.get_vault_load_files') - @mock.patch.object(cutils, 'get_playbook_for_software_version') - @mock.patch.object(cutils, 'get_value_from_yaml_file') - def test_post_subcloud_bad_kubernetes_version(self, - mock_get_value_from_yaml_file, - mock_get_playbook_for_software_version, - mock_vault_files): + @mock.patch("dcmanager.common.utils.get_vault_load_files") + @mock.patch.object(cutils, "get_playbook_for_software_version") + @mock.patch.object(cutils, "get_value_from_yaml_file") + def test_post_subcloud_bad_kubernetes_version( + self, + mock_get_value_from_yaml_file, + mock_get_playbook_for_software_version, + mock_vault_files, + ): """Test POST operation with bad kubernetes_version.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") - software_version = '21.12' + software_version = "21.12" # Update the software_version value to match the release parameter value, # otherwise, the request will be rejected - self.install_data['software_version'] = software_version + self.install_data["software_version"] = software_version params = self.get_post_params() # add bmc_password to params params.update( - {'bmc_password': - base64.b64encode('fake pass'.encode("utf-8")).decode("utf-8"), - 'release': software_version}) + { + "bmc_password": base64.b64encode("fake pass".encode("utf-8")).decode( + "utf-8" + ), + "release": software_version, + } + ) # Add kubernetes version to bootstrap_data - self.bootstrap_data['kubernetes_version'] = '1.21.8' - mock_get_value_from_yaml_file.return_value = '1.23.1' + self.bootstrap_data["kubernetes_version"] = "1.21.8" + mock_get_value_from_yaml_file.return_value = "1.23.1" self.set_list_of_post_files(subclouds.SUBCLOUD_ADD_GET_FILE_CONTENTS) self.install_data = 
copy.copy(self.FAKE_INSTALL_DATA) upload_files = self.get_post_upload_files() with mock.patch('builtins.open', - mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)): + mock.mock_open( + read_data=fake_subcloud.FAKE_UPGRADES_METADATA + )): response = self.app.post(self.get_api_prefix(), params=params, upload_files=upload_files, @@ -770,12 +836,11 @@ class TestSubcloudPost(testroot.DCManagerApiTest, self.assertEqual(response.status_code, http_client.BAD_REQUEST) # Revert the change of bootstrap_data - del self.bootstrap_data['kubernetes_version'] + del self.bootstrap_data["kubernetes_version"] - def _test_post_input_value_inputs(self, - setup_overrides, - required_overrides, - param_key, bad_values, good_value): + def _test_post_input_value_inputs( + self, setup_overrides, required_overrides, param_key, bad_values, good_value + ): """This utility checks for test permutions. The setup_overrides are the initial modifications to the install data @@ -786,8 +851,12 @@ class TestSubcloudPost(testroot.DCManagerApiTest, """ params = self.get_post_params() params.update( - {'bmc_password': - base64.b64encode('fake pass'.encode("utf-8")).decode("utf-8")}) + { + "bmc_password": base64.b64encode("fake pass".encode("utf-8")).decode( + "utf-8" + ) + } + ) self.set_list_of_post_files(subclouds.SUBCLOUD_ADD_GET_FILE_CONTENTS) # Setup starting install data @@ -795,7 +864,7 @@ class TestSubcloudPost(testroot.DCManagerApiTest, starting_data = copy.copy(self.FAKE_INSTALL_DATA) for key, val in setup_overrides.items(): starting_data[key] = val - starting_data['image'] = 'fake image' + starting_data["image"] = "fake image" # Test all the bad param values for bad_value in bad_values: @@ -806,11 +875,13 @@ class TestSubcloudPost(testroot.DCManagerApiTest, # Apply the bad value self.install_data[param_key] = bad_value upload_files = self.get_post_upload_files() - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - 
headers=self.get_api_headers(), - expect_errors=True) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + expect_errors=True, + ) self._verify_post_failure(response, param_key, bad_value) # Test that any missing override required to use with the good value @@ -824,11 +895,13 @@ class TestSubcloudPost(testroot.DCManagerApiTest, # The 'good' value should still fail if a required override missing self.install_data[param_key] = good_value upload_files = self.get_post_upload_files() - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers(), - expect_errors=True) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + expect_errors=True, + ) self._verify_post_failure(response, param_key, bad_value) # Test that a good value and all required overrides works @@ -837,104 +910,116 @@ class TestSubcloudPost(testroot.DCManagerApiTest, self.install_data[key] = val self.install_data[param_key] = good_value upload_files = self.get_post_upload_files() - response = self.app.post(self.get_api_prefix(), - params=params, - upload_files=upload_files, - headers=self.get_api_headers()) + response = self.app.post( + self.get_api_prefix(), + params=params, + upload_files=upload_files, + headers=self.get_api_headers(), + ) self._verify_post_success(response) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_values_invalid_type(self, mock_vault_files): """Test POST with an invalid type specified in install values.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") setup_overrides = {} required_overrides = {} # the install_type must a number 0 <= X <=5 install_key = "install_type" - bad_values = 
[-1, # negative - 6, # too big - "3", # alphbetical - "w", # really alphbetical - "", # empty - None, # None - ] + bad_values = [ + -1, # negative + 6, # too big + "3", # alphbetical + "w", # really alphbetical + "", # empty + None, # None + ] good_value = 3 - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_bad_bootstrap_ip(self, mock_vault_files): """Test POST with invalid boostrap ip specified in install values.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") setup_overrides = {} required_overrides = {} install_key = "bootstrap_address" - bad_values = ["192.168.1.256", # 256 is not valid - "192.168.206.wut", # including letters in the IP - None, # None - ] + bad_values = [ + "192.168.1.256", # 256 is not valid + "192.168.206.wut", # including letters in the IP + None, # None + ] # Note: an incomplete IP address is 10.10.10 is considered valid good_value = "10.10.10.12" - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_bad_bmc_ip(self, mock_vault_files): """Test POST with invalid bmc ip specified in install values.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") setup_overrides = {} required_overrides = {} install_key = "bmc_address" - bad_values = ["128.224.64.256", 
# 256 is not valid - "128.224.64.wut", # including letters in the IP - None, # None - ] + bad_values = [ + "128.224.64.256", # 256 is not valid + "128.224.64.wut", # including letters in the IP + None, # None + ] good_value = "128.224.64.1" - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_bad_persistent_size(self, mock_vault_files): """Test POST with invalid persistent_size specified in install values.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") setup_overrides = {} required_overrides = {} install_key = "persistent_size" - bad_values = ["4000o", # not an integer - "20000", # less than 30000 - 40000.1, # fraction - None, # None - ] + bad_values = [ + "4000o", # not an integer + "20000", # less than 30000 + 40000.1, # fraction + None, # None + ] good_value = 40000 - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_bad_nexthop_gateway(self, mock_vault_files): """Test POST with invalid nexthop_gateway in install values.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") setup_overrides = {} required_overrides = {} # nexthop_gateway is not required. 
but if provided, it must be valid install_key = "nexthop_gateway" - bad_values = ["128.224.64.256", # 256 is not valid - "128.224.64.wut", # including letters in the IP - None, # None - ] + bad_values = [ + "128.224.64.256", # 256 is not valid + "128.224.64.wut", # including letters in the IP + None, # None + ] good_value = "192.168.1.2" - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_bad_network_address(self, mock_vault_files): """Test POST with invalid network_address in install values.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") setup_overrides = {} # The nexthop_gateway is required when network_address is present # The network mask is required when network address is present @@ -945,291 +1030,322 @@ class TestSubcloudPost(testroot.DCManagerApiTest, # network_address is not required. 
but if provided, it must be valid install_key = "network_address" # todo(abailey): None will cause the API to fail - bad_values = ["fd01:6::0", # mis-match ipv6 vs ipv4 - ] + bad_values = [ + "fd01:6::0", # mis-match ipv6 vs ipv4 + ] good_value = "192.168.101.10" # ipv4 - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_bad_network_mask(self, mock_vault_files): """Test POST with invalid network_mask in install values.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") # network_address is not required. but if provided a valid network_mask # is needed setup_overrides = { "nexthop_gateway": "192.168.1.2", - "network_address": "192.168.101.10" + "network_address": "192.168.101.10", } required_overrides = {} install_key = "network_mask" - bad_values = [None, # None - 64, # network_mask cannot really be greater than 32 - -1, # network_mask cannot really be negative - "junk", # network_mask cannot be a junk string - ] + bad_values = [ + None, # None + 64, # network_mask cannot really be greater than 32 + -1, # network_mask cannot really be negative + "junk", # network_mask cannot be a junk string + ] good_value = 32 - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_diff_bmc_ip_version(self, mock_vault_files): """Test POST install values with 
mismatched(ipv4/ipv6) bmc ip.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') - setup_overrides = { - "bootstrap_address": "192.168.1.2" - } + mock_vault_files.return_value = ("fake_iso", "fake_sig") + setup_overrides = {"bootstrap_address": "192.168.1.2"} required_overrides = {} # bootstrap address ip version must match bmc_address. default ipv4 install_key = "bmc_address" - bad_values = ["fd01:6::7", # ipv6 - None, # None - "192.168.-1.1", # bad ipv4 - ] - good_value = "192.168.1.7" # ipv4 - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + bad_values = [ + "fd01:6::7", # ipv6 + None, # None + "192.168.-1.1", # bad ipv4 + ] + good_value = "192.168.1.7" # ipv4 + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) - @mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_diff_bmc_ip_version_ipv6(self, mock_vault_files): """Test POST install values with mismatched(ipv6/ipv4) bmc ip.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") # version of bootstrap address must be same as bmc_address - setup_overrides = { - "bootstrap_address": "fd01:6::7" - } + setup_overrides = {"bootstrap_address": "fd01:6::7"} required_overrides = {} install_key = "bmc_address" - bad_values = ["192.168.1.7", # ipv4 - None, # None - "fd01:6:-1", # bad ipv6 - ] - good_value = "fd01:6::7" # ipv6 - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + bad_values = [ + "192.168.1.7", # ipv4 + None, # None + "fd01:6:-1", # bad ipv6 + ] + good_value = "fd01:6::7" # ipv6 + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) - 
@mock.patch('dcmanager.common.utils.get_vault_load_files') + @mock.patch("dcmanager.common.utils.get_vault_load_files") def test_post_subcloud_install_diff_nexthop_ip_version(self, mock_vault_files): """Test POST install values mismatched(ipv4/ipv6) nexthop_gateway.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") # ip version of bootstrap address must be same as nexthop_gateway # All required addresses (like bmc address) much match bootstrap # default bmc address is ipv4 - setup_overrides = { - "bootstrap_address": "192.168.1.5" - } + setup_overrides = {"bootstrap_address": "192.168.1.5"} required_overrides = {} install_key = "nexthop_gateway" - bad_values = ["fd01:6::7", ] # ipv6 - good_value = "192.168.1.7" # ipv4 - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + bad_values = [ + "fd01:6::7", + ] # ipv6 + good_value = "192.168.1.7" # ipv4 + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) - @mock.patch('dcmanager.common.utils.get_vault_load_files') - def test_post_subcloud_install_diff_nexthop_ip_version_ipv6(self, - mock_vault_files): + @mock.patch("dcmanager.common.utils.get_vault_load_files") + def test_post_subcloud_install_diff_nexthop_ip_version_ipv6( + self, mock_vault_files + ): """Test POST install values with mismatched(ipv6/ipv4) bmc ip.""" - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + mock_vault_files.return_value = ("fake_iso", "fake_sig") # version of bootstrap address must be same as nexthop_gateway # All required addresses must also be setup ipv6 such as bmc_address # default bmc address is ipv4 - setup_overrides = { - "bootstrap_address": "fd01:6::6" - } - required_overrides = { - "bmc_address": "fd01:6::7" - } + setup_overrides = {"bootstrap_address": "fd01:6::6"} + required_overrides = {"bmc_address": "fd01:6::7"} install_key = 
"nexthop_gateway" - bad_values = ["192.168.1.7", ] # ipv4 - good_value = "fd01:6::8" # ipv6 - self._test_post_input_value_inputs(setup_overrides, required_overrides, - install_key, bad_values, good_value) + bad_values = [ + "192.168.1.7", + ] # ipv4 + good_value = "fd01:6::8" # ipv6 + self._test_post_input_value_inputs( + setup_overrides, required_overrides, install_key, bad_values, good_value + ) class TestSubcloudAPIOther(testroot.DCManagerApiTest): """Test GET, delete and patch API calls""" + def setUp(self): super(TestSubcloudAPIOther, self).setUp() self.ctx = utils.dummy_context() - p = mock.patch.object(rpc_client, 'SubcloudStateClient') + p = mock.patch.object(rpc_client, "SubcloudStateClient") self.mock_rpc_state_client = p.start() self.addCleanup(p.stop) - p = mock.patch.object(rpc_client, 'ManagerClient') + p = mock.patch.object(rpc_client, "ManagerClient") self.mock_rpc_client = p.start() self.addCleanup(p.stop) - p = mock.patch.object(psd_common, 'get_ks_client') + p = mock.patch.object(psd_common, "get_ks_client") self.mock_get_ks_client = p.start() self.addCleanup(p.stop) def test_delete_subcloud(self): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - delete_url = FAKE_URL + '/' + str(subcloud.id) + delete_url = FAKE_URL + "/" + str(subcloud.id) self.mock_rpc_client().delete_subcloud.return_value = True response = self.app.delete_json(delete_url, headers=FAKE_HEADERS) self.mock_rpc_client().delete_subcloud.assert_called_once_with( - mock.ANY, mock.ANY) + mock.ANY, mock.ANY + ) self.assertEqual(response.status_int, 200) def test_delete_wrong_request(self): - delete_url = WRONG_URL + '/' + FAKE_ID - six.assertRaisesRegex(self, webtest.app.AppError, "404 *", - self.app.delete_json, delete_url, - headers=FAKE_HEADERS) + delete_url = WRONG_URL + "/" + FAKE_ID + six.assertRaisesRegex( + self, + webtest.app.AppError, + "404 *", + self.app.delete_json, + delete_url, + headers=FAKE_HEADERS, + ) - @mock.patch.object(subclouds.SubcloudsController, 
'_get_oam_addresses') - def test_get_subcloud(self, - mock_get_oam_addresses): + @mock.patch.object(subclouds.SubcloudsController, "_get_oam_addresses") + def test_get_subcloud(self, mock_get_oam_addresses): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - get_url = FAKE_URL + '/' + str(subcloud.id) + get_url = FAKE_URL + "/" + str(subcloud.id) response = self.app.get(get_url, headers=FAKE_HEADERS) - self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.content_type, "application/json") self.assertEqual(response.status_code, http_client.OK) - self.assertEqual(response.json.get('oam_floating_ip', None), None) - self.assertEqual(response.json['name'], subcloud.name) + self.assertEqual(response.json.get("oam_floating_ip", None), None) + self.assertEqual(response.json["name"], subcloud.name) - @mock.patch.object(subclouds.SubcloudsController, - '_get_deploy_config_sync_status') - @mock.patch.object(subclouds.SubcloudsController, '_get_oam_addresses') - def test_get_online_subcloud_with_additional_detail(self, - mock_get_oam_addresses, - mock_get_deploy_config_sync_status): + @mock.patch.object( + subclouds.SubcloudsController, "_get_deploy_config_sync_status" + ) + @mock.patch.object(subclouds.SubcloudsController, "_get_oam_addresses") + def test_get_online_subcloud_with_additional_detail( + self, mock_get_oam_addresses, mock_get_deploy_config_sync_status + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) updated_subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE) + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + ) - get_url = FAKE_URL + '/' + str(updated_subcloud.id) + '/detail' - oam_addresses = FakeOAMAddressPool('10.10.10.254', - '10.10.10.1', - '10.10.10.254', - '10.10.10.4', - '10.10.10.3', - '10.10.10.1', - '10.10.10.2') + get_url = FAKE_URL + "/" + str(updated_subcloud.id) + "/detail" + oam_addresses = 
FakeOAMAddressPool( + "10.10.10.254", + "10.10.10.1", + "10.10.10.254", + "10.10.10.4", + "10.10.10.3", + "10.10.10.1", + "10.10.10.2", + ) mock_get_oam_addresses.return_value = oam_addresses - mock_get_deploy_config_sync_status.return_value = dccommon_consts.DEPLOY_CONFIG_UP_TO_DATE + mock_get_deploy_config_sync_status.return_value = ( + dccommon_consts.DEPLOY_CONFIG_UP_TO_DATE + ) response = self.app.get(get_url, headers=FAKE_HEADERS) - self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.content_type, "application/json") self.assertEqual(response.status_code, http_client.OK) - self.assertEqual('10.10.10.2', response.json['oam_floating_ip']) + self.assertEqual("10.10.10.2", response.json["oam_floating_ip"]) self.assertEqual( - 'Deployment: configurations up-to-date', response.json['deploy_config_sync_status']) + "Deployment: configurations up-to-date", + response.json["deploy_config_sync_status"], + ) def test_get_offline_subcloud_with_additional_detail(self): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - get_url = FAKE_URL + '/' + str(subcloud.id) + '/detail' + get_url = FAKE_URL + "/" + str(subcloud.id) + "/detail" response = self.app.get(get_url, headers=FAKE_HEADERS) - self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.content_type, "application/json") self.assertEqual(response.status_code, http_client.OK) - self.assertEqual('unavailable', response.json['oam_floating_ip']) - self.assertEqual('unknown', response.json['deploy_config_sync_status']) + self.assertEqual("unavailable", response.json["oam_floating_ip"]) + self.assertEqual("unknown", response.json["deploy_config_sync_status"]) - @mock.patch.object(subclouds.SubcloudsController, - '_get_deploy_config_sync_status') - @mock.patch.object(subclouds.SubcloudsController, '_get_oam_addresses') - def test_get_subcloud_deploy_config_status_unknown(self, - mock_get_oam_addresses, - mock_get_deploy_config_sync_status): + 
@mock.patch.object( + subclouds.SubcloudsController, "_get_deploy_config_sync_status" + ) + @mock.patch.object(subclouds.SubcloudsController, "_get_oam_addresses") + def test_get_subcloud_deploy_config_status_unknown( + self, mock_get_oam_addresses, mock_get_deploy_config_sync_status + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) updated_subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE) - get_url = FAKE_URL + '/' + str(updated_subcloud.id) + '/detail' + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + ) + get_url = FAKE_URL + "/" + str(updated_subcloud.id) + "/detail" mock_get_oam_addresses.return_value = None mock_get_deploy_config_sync_status.return_value = None response = self.app.get(get_url, headers=FAKE_HEADERS) - self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.content_type, "application/json") self.assertEqual(response.status_code, http_client.OK) - self.assertEqual('unknown', response.json['deploy_config_sync_status']) + self.assertEqual("unknown", response.json["deploy_config_sync_status"]) - @mock.patch.object(subclouds.SubcloudsController, '_get_oam_addresses') + @mock.patch.object(subclouds.SubcloudsController, "_get_oam_addresses") def test_get_subcloud_oam_ip_unavailable(self, mock_get_oam_addresses): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) updated_subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE) + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + ) - get_url = FAKE_URL + '/' + str(updated_subcloud.id) + '/detail' - self.mock_get_ks_client.return_value = 'ks_client' + get_url = FAKE_URL + "/" + str(updated_subcloud.id) + "/detail" + self.mock_get_ks_client.return_value = "ks_client" mock_get_oam_addresses.return_value = None response = self.app.get(get_url, headers=FAKE_HEADERS) 
- self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.content_type, "application/json") self.assertEqual(response.status_code, http_client.OK) - self.assertEqual('unavailable', response.json['oam_floating_ip']) + self.assertEqual("unavailable", response.json["oam_floating_ip"]) def test_get_wrong_request(self): - get_url = WRONG_URL + '/' + FAKE_ID - six.assertRaisesRegex(self, webtest.app.AppError, "404 *", - self.app.get, get_url, - headers=FAKE_HEADERS) + get_url = WRONG_URL + "/" + FAKE_ID + six.assertRaisesRegex( + self, + webtest.app.AppError, + "404 *", + self.app.get, + get_url, + headers=FAKE_HEADERS, + ) def test_get_subcloud_all(self): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) get_url = FAKE_URL response = self.app.get(get_url, headers=FAKE_HEADERS) - self.assertEqual(response.json['subclouds'][0]['name'], subcloud.name) + self.assertEqual(response.json["subclouds"][0]["name"], subcloud.name) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") def test_patch_subcloud(self, mock_get_patch_data): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - data = {'management-state': dccommon_consts.MANAGEMENT_UNMANAGED} + data = {"management-state": dccommon_consts.MANAGEMENT_UNMANAGED} self.mock_rpc_client().update_subcloud.return_value = True mock_get_patch_data.return_value = data - response = self.app.patch_json(FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL + "/" + str(subcloud.id), headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) # Verify subcloud was updated with correct values updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name) - self.assertEqual(dccommon_consts.MANAGEMENT_UNMANAGED, - updated_subcloud.management_state) + self.assertEqual( + dccommon_consts.MANAGEMENT_UNMANAGED, 
updated_subcloud.management_state + ) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") def test_update_subcloud_group_value(self, mock_get_patch_data): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) good_values = [1, "1"] expected_group_id = 1 for x in good_values: - data = {'group_id': x} + data = {"group_id": x} self.mock_rpc_client().update_subcloud.return_value = True mock_get_patch_data.return_value = data - response = self.app.patch_json(FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL + "/" + str(subcloud.id), headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) # Verify subcloud was updated with correct values - updated_subcloud = db_api.subcloud_get_by_name(self.ctx, - subcloud.name) - self.assertEqual(expected_group_id, - updated_subcloud.group_id) + updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name) + self.assertEqual(expected_group_id, updated_subcloud.group_id) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") def test_update_subcloud_group_value_by_name(self, mock_get_patch_data): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) expected_group_id = 1 - data = {'group_id': 'Default'} + data = {"group_id": "Default"} self.mock_rpc_client().update_subcloud.return_value = True mock_get_patch_data.return_value = data - response = self.app.patch_json(FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL + "/" + str(subcloud.id), headers=FAKE_HEADERS, params=data + ) self.assertEqual(response.status_int, 200) # Verify subcloud was updated with correct values updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name) - self.assertEqual(expected_group_id, - 
updated_subcloud.group_id) + self.assertEqual(expected_group_id, updated_subcloud.group_id) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") def test_update_subcloud_group_bad_value(self, mock_get_patch_data): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) # There is only 1 subcloud group 'Default' which has id '1' @@ -1237,39 +1353,43 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): # all get rejected bad_values = [0, -1, 2, "0", "-1", 0.5, "BadName", "False", "True"] for x in bad_values: - data = {'group_id': x} + data = {"group_id": x} self.mock_rpc_client().update_subcloud.return_value = True mock_get_patch_data.return_value = data - response = self.app.patch_json(FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, - params=data, - expect_errors=True) + response = self.app.patch_json( + FAKE_URL + "/" + str(subcloud.id), + headers=FAKE_HEADERS, + params=data, + expect_errors=True, + ) self.assertEqual(response.status_int, 400) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') - @mock.patch.object(cutils, 'get_vault_load_files') - def test_update_subcloud_install_values_persistent_size(self, mock_vault_files, - mock_get_patch_data): - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") + @mock.patch.object(cutils, "get_vault_load_files") + def test_update_subcloud_install_values_persistent_size( + self, mock_vault_files, mock_get_patch_data + ): + mock_vault_files.return_value = ("fake_iso", "fake_sig") subcloud = fake_subcloud.create_fake_subcloud(self.ctx, data_install=None) payload = {} install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES_WITH_PERSISTENT_SIZE) - encoded_password = base64.b64encode( - 'bmc_password'.encode("utf-8")).decode('utf-8') - data = {'bmc_password': encoded_password} - payload.update({'install_values': install_data}) + 
encoded_password = base64.b64encode("bmc_password".encode("utf-8")).decode( + "utf-8" + ) + data = {"bmc_password": encoded_password} + payload.update({"install_values": install_data}) payload.update(data) self.mock_rpc_client().update_subcloud.return_value = True mock_get_patch_data.return_value = payload fake_content = "fake content".encode("utf-8") - response = self.app.patch(FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, - params=data, - upload_files=[("install_values", - "fake_name", - fake_content)]) - install_data.update({'bmc_password': encoded_password}) + response = self.app.patch( + FAKE_URL + "/" + str(subcloud.id), + headers=FAKE_HEADERS, + params=data, + upload_files=[("install_values", "fake_name", fake_content)], + ) + install_data.update({"bmc_password": encoded_password}) self.mock_rpc_client().update_subcloud.assert_called_once_with( mock.ANY, subcloud.id, @@ -1285,66 +1405,81 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): deploy_status=None) self.assertEqual(response.status_int, 200) - @mock.patch.object(psd_common, 'get_network_address_pool') - @mock.patch.object(subclouds.SubcloudsController, - '_validate_network_reconfiguration') - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') + @mock.patch.object(psd_common, "get_network_address_pool") + @mock.patch.object( + subclouds.SubcloudsController, "_validate_network_reconfiguration" + ) + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") def test_patch_subcloud_network_values( - self, mock_get_patch_data, mock_validate_network_reconfiguration, - mock_mgmt_address_pool): + self, + mock_get_patch_data, + mock_validate_network_reconfiguration, + mock_mgmt_address_pool, + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) db_api.subcloud_update( - self.ctx, subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE) - fake_password = ( - base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - payload = 
{'sysadmin_password': fake_password, - 'bootstrap_address': "192.168.102.2", - 'management_subnet': "192.168.102.0/24", - 'management_start_ip': "192.168.102.5", - 'management_end_ip': "192.168.102.49", - 'management_gateway_ip': "192.168.102.1"} + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + ) + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + payload = { + "sysadmin_password": fake_password, + "bootstrap_address": "192.168.102.2", + "management_subnet": "192.168.102.0/24", + "management_start_ip": "192.168.102.5", + "management_end_ip": "192.168.102.49", + "management_gateway_ip": "192.168.102.1", + } - fake_management_address_pool = FakeAddressPool('192.168.204.0', 24, - '192.168.204.2', - '192.168.204.100') + fake_management_address_pool = FakeAddressPool( + "192.168.204.0", 24, "192.168.204.2", "192.168.204.100" + ) mock_mgmt_address_pool.return_value = fake_management_address_pool - self.mock_rpc_client().update_subcloud_with_network_reconfig.return_value = True + self.mock_rpc_client().update_subcloud_with_network_reconfig.return_value = ( + True + ) mock_get_patch_data.return_value = payload - response = self.app.patch_json(FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, - params=payload) + response = self.app.patch_json( + FAKE_URL + "/" + str(subcloud.id), headers=FAKE_HEADERS, params=payload + ) self.assertEqual(response.status_int, 200) mock_validate_network_reconfiguration.assert_called_once() self.mock_rpc_client().update_subcloud_with_network_reconfig.\ - assert_called_once_with(mock.ANY, subcloud.id, payload) + assert_called_once_with( + mock.ANY, subcloud.id, payload + ) self.assertEqual(response.status_int, 200) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') - @mock.patch.object(cutils, 'get_vault_load_files') - def test_patch_subcloud_install_values(self, mock_vault_files, - mock_get_patch_data): - mock_vault_files.return_value = 
('fake_iso', 'fake_sig') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") + @mock.patch.object(cutils, "get_vault_load_files") + def test_patch_subcloud_install_values( + self, mock_vault_files, mock_get_patch_data + ): + mock_vault_files.return_value = ("fake_iso", "fake_sig") subcloud = fake_subcloud.create_fake_subcloud(self.ctx, data_install=None) payload = {} install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES) - encoded_password = base64.b64encode( - 'bmc_password'.encode("utf-8")).decode('utf-8') - data = {'bmc_password': encoded_password} - payload.update({'install_values': install_data}) + encoded_password = base64.b64encode("bmc_password".encode("utf-8")).decode( + "utf-8" + ) + data = {"bmc_password": encoded_password} + payload.update({"install_values": install_data}) payload.update(data) self.mock_rpc_client().update_subcloud.return_value = True mock_get_patch_data.return_value = payload fake_content = "fake content".encode("utf-8") - response = self.app.patch(FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, - params=data, - upload_files=[("install_values", - "fake_name", - fake_content)]) - install_data.update({'bmc_password': encoded_password}) + response = self.app.patch( + FAKE_URL + "/" + str(subcloud.id), + headers=FAKE_HEADERS, + params=data, + upload_files=[("install_values", "fake_name", fake_content)], + ) + install_data.update({"bmc_password": encoded_password}) self.mock_rpc_client().update_subcloud.assert_called_once_with( mock.ANY, subcloud.id, @@ -1360,32 +1495,35 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): deploy_status=None) self.assertEqual(response.status_int, 200) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') - @mock.patch.object(cutils, 'get_vault_load_files') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") + @mock.patch.object(cutils, "get_vault_load_files") def test_patch_subcloud_install_values_with_existing_data_install( - self, 
mock_vault_files, mock_get_patch_data): - mock_vault_files.return_value = ('fake_iso', 'fake_sig') + self, mock_vault_files, mock_get_patch_data + ): + mock_vault_files.return_value = ("fake_iso", "fake_sig") install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES) subcloud = fake_subcloud.create_fake_subcloud( - self.ctx, data_install=json.dumps(install_data)) + self.ctx, data_install=json.dumps(install_data) + ) install_data.update({"install_type": 2}) payload = {} - encoded_password = base64.b64encode( - 'bmc_password'.encode("utf-8")).decode('utf-8') - data = {'bmc_password': encoded_password} - payload.update({'install_values': install_data}) + encoded_password = base64.b64encode("bmc_password".encode("utf-8")).decode( + "utf-8" + ) + data = {"bmc_password": encoded_password} + payload.update({"install_values": install_data}) payload.update(data) self.mock_rpc_client().update_subcloud.return_value = True mock_get_patch_data.return_value = payload fake_content = "fake content".encode("utf-8") - response = self.app.patch(FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, - params=data, - upload_files=[("install_values", - "fake_name", - fake_content)]) - install_data.update({'bmc_password': encoded_password}) + response = self.app.patch( + FAKE_URL + "/" + str(subcloud.id), + headers=FAKE_HEADERS, + params=data, + upload_files=[("install_values", "fake_name", fake_content)], + ) + install_data.update({"bmc_password": encoded_password}) self.mock_rpc_client().update_subcloud.assert_called_once_with( mock.ANY, subcloud.id, @@ -1401,58 +1539,84 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): deploy_status=None) self.assertEqual(response.status_int, 200) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") def test_patch_subcloud_no_body(self, mock_get_patch_data): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) data = {} mock_get_patch_data.return_value 
= data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, - FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id), + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") def test_patch_subcloud_bad_status(self, mock_get_patch_data): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - data = {'management-state': 'bad-status'} + data = {"management-state": "bad-status"} mock_get_patch_data.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, - FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id), + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") def test_patch_subcloud_bad_force_value(self, mock_get_patch_data): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - data = {'management-state': dccommon_consts.MANAGEMENT_MANAGED, - 'force': 'bad-value'} + data = { + "management-state": dccommon_consts.MANAGEMENT_MANAGED, + "force": "bad-value", + } mock_get_patch_data.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, - FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id), + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') + @mock.patch.object(subclouds.SubcloudsController, 
"_get_patch_data") def test_patch_subcloud_forced_unmanaged(self, mock_get_patch_data): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - data = {'management-state': dccommon_consts.MANAGEMENT_UNMANAGED, - 'force': True} + data = { + "management-state": dccommon_consts.MANAGEMENT_UNMANAGED, + "force": True, + } mock_get_patch_data.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, - FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id), + headers=FAKE_HEADERS, + params=data, + ) - @mock.patch.object(subclouds.SubcloudsController, '_get_patch_data') + @mock.patch.object(subclouds.SubcloudsController, "_get_patch_data") def test_patch_subcloud_forced_manage(self, mock_get_patch_data): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - payload = {'management-state': dccommon_consts.MANAGEMENT_MANAGED, - 'force': True} + payload = { + "management-state": dccommon_consts.MANAGEMENT_MANAGED, + "force": True, + } self.mock_rpc_client().update_subcloud.return_value = True mock_get_patch_data.return_value = payload - response = self.app.patch_json(FAKE_URL + '/' + str(subcloud.id), - headers=FAKE_HEADERS, - params=payload) + response = self.app.patch_json( + FAKE_URL + "/" + str(subcloud.id), headers=FAKE_HEADERS, params=payload + ) self.mock_rpc_client().update_subcloud.assert_called_once_with( mock.ANY, mock.ANY, @@ -1471,13 +1635,17 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): @mock.patch.object(subclouds.SubcloudsController, '_get_updatestatus_payload') def test_subcloud_updatestatus(self, mock_get_updatestatus_payload): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - data = {'endpoint': 'dc-cert', 'status': 'in-sync'} + data = {"endpoint": "dc-cert", "status": "in-sync"} mock_get_updatestatus_payload.return_value = data - 
self.mock_rpc_state_client().update_subcloud_endpoint_status.return_value = True + self.mock_rpc_state_client().update_subcloud_endpoint_status.return_value = ( + True + ) response = self.app.patch_json( - FAKE_URL + '/' + str(subcloud.id) + '/update_status', - data, headers=FAKE_HEADERS) + FAKE_URL + "/" + str(subcloud.id) + "/update_status", + data, + headers=FAKE_HEADERS, + ) self.mock_rpc_state_client().update_subcloud_endpoint_status.\ assert_called_once_with(mock.ANY, subcloud.name, subcloud.region_name, @@ -1485,63 +1653,82 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): self.assertEqual(response.status_int, 200) - @mock.patch.object(subclouds.SubcloudsController, '_get_updatestatus_payload') + @mock.patch.object(subclouds.SubcloudsController, "_get_updatestatus_payload") def test_subcloud_updatestatus_invalid_endpoint( - self, mock_get_updatestatus_payload): + self, mock_get_updatestatus_payload + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - data = {'endpoint': 'any-other-endpoint', 'status': 'in-sync'} + data = {"endpoint": "any-other-endpoint", "status": "in-sync"} mock_get_updatestatus_payload.return_value = data self.mock_rpc_client().update_subcloud_endpoint_status.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/update_status', - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/update_status", + headers=FAKE_HEADERS, + params=data, + ) self.mock_rpc_client().update_subcloud_endpoint_status.assert_not_called() - @mock.patch.object(subclouds.SubcloudsController, '_get_updatestatus_payload') + @mock.patch.object(subclouds.SubcloudsController, "_get_updatestatus_payload") def test_subcloud_updatestatus_invalid_status( - self, mock_get_updatestatus_payload): + self, mock_get_updatestatus_payload + ): subcloud = 
fake_subcloud.create_fake_subcloud(self.ctx) - data = {'endpoint': 'dc-cert', 'status': 'not-sure'} + data = {"endpoint": "dc-cert", "status": "not-sure"} mock_get_updatestatus_payload.return_value = data self.mock_rpc_client().update_subcloud_endpoint_status.return_value = True - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/update_status', - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/update_status", + headers=FAKE_HEADERS, + params=data, + ) self.mock_rpc_client().update_subcloud_endpoint_status.assert_not_called() def test_get_config_file_path(self): bootstrap_file = psd_common.get_config_file_path("subcloud1") - install_values = psd_common.get_config_file_path("subcloud1", - "install_values") - deploy_config = psd_common.get_config_file_path("subcloud1", - consts.DEPLOY_CONFIG) - self.assertEqual(bootstrap_file, - f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1.yml') - self.assertEqual(install_values, - f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1/install_values.yml') - self.assertEqual(deploy_config, - f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_deploy_config.yml') + install_values = psd_common.get_config_file_path( + "subcloud1", "install_values" + ) + deploy_config = psd_common.get_config_file_path( + "subcloud1", consts.DEPLOY_CONFIG + ) + self.assertEqual( + bootstrap_file, f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1.yml" + ) + self.assertEqual( + install_values, + f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1/install_values.yml", + ) + self.assertEqual( + deploy_config, + f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_deploy_config.yml", + ) def test_format_ip_address(self): fake_payload = {} good_values = { - '10.10.10.3': '10.10.10.3', - '2620:10a:a001:a103::1135': '2620:10a:a001:a103::1135', - 
'2620:10A:A001:A103::1135': '2620:10a:a001:a103::1135', # with upper case letters - '2620:010a:a001:a103::1135': '2620:10a:a001:a103::1135', # with leading zeros - '2620:10a:a001:a103:0000::1135': '2620:10a:a001:a103::1135' # with a string of zeros - } + "10.10.10.3": "10.10.10.3", + "2620:10a:a001:a103::1135": "2620:10a:a001:a103::1135", + "2620:10A:A001:A103::1135": "2620:10a:a001:a103::1135", + "2620:010a:a001:a103::1135": "2620:10a:a001:a103::1135", + "2620:10a:a001:a103:0000::1135": "2620:10a:a001:a103::1135", + } for k, v in good_values.items(): - fake_payload['bootstrap-address'] = k + fake_payload["bootstrap-address"] = k psd_common.format_ip_address(fake_payload) - self.assertEqual(fake_payload['bootstrap-address'], v) + self.assertEqual(fake_payload["bootstrap-address"], v) fake_payload[consts.INSTALL_VALUES] = {} for k, v in good_values.items(): @@ -1553,13 +1740,15 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): fake_payload[consts.INSTALL_VALUES]['othervalues2'] = 'othervalues2' psd_common.format_ip_address(fake_payload) self.assertEqual(fake_payload['othervalues1'], 'othervalues1') - self.assertEqual(fake_payload[consts.INSTALL_VALUES]['othervalues2'], 'othervalues2') + self.assertEqual(fake_payload[consts.INSTALL_VALUES]['othervalues2'], + 'othervalues2') def test_get_subcloud_db_install_values(self): install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES) - encoded_password = base64.b64encode( - 'bmc_password'.encode("utf-8")).decode('utf-8') - install_data['bmc_password'] = encoded_password + encoded_password = base64.b64encode("bmc_password".encode("utf-8")).decode( + "utf-8" + ) + install_data["bmc_password"] = encoded_password test_subcloud = copy.copy(FAKE_SUBCLOUD_DATA) subcloud_info = Subcloud(test_subcloud, False) subcloud_info.data_install = json.dumps(install_data) @@ -1568,11 +1757,11 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): self.assertEqual( json.loads(json.dumps(install_data)), - 
json.loads(json.dumps(actual_result))) + json.loads(json.dumps(actual_result)), + ) - @mock.patch.object(keyring, 'get_password') - def test_get_subcloud_db_install_values_without_bmc_password( - self, mock_keyring): + @mock.patch.object(keyring, "get_password") + def test_get_subcloud_db_install_values_without_bmc_password(self, mock_keyring): install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES) subcloud = fake_subcloud.create_fake_subcloud( self.ctx, data_install=json.dumps(install_data)) @@ -1591,153 +1780,220 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): @mock.patch.object(psd_common, 'validate_subcloud_config') @mock.patch.object(psd_common, 'validate_bootstrap_values') def test_redeploy_subcloud( - self, mock_validate_bootstrap_values, mock_validate_subcloud_config, - mock_validate_k8s_version, mock_get_vault_load_files, - mock_os_listdir, mock_os_isdir, mock_query, mock_upload_config_file): - - fake_bmc_password = base64.b64encode( - 'bmc_password'.encode("utf-8")).decode('utf-8') + self, + mock_validate_bootstrap_values, + mock_validate_subcloud_config, + mock_validate_k8s_version, + mock_get_vault_load_files, + mock_os_listdir, + mock_os_isdir, + mock_query, + mock_upload_config_file, + ): + fake_bmc_password = base64.b64encode("bmc_password".encode("utf-8")).decode( + "utf-8" + ) fake_sysadmin_password = base64.b64encode( - 'sysadmin_password'.encode("utf-8")).decode('utf-8') + "sysadmin_password".encode("utf-8") + ).decode("utf-8") install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES) - install_data.pop('software_version') + install_data.pop("software_version") bootstrap_data = copy.copy(fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA) - config_data = {'deploy_config': 'deploy config values'} - redeploy_data = {**install_data, **bootstrap_data, **config_data, - 'sysadmin_password': fake_sysadmin_password, - 'bmc_password': fake_bmc_password} + config_data = {"deploy_config": "deploy config values"} + redeploy_data = { + **install_data, + 
**bootstrap_data, + **config_data, + "sysadmin_password": fake_sysadmin_password, + "bmc_password": fake_bmc_password, + } subcloud = fake_subcloud.create_fake_subcloud( - self.ctx, name=bootstrap_data["name"]) + self.ctx, name=bootstrap_data["name"] + ) mock_query.return_value = {} - mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + mock_get_vault_load_files.return_value = ("iso_file_path", "sig_file_path") mock_os_isdir.return_value = True mock_upload_config_file.return_value = True - mock_os_listdir.return_value = ['deploy_chart_fake.tgz', - 'deploy_overrides_fake.yaml', - 'deploy_playbook_fake.yaml'] + mock_os_listdir.return_value = [ + "deploy_chart_fake.tgz", + "deploy_overrides_fake.yaml", + "deploy_playbook_fake.yaml", + ] - upload_files = [("install_values", "install_fake_filename", - json.dumps(install_data).encode("utf-8")), - ("bootstrap_values", "bootstrap_fake_filename", - json.dumps(bootstrap_data).encode("utf-8")), - ("deploy_config", "config_fake_filename", - json.dumps(config_data).encode("utf-8"))] + upload_files = [ + ( + "install_values", + "install_fake_filename", + json.dumps(install_data).encode("utf-8"), + ), + ( + "bootstrap_values", + "bootstrap_fake_filename", + json.dumps(bootstrap_data).encode("utf-8"), + ), + ( + "deploy_config", + "config_fake_filename", + json.dumps(config_data).encode("utf-8"), + ), + ] response = self.app.patch( - FAKE_URL + '/' + str(subcloud.id) + '/redeploy', - headers=FAKE_HEADERS, params=redeploy_data, - upload_files=upload_files) + FAKE_URL + "/" + str(subcloud.id) + "/redeploy", + headers=FAKE_HEADERS, + params=redeploy_data, + upload_files=upload_files, + ) mock_validate_bootstrap_values.assert_called_once() mock_validate_subcloud_config.assert_called_once() mock_validate_k8s_version.assert_called_once() self.mock_rpc_client().redeploy_subcloud.assert_called_once_with( - mock.ANY, - subcloud.id, - mock.ANY) + mock.ANY, subcloud.id, mock.ANY + ) 
self.assertEqual(response.status_int, 200) - self.assertEqual(SW_VERSION, response.json['software-version']) + self.assertEqual(SW_VERSION, response.json["software-version"]) - @mock.patch.object(cutils, 'load_yaml_file') - @mock.patch.object(psd_common.PatchingClient, 'query') - @mock.patch.object(os.path, 'exists') - @mock.patch.object(os.path, 'isdir') - @mock.patch.object(os, 'listdir') - @mock.patch.object(cutils, 'get_vault_load_files') - @mock.patch.object(psd_common, 'validate_k8s_version') + @mock.patch.object(cutils, "load_yaml_file") + @mock.patch.object(psd_common.PatchingClient, "query") + @mock.patch.object(os.path, "exists") + @mock.patch.object(os.path, "isdir") + @mock.patch.object(os, "listdir") + @mock.patch.object(cutils, "get_vault_load_files") + @mock.patch.object(psd_common, "validate_k8s_version") def test_redeploy_subcloud_no_request_data( - self, mock_validate_k8s_version, mock_get_vault_load_files, - mock_os_listdir, mock_os_isdir, mock_path_exists, mock_query, - mock_load_yaml): - - fake_bmc_password = base64.b64encode( - 'bmc_password'.encode("utf-8")).decode('utf-8') + self, + mock_validate_k8s_version, + mock_get_vault_load_files, + mock_os_listdir, + mock_os_isdir, + mock_path_exists, + mock_query, + mock_load_yaml, + ): + fake_bmc_password = base64.b64encode("bmc_password".encode("utf-8")).decode( + "utf-8" + ) fake_sysadmin_password = base64.b64encode( - 'sysadmin_password'.encode("utf-8")).decode('utf-8') + "sysadmin_password".encode("utf-8") + ).decode("utf-8") install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES) - install_data.pop('software_version') - install_data['bmc_password'] = fake_bmc_password - redeploy_data = {'sysadmin_password': fake_sysadmin_password} + install_data.pop("software_version") + install_data["bmc_password"] = fake_bmc_password + redeploy_data = {"sysadmin_password": fake_sysadmin_password} subcloud = fake_subcloud.create_fake_subcloud( - self.ctx, name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"], - 
data_install=json.dumps(install_data)) + self.ctx, + name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"], + data_install=json.dumps(install_data), + ) - config_file = psd_common.get_config_file_path(subcloud.name, - consts.DEPLOY_CONFIG) + config_file = psd_common.get_config_file_path( + subcloud.name, consts.DEPLOY_CONFIG + ) mock_query.return_value = {} - mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + mock_get_vault_load_files.return_value = ("iso_file_path", "sig_file_path") mock_os_isdir.return_value = True - mock_os_listdir.return_value = ['deploy_chart_fake.tgz', - 'deploy_overrides_fake.yaml', - 'deploy_playbook_fake.yaml'] + mock_os_listdir.return_value = [ + "deploy_chart_fake.tgz", + "deploy_overrides_fake.yaml", + "deploy_playbook_fake.yaml", + ] mock_path_exists.side_effect = lambda x: True if x == config_file else False mock_load_yaml.return_value = {"software_version": SW_VERSION} response = self.app.patch( - FAKE_URL + '/' + str(subcloud.id) + '/redeploy', - headers=FAKE_HEADERS, params=redeploy_data) + FAKE_URL + "/" + str(subcloud.id) + "/redeploy", + headers=FAKE_HEADERS, + params=redeploy_data, + ) mock_validate_k8s_version.assert_called_once() self.mock_rpc_client().redeploy_subcloud.assert_called_once_with( - mock.ANY, - subcloud.id, - mock.ANY) + mock.ANY, subcloud.id, mock.ANY + ) self.assertEqual(response.status_int, 200) - self.assertEqual(SW_VERSION, response.json['software-version']) + self.assertEqual(SW_VERSION, response.json["software-version"]) - @mock.patch.object(psd_common, 'upload_config_file') - @mock.patch.object(psd_common.PatchingClient, 'query') - @mock.patch.object(os.path, 'isdir') - @mock.patch.object(os, 'listdir') - @mock.patch.object(cutils, 'get_vault_load_files') - @mock.patch.object(psd_common, 'validate_k8s_version') - @mock.patch.object(psd_common, 'validate_subcloud_config') - @mock.patch.object(psd_common, 'validate_bootstrap_values') + @mock.patch.object(psd_common, 
"upload_config_file") + @mock.patch.object(psd_common.PatchingClient, "query") + @mock.patch.object(os.path, "isdir") + @mock.patch.object(os, "listdir") + @mock.patch.object(cutils, "get_vault_load_files") + @mock.patch.object(psd_common, "validate_k8s_version") + @mock.patch.object(psd_common, "validate_subcloud_config") + @mock.patch.object(psd_common, "validate_bootstrap_values") def test_redeploy_subcloud_with_release_version( - self, mock_validate_bootstrap_values, mock_validate_subcloud_config, - mock_validate_k8s_version, mock_get_vault_load_files, - mock_os_listdir, mock_os_isdir, mock_query, mock_upload_config_file): - - fake_bmc_password = base64.b64encode( - 'bmc_password'.encode("utf-8")).decode('utf-8') + self, + mock_validate_bootstrap_values, + mock_validate_subcloud_config, + mock_validate_k8s_version, + mock_get_vault_load_files, + mock_os_listdir, + mock_os_isdir, + mock_query, + mock_upload_config_file, + ): + fake_bmc_password = base64.b64encode("bmc_password".encode("utf-8")).decode( + "utf-8" + ) fake_sysadmin_password = base64.b64encode( - 'sysadmin_password'.encode("utf-8")).decode('utf-8') + "sysadmin_password".encode("utf-8") + ).decode("utf-8") install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES) - install_data.pop('software_version') + install_data.pop("software_version") bootstrap_data = copy.copy(fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA) - config_data = {'deploy_config': 'deploy config values'} - redeploy_data = {**install_data, **bootstrap_data, **config_data, - 'sysadmin_password': fake_sysadmin_password, - 'bmc_password': fake_bmc_password, - 'release': fake_subcloud.FAKE_SOFTWARE_VERSION} + config_data = {"deploy_config": "deploy config values"} + redeploy_data = { + **install_data, + **bootstrap_data, + **config_data, + "sysadmin_password": fake_sysadmin_password, + "bmc_password": fake_bmc_password, + "release": fake_subcloud.FAKE_SOFTWARE_VERSION, + } subcloud = fake_subcloud.create_fake_subcloud( - self.ctx, 
name=bootstrap_data["name"], - software_version=SW_VERSION) + self.ctx, name=bootstrap_data["name"], software_version=SW_VERSION + ) mock_query.return_value = {} - mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + mock_get_vault_load_files.return_value = ("iso_file_path", "sig_file_path") mock_os_isdir.return_value = True mock_upload_config_file.return_value = True - mock_os_listdir.return_value = ['deploy_chart_fake.tgz', - 'deploy_overrides_fake.yaml', - 'deploy_playbook_fake.yaml'] + mock_os_listdir.return_value = [ + "deploy_chart_fake.tgz", + "deploy_overrides_fake.yaml", + "deploy_playbook_fake.yaml", + ] - upload_files = [("install_values", "install_fake_filename", - json.dumps(install_data).encode("utf-8")), - ("bootstrap_values", "bootstrap_fake_filename", - json.dumps(bootstrap_data).encode("utf-8")), - ("deploy_config", "config_fake_filename", - json.dumps(config_data).encode("utf-8"))] + upload_files = [ + ( + "install_values", + "install_fake_filename", + json.dumps(install_data).encode("utf-8"), + ), + ( + "bootstrap_values", + "bootstrap_fake_filename", + json.dumps(bootstrap_data).encode("utf-8"), + ), + ( + "deploy_config", + "config_fake_filename", + json.dumps(config_data).encode("utf-8"), + ), + ] with mock.patch('builtins.open', - mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)): + mock.mock_open( + read_data=fake_subcloud.FAKE_UPGRADES_METADATA + )): response = self.app.patch( FAKE_URL + '/' + str(subcloud.id) + '/redeploy', headers=FAKE_HEADERS, params=redeploy_data, @@ -1747,294 +2003,429 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): mock_validate_subcloud_config.assert_called_once() mock_validate_k8s_version.assert_called_once() self.mock_rpc_client().redeploy_subcloud.assert_called_once_with( - mock.ANY, - subcloud.id, - mock.ANY) + mock.ANY, subcloud.id, mock.ANY + ) self.assertEqual(response.status_int, 200) - self.assertEqual(fake_subcloud.FAKE_SOFTWARE_VERSION, - 
response.json['software-version']) + self.assertEqual( + fake_subcloud.FAKE_SOFTWARE_VERSION, response.json["software-version"] + ) - @mock.patch.object(cutils, 'load_yaml_file') - @mock.patch.object(psd_common.PatchingClient, 'query') - @mock.patch.object(os.path, 'exists') - @mock.patch.object(os.path, 'isdir') - @mock.patch.object(os, 'listdir') - @mock.patch.object(cutils, 'get_vault_load_files') + @mock.patch.object(cutils, "load_yaml_file") + @mock.patch.object(psd_common.PatchingClient, "query") + @mock.patch.object(os.path, "exists") + @mock.patch.object(os.path, "isdir") + @mock.patch.object(os, "listdir") + @mock.patch.object(cutils, "get_vault_load_files") def test_redeploy_subcloud_no_request_body( - self, mock_get_vault_load_files, mock_os_listdir, - mock_os_isdir, mock_path_exists, mock_query, mock_load_yaml): - - fake_bmc_password = base64.b64encode( - 'bmc_password'.encode("utf-8")).decode('utf-8') + self, + mock_get_vault_load_files, + mock_os_listdir, + mock_os_isdir, + mock_path_exists, + mock_query, + mock_load_yaml, + ): + fake_bmc_password = base64.b64encode("bmc_password".encode("utf-8")).decode( + "utf-8" + ) install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES) - install_data.pop('software_version') - install_data['bmc_password'] = fake_bmc_password + install_data.pop("software_version") + install_data["bmc_password"] = fake_bmc_password subcloud = fake_subcloud.create_fake_subcloud( - self.ctx, name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"], - data_install=json.dumps(install_data)) + self.ctx, + name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"], + data_install=json.dumps(install_data), + ) - config_file = psd_common.get_config_file_path(subcloud.name, - consts.DEPLOY_CONFIG) + config_file = psd_common.get_config_file_path( + subcloud.name, consts.DEPLOY_CONFIG + ) mock_query.return_value = {} - mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + mock_get_vault_load_files.return_value = ("iso_file_path", 
"sig_file_path") mock_os_isdir.return_value = True - mock_os_listdir.return_value = ['deploy_chart_fake.tgz', - 'deploy_overrides_fake.yaml', - 'deploy_playbook_fake.yaml'] + mock_os_listdir.return_value = [ + "deploy_chart_fake.tgz", + "deploy_overrides_fake.yaml", + "deploy_playbook_fake.yaml", + ] mock_path_exists.side_effect = lambda x: True if x == config_file else False mock_load_yaml.return_value = {"software_version": SW_VERSION} - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/redeploy', - headers=FAKE_HEADERS, params={}) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/redeploy", + headers=FAKE_HEADERS, + params={}, + ) def test_redeploy_online_subcloud(self): - subcloud = fake_subcloud.create_fake_subcloud( - self.ctx, name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"]) - db_api.subcloud_update(self.ctx, subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE) + self.ctx, name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"] + ) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + ) - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/redeploy', - headers=FAKE_HEADERS, params={}) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/redeploy", + headers=FAKE_HEADERS, + params={}, + ) self.mock_rpc_client().redeploy_subcloud.assert_not_called() def test_redeploy_managed_subcloud(self): - subcloud = fake_subcloud.create_fake_subcloud( - self.ctx, name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"]) - db_api.subcloud_update(self.ctx, subcloud.id, - management_state=dccommon_consts.MANAGEMENT_MANAGED) + self.ctx, name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"] + ) + 
db_api.subcloud_update( + self.ctx, + subcloud.id, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + ) - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/redeploy', - headers=FAKE_HEADERS, params={}) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/redeploy", + headers=FAKE_HEADERS, + params={}, + ) self.mock_rpc_client().redeploy_subcloud.assert_not_called() - @mock.patch.object(cutils, 'load_yaml_file') - @mock.patch.object(psd_common.PatchingClient, 'query') - @mock.patch.object(os.path, 'exists') - @mock.patch.object(os.path, 'isdir') - @mock.patch.object(os, 'listdir') - @mock.patch.object(cutils, 'get_vault_load_files') - @mock.patch.object(psd_common, 'validate_k8s_version') + @mock.patch.object(cutils, "load_yaml_file") + @mock.patch.object(psd_common.PatchingClient, "query") + @mock.patch.object(os.path, "exists") + @mock.patch.object(os.path, "isdir") + @mock.patch.object(os, "listdir") + @mock.patch.object(cutils, "get_vault_load_files") + @mock.patch.object(psd_common, "validate_k8s_version") def test_redeploy_subcloud_missing_required_value( - self, mock_validate_k8s_version, mock_get_vault_load_files, - mock_os_listdir, mock_os_isdir, mock_path_exists, mock_query, - mock_load_yaml): - - fake_bmc_password = base64.b64encode( - 'bmc_password'.encode("utf-8")).decode('utf-8') + self, + mock_validate_k8s_version, + mock_get_vault_load_files, + mock_os_listdir, + mock_os_isdir, + mock_path_exists, + mock_query, + mock_load_yaml, + ): + fake_bmc_password = base64.b64encode("bmc_password".encode("utf-8")).decode( + "utf-8" + ) fake_sysadmin_password = base64.b64encode( - 'sysadmin_password'.encode("utf-8")).decode('utf-8') + "sysadmin_password".encode("utf-8") + ).decode("utf-8") install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES) - install_data.pop('software_version') - 
install_data['bmc_password'] = fake_bmc_password + install_data.pop("software_version") + install_data["bmc_password"] = fake_bmc_password subcloud = fake_subcloud.create_fake_subcloud( - self.ctx, name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"], - data_install=json.dumps(install_data)) + self.ctx, + name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"], + data_install=json.dumps(install_data), + ) - config_file = psd_common.get_config_file_path(subcloud.name, - consts.DEPLOY_CONFIG) + config_file = psd_common.get_config_file_path( + subcloud.name, consts.DEPLOY_CONFIG + ) mock_query.return_value = {} - mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + mock_get_vault_load_files.return_value = ("iso_file_path", "sig_file_path") mock_os_isdir.return_value = True - mock_os_listdir.return_value = ['deploy_chart_fake.tgz', - 'deploy_overrides_fake.yaml', - 'deploy_playbook_fake.yaml'] + mock_os_listdir.return_value = [ + "deploy_chart_fake.tgz", + "deploy_overrides_fake.yaml", + "deploy_playbook_fake.yaml", + ] mock_path_exists.side_effect = lambda x: True if x == config_file else False mock_load_yaml.return_value = {"software_version": SW_VERSION} - for k in ['name', 'system_mode', 'external_oam_subnet', - 'external_oam_gateway_address', 'external_oam_floating_address', - 'sysadmin_password']: + for k in [ + "name", + "system_mode", + "external_oam_subnet", + "external_oam_gateway_address", + "external_oam_floating_address", + "sysadmin_password", + ]: bootstrap_values = copy.copy(fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA) - redeploy_data = {**bootstrap_values, - 'sysadmin_password': fake_sysadmin_password} + redeploy_data = { + **bootstrap_values, + "sysadmin_password": fake_sysadmin_password, + } del redeploy_data[k] - upload_files = [("bootstrap_values", "bootstrap_fake_filename", - json.dumps(redeploy_data).encode("utf-8"))] - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - 
str(subcloud.id) + '/redeploy', - headers=FAKE_HEADERS, params=redeploy_data, - upload_files=upload_files) + upload_files = [ + ( + "bootstrap_values", + "bootstrap_fake_filename", + json.dumps(redeploy_data).encode("utf-8"), + ) + ] + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/redeploy", + headers=FAKE_HEADERS, + params=redeploy_data, + upload_files=upload_files, + ) - @mock.patch.object(psd_common, 'upload_config_file') - @mock.patch.object(psd_common.PatchingClient, 'query') - @mock.patch.object(os.path, 'isdir') - @mock.patch.object(os, 'listdir') - @mock.patch.object(cutils, 'get_vault_load_files') - @mock.patch.object(psd_common, 'validate_k8s_version') - @mock.patch.object(psd_common, 'validate_subcloud_config') - @mock.patch.object(psd_common, 'validate_bootstrap_values') + @mock.patch.object(psd_common, "upload_config_file") + @mock.patch.object(psd_common.PatchingClient, "query") + @mock.patch.object(os.path, "isdir") + @mock.patch.object(os, "listdir") + @mock.patch.object(cutils, "get_vault_load_files") + @mock.patch.object(psd_common, "validate_k8s_version") + @mock.patch.object(psd_common, "validate_subcloud_config") + @mock.patch.object(psd_common, "validate_bootstrap_values") def test_redeploy_subcloud_missing_stored_values( - self, mock_validate_bootstrap_values, mock_validate_subcloud_config, - mock_validate_k8s_version, mock_get_vault_load_files, - mock_os_listdir, mock_os_isdir, mock_query, mock_upload_config_values): - - fake_bmc_password = base64.b64encode( - 'bmc_password'.encode("utf-8")).decode('utf-8') + self, + mock_validate_bootstrap_values, + mock_validate_subcloud_config, + mock_validate_k8s_version, + mock_get_vault_load_files, + mock_os_listdir, + mock_os_isdir, + mock_query, + mock_upload_config_values, + ): + fake_bmc_password = base64.b64encode("bmc_password".encode("utf-8")).decode( + "utf-8" + ) fake_sysadmin_password = base64.b64encode( - 
'sysadmin_password'.encode("utf-8")).decode('utf-8') + "sysadmin_password".encode("utf-8") + ).decode("utf-8") install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES) - install_data.pop('software_version') + install_data.pop("software_version") bootstrap_data = copy.copy(fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA) - config_data = {'deploy_config': 'deploy config values'} + config_data = {"deploy_config": "deploy config values"} - for k in ['management_subnet', 'management_start_address', - 'management_end_address', 'management_gateway_address', - 'systemcontroller_gateway_address']: + for k in [ + "management_subnet", + "management_start_address", + "management_end_address", + "management_gateway_address", + "systemcontroller_gateway_address", + ]: del bootstrap_data[k] - redeploy_data = {**install_data, **bootstrap_data, **config_data, - 'sysadmin_password': fake_sysadmin_password, - 'bmc_password': fake_bmc_password} + redeploy_data = { + **install_data, + **bootstrap_data, + **config_data, + "sysadmin_password": fake_sysadmin_password, + "bmc_password": fake_bmc_password, + } subcloud = fake_subcloud.create_fake_subcloud( - self.ctx, name=bootstrap_data["name"]) + self.ctx, name=bootstrap_data["name"] + ) mock_query.return_value = {} - mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path') + mock_get_vault_load_files.return_value = ("iso_file_path", "sig_file_path") mock_os_isdir.return_value = True mock_upload_config_values.return_value = True - mock_os_listdir.return_value = ['deploy_chart_fake.tgz', - 'deploy_overrides_fake.yaml', - 'deploy_playbook_fake.yaml'] + mock_os_listdir.return_value = [ + "deploy_chart_fake.tgz", + "deploy_overrides_fake.yaml", + "deploy_playbook_fake.yaml", + ] - upload_files = [("install_values", "install_fake_filename", - json.dumps(install_data).encode("utf-8")), - ("bootstrap_values", "bootstrap_fake_filename", - json.dumps(bootstrap_data).encode("utf-8")), - ("deploy_config", "config_fake_filename", - 
json.dumps(config_data).encode("utf-8"))] + upload_files = [ + ( + "install_values", + "install_fake_filename", + json.dumps(install_data).encode("utf-8"), + ), + ( + "bootstrap_values", + "bootstrap_fake_filename", + json.dumps(bootstrap_data).encode("utf-8"), + ), + ( + "deploy_config", + "config_fake_filename", + json.dumps(config_data).encode("utf-8"), + ), + ] response = self.app.patch( - FAKE_URL + '/' + str(subcloud.id) + '/redeploy', - headers=FAKE_HEADERS, params=redeploy_data, - upload_files=upload_files) + FAKE_URL + "/" + str(subcloud.id) + "/redeploy", + headers=FAKE_HEADERS, + params=redeploy_data, + upload_files=upload_files, + ) mock_validate_bootstrap_values.assert_called_once() mock_validate_subcloud_config.assert_called_once() mock_validate_k8s_version.assert_called_once() self.mock_rpc_client().redeploy_subcloud.assert_called_once_with( - mock.ANY, + mock.ANY, subcloud.id, mock.ANY + ) + self.assertEqual(response.status_int, 200) + self.assertEqual(SW_VERSION, response.json["software-version"]) + + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(prestage, "_get_prestage_subcloud_info") + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + def test_prestage_subcloud_validate_detailed( + self, + mock_get_prestage_payload, + mock_prestage_subcloud_info, + mock_controller_upgrade, + ): + subcloud = fake_subcloud.create_fake_subcloud(self.ctx) + subcloud = db_api.subcloud_update( + self.ctx, subcloud.id, - mock.ANY) - self.assertEqual(response.status_int, 200) - self.assertEqual(SW_VERSION, response.json['software-version']) - - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(prestage, '_get_prestage_subcloud_info') - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - def test_prestage_subcloud_validate_detailed(self, mock_get_prestage_payload, - mock_prestage_subcloud_info, - mock_controller_upgrade): - subcloud = 
fake_subcloud.create_fake_subcloud(self.ctx) - subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED) - - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'force': False} - mock_controller_upgrade.return_value = list() - mock_prestage_subcloud_info.return_value = consts.SYSTEM_MODE_SIMPLEX, \ - health_report_no_alarm, \ - OAM_FLOATING_IP - - self.mock_rpc_client().prestage_subcloud.return_value = True - mock_get_prestage_payload.return_value = data - - response = self.app.patch_json(FAKE_URL + '/' + str(subcloud.id) + - '/prestage', - headers=FAKE_HEADERS, - params=data) - self.mock_rpc_client().prestage_subcloud.assert_called_once_with( - mock.ANY, - mock.ANY) - self.assertEqual(response.status_int, 200) - - @mock.patch.object(cutils, 'get_systemcontroller_installed_loads') - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - def test_prestage_subcloud_invalid_release(self, mock_get_prestage_payload, - mock_controller_upgrade, - mock_installed_loads): - subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED) + management_state=dccommon_consts.MANAGEMENT_MANAGED, + ) - fake_release = '21.12' - mock_installed_loads.return_value = ['22.12'] - - fake_password = (base64.b64encode('testpass'.encode("utf-8"))). 
\ - decode('ascii') - data = {'sysadmin_password': fake_password, - 'force': False, - 'release': fake_release} + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data = {"sysadmin_password": fake_password, "force": False} mock_controller_upgrade.return_value = list() + mock_prestage_subcloud_info.return_value = ( + consts.SYSTEM_MODE_SIMPLEX, + health_report_no_alarm, + OAM_FLOATING_IP, + ) self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/prestage', - headers=FAKE_HEADERS, params=data) - - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - @mock.patch.object(prestage, '_get_system_controller_upgrades') - def test_prestage_subcloud_unmanaged(self, mock_controller_upgrade, - mock_get_prestage_payload): + response = self.app.patch_json( + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) + self.mock_rpc_client().prestage_subcloud.assert_called_once_with( + mock.ANY, mock.ANY + ) + self.assertEqual(response.status_int, 200) + @mock.patch.object(cutils, "get_systemcontroller_installed_loads") + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + def test_prestage_subcloud_invalid_release( + self, + mock_get_prestage_payload, + mock_controller_upgrade, + mock_installed_loads, + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password} + self.ctx, + subcloud.id, + 
availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + ) + + fake_release = "21.12" + mock_installed_loads.return_value = ["22.12"] + + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data = { + "sysadmin_password": fake_password, + "force": False, + "release": fake_release, + } mock_controller_upgrade.return_value = list() self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/prestage', - headers=FAKE_HEADERS, params=data) - - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - @mock.patch.object(prestage, '_get_system_controller_upgrades') - def test_prestage_subcloud_offline(self, mock_controller_upgrade, - mock_get_prestage_payload): + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + @mock.patch.object(prestage, "_get_system_controller_upgrades") + def test_prestage_subcloud_unmanaged( + self, mock_controller_upgrade, mock_get_prestage_payload + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_OFFLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password} + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + ) + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data = {"sysadmin_password": 
fake_password} mock_controller_upgrade.return_value = list() self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/prestage', - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) + + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + @mock.patch.object(prestage, "_get_system_controller_upgrades") + def test_prestage_subcloud_offline( + self, mock_controller_upgrade, mock_get_prestage_payload + ): + subcloud = fake_subcloud.create_fake_subcloud(self.ctx) + subcloud = db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + ) + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data = {"sysadmin_password": fake_password} + mock_controller_upgrade.return_value = list() + + self.mock_rpc_client().prestage_subcloud.return_value = True + mock_get_prestage_payload.return_value = data + + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) def test_prestage_subcloud_backup_in_progress(self): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) @@ -2044,191 +2435,258 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): availability_status=dccommon_consts.AVAILABILITY_ONLINE, deploy_status=consts.DEPLOY_STATE_DONE, management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_status=consts.BACKUP_STATE_IN_PROGRESS) + backup_status=consts.BACKUP_STATE_IN_PROGRESS, + ) - self.assertRaises(exceptions.PrestagePreCheckFailedException, 
- prestage.initial_subcloud_validate, - subcloud, - [fake_subcloud.FAKE_SOFTWARE_VERSION], - fake_subcloud.FAKE_SOFTWARE_VERSION) + self.assertRaises( + exceptions.PrestagePreCheckFailedException, + prestage.initial_subcloud_validate, + subcloud, + [fake_subcloud.FAKE_SOFTWARE_VERSION], + fake_subcloud.FAKE_SOFTWARE_VERSION, + ) - @mock.patch.object(cutils, 'get_systemcontroller_installed_loads') - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(prestage, '_get_prestage_subcloud_info') - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - def test_prestage_subcloud_duplex(self, mock_get_prestage_payload, - mock_prestage_subcloud_info, - mock_controller_upgrade, - mock_installed_loads): + @mock.patch.object(cutils, "get_systemcontroller_installed_loads") + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(prestage, "_get_prestage_subcloud_info") + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + def test_prestage_subcloud_duplex( + self, + mock_get_prestage_payload, + mock_prestage_subcloud_info, + mock_controller_upgrade, + mock_installed_loads, + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED) + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + ) - fake_release = '21.12' + fake_release = "21.12" mock_installed_loads.return_value = [fake_release] - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).\ - decode('ascii') - data = {'sysadmin_password': fake_password, - 'force': False} + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data = {"sysadmin_password": fake_password, "force": False} 
mock_controller_upgrade.return_value = list() - mock_prestage_subcloud_info.return_value = consts.SYSTEM_MODE_DUPLEX, \ - health_report_no_alarm, \ - OAM_FLOATING_IP + mock_prestage_subcloud_info.return_value = ( + consts.SYSTEM_MODE_DUPLEX, + health_report_no_alarm, + OAM_FLOATING_IP, + ) self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/prestage', - headers=FAKE_HEADERS, params=data) - - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(prestage, '_get_prestage_subcloud_info') - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - def test_prestage_subcloud_non_mgmt_alarm(self, mock_get_prestage_payload, - mock_prestage_subcloud_info, - mock_controller_upgrade): + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(prestage, "_get_prestage_subcloud_info") + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + def test_prestage_subcloud_non_mgmt_alarm( + self, + mock_get_prestage_payload, + mock_prestage_subcloud_info, + mock_controller_upgrade, + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED) + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'force': False} + fake_password = 
(base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data = {"sysadmin_password": fake_password, "force": False} mock_controller_upgrade.return_value = list() - mock_prestage_subcloud_info.return_value = consts.SYSTEM_MODE_SIMPLEX, \ - health_report_no_mgmt_alarm, \ - OAM_FLOATING_IP + mock_prestage_subcloud_info.return_value = ( + consts.SYSTEM_MODE_SIMPLEX, + health_report_no_mgmt_alarm, + OAM_FLOATING_IP, + ) self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - response = self.app.patch_json(FAKE_URL + '/' + str(subcloud.id) + - '/prestage', - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) self.mock_rpc_client().prestage_subcloud.assert_called_once_with( - mock.ANY, - mock.ANY) + mock.ANY, mock.ANY + ) self.assertEqual(response.status_int, 200) - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(prestage, '_get_prestage_subcloud_info') - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - def test_prestage_subcloud_mgmt_alarm(self, mock_get_prestage_payload, - mock_prestage_subcloud_info, - mock_controller_upgrade): - + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(prestage, "_get_prestage_subcloud_info") + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + def test_prestage_subcloud_mgmt_alarm( + self, + mock_get_prestage_payload, + mock_prestage_subcloud_info, + mock_controller_upgrade, + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED) + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + 
management_state=dccommon_consts.MANAGEMENT_MANAGED, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'force': False} + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data = {"sysadmin_password": fake_password, "force": False} mock_controller_upgrade.return_value = list() - mock_prestage_subcloud_info.return_value = consts.SYSTEM_MODE_SIMPLEX, \ - health_report_mgmt_alarm, \ - OAM_FLOATING_IP + mock_prestage_subcloud_info.return_value = ( + consts.SYSTEM_MODE_SIMPLEX, + health_report_mgmt_alarm, + OAM_FLOATING_IP, + ) self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/prestage', - headers=FAKE_HEADERS, params=data) - - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(prestage, '_get_prestage_subcloud_info') - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - def test_prestage_subcloud_mgmt_alarm_force(self, mock_get_prestage_payload, - mock_prestage_subcloud_info, - mock_controller_upgrade): + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(prestage, "_get_prestage_subcloud_info") + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + def test_prestage_subcloud_mgmt_alarm_force( + self, + mock_get_prestage_payload, + mock_prestage_subcloud_info, + mock_controller_upgrade, + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) subcloud = db_api.subcloud_update( - self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE, - 
management_state=dccommon_consts.MANAGEMENT_MANAGED) + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'force': True} + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data = {"sysadmin_password": fake_password, "force": True} mock_controller_upgrade.return_value = list() - mock_prestage_subcloud_info.return_value = consts.SYSTEM_MODE_SIMPLEX, \ - health_report_mgmt_alarm, \ - OAM_FLOATING_IP + mock_prestage_subcloud_info.return_value = ( + consts.SYSTEM_MODE_SIMPLEX, + health_report_mgmt_alarm, + OAM_FLOATING_IP, + ) self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - response = self.app.patch_json(FAKE_URL + '/' + str(subcloud.id) + - '/prestage', - headers=FAKE_HEADERS, - params=data) + response = self.app.patch_json( + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) self.mock_rpc_client().prestage_subcloud.assert_called_once_with( - mock.ANY, - mock.ANY) + mock.ANY, mock.ANY + ) self.assertEqual(response.status_int, 200) - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(prestage, '_get_prestage_subcloud_info') - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - def test_prestage_subcloud_not_allowed_state(self, mock_get_prestage_payload, - mock_prestage_subcloud_info, - mock_controller_upgrade): - + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(prestage, "_get_prestage_subcloud_info") + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + def test_prestage_subcloud_not_allowed_state( + self, + mock_get_prestage_payload, + mock_prestage_subcloud_info, + mock_controller_upgrade, + 
): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - subcloud = db_api.subcloud_update(self.ctx, subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - deploy_status='NotAllowedState') + subcloud = db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + deploy_status="NotAllowedState", + ) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password, - 'force': False} + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data = {"sysadmin_password": fake_password, "force": False} mock_controller_upgrade.return_value = list() - mock_prestage_subcloud_info.return_value = consts.SYSTEM_MODE_SIMPLEX, \ - health_report_no_alarm, \ - OAM_FLOATING_IP + mock_prestage_subcloud_info.return_value = ( + consts.SYSTEM_MODE_SIMPLEX, + health_report_no_alarm, + OAM_FLOATING_IP, + ) self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/prestage', - headers=FAKE_HEADERS, params=data) - - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - def test_prestage_subcloud_controller_upgrading(self, mock_get_prestage_payload, - mock_controller_upgrade): + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + def test_prestage_subcloud_controller_upgrading( + self, 
mock_get_prestage_payload, mock_controller_upgrade + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data = {'sysadmin_password': fake_password} - mock_controller_upgrade.return_value = list('upgrade') + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data = {"sysadmin_password": fake_password} + mock_controller_upgrade.return_value = list("upgrade") self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/prestage', - headers=FAKE_HEADERS, params=data) - - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - def test_prestage_subcloud_no_password(self, mock_get_prestage_payload, - mock_controller_upgrade): + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + def test_prestage_subcloud_no_password( + self, mock_get_prestage_payload, mock_controller_upgrade + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) data = {} mock_controller_upgrade.return_value = list() @@ -2236,156 +2694,175 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest): self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/prestage', - headers=FAKE_HEADERS, params=data) - - @mock.patch.object(prestage, '_get_system_controller_upgrades') - 
@mock.patch.object(subclouds.SubcloudsController, '_get_prestage_payload') - def test_prestage_subcloud_password_not_encoded(self, mock_get_prestage_payload, - mock_controller_upgrade): + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(subclouds.SubcloudsController, "_get_prestage_payload") + def test_prestage_subcloud_password_not_encoded( + self, mock_get_prestage_payload, mock_controller_upgrade + ): subcloud = fake_subcloud.create_fake_subcloud(self.ctx) - data = {'sysadmin_password': 'notencoded'} + data = {"sysadmin_password": "notencoded"} mock_controller_upgrade.return_value = list() self.mock_rpc_client().prestage_subcloud.return_value = True mock_get_prestage_payload.return_value = data - six.assertRaisesRegex(self, webtest.app.AppError, "400 *", - self.app.patch_json, FAKE_URL + '/' + - str(subcloud.id) + '/prestage', - headers=FAKE_HEADERS, params=data) + six.assertRaisesRegex( + self, + webtest.app.AppError, + "400 *", + self.app.patch_json, + FAKE_URL + "/" + str(subcloud.id) + "/prestage", + headers=FAKE_HEADERS, + params=data, + ) def test_get_management_subnet(self): - payload = { - 'management_subnet': "192.168.204.0/24" - } - self.assertEqual(cutils.get_management_subnet(payload), - payload['management_subnet']) + payload = {"management_subnet": "192.168.204.0/24"} + self.assertEqual( + cutils.get_management_subnet(payload), payload["management_subnet"] + ) def test_get_management_subnet_return_admin(self): payload = { - 'admin_subnet': "192.168.205.0/24", - 'management_subnet': "192.168.204.0/24" + "admin_subnet": "192.168.205.0/24", + "management_subnet": "192.168.204.0/24", } - self.assertEqual(cutils.get_management_subnet(payload), - payload['admin_subnet']) + self.assertEqual( + cutils.get_management_subnet(payload), 
payload["admin_subnet"] + ) def test_get_management_start_address(self): - payload = { - 'management_start_address': "192.168.204.2" - } - self.assertEqual(cutils.get_management_start_address(payload), - payload['management_start_address']) + payload = {"management_start_address": "192.168.204.2"} + self.assertEqual( + cutils.get_management_start_address(payload), + payload["management_start_address"], + ) def test_get_management_start_address_return_admin(self): payload = { - 'admin_start_address': "192.168.205.2", - 'management_start_address': "192.168.204.2" + "admin_start_address": "192.168.205.2", + "management_start_address": "192.168.204.2", } - self.assertEqual(cutils.get_management_start_address(payload), - payload['admin_start_address']) + self.assertEqual( + cutils.get_management_start_address(payload), + payload["admin_start_address"], + ) def test_get_management_end_address(self): - payload = { - 'management_end_address': "192.168.204.50" - } - self.assertEqual(cutils.get_management_end_address(payload), - payload['management_end_address']) + payload = {"management_end_address": "192.168.204.50"} + self.assertEqual( + cutils.get_management_end_address(payload), + payload["management_end_address"], + ) def test_get_management_end_address_return_admin(self): payload = { - 'admin_end_address': "192.168.205.50", - 'management_end_address': "192.168.204.50" + "admin_end_address": "192.168.205.50", + "management_end_address": "192.168.204.50", } - self.assertEqual(cutils.get_management_end_address(payload), - payload['admin_end_address']) + self.assertEqual( + cutils.get_management_end_address(payload), payload["admin_end_address"] + ) def test_get_management_gateway_address(self): - payload = { - 'management_gateway_address': "192.168.204.1" - } - self.assertEqual(cutils.get_management_gateway_address(payload), - payload['management_gateway_address']) + payload = {"management_gateway_address": "192.168.204.1"} + self.assertEqual( + 
cutils.get_management_gateway_address(payload), + payload["management_gateway_address"], + ) def test_get_management_gateway_address_return_admin(self): payload = { - 'admin_gateway_address': "192.168.205.1", - 'management_gateway_address': "192.168.204.1" + "admin_gateway_address": "192.168.205.1", + "management_gateway_address": "192.168.204.1", } - self.assertEqual(cutils.get_management_gateway_address(payload), - payload['admin_gateway_address']) + self.assertEqual( + cutils.get_management_gateway_address(payload), + payload["admin_gateway_address"], + ) def test_validate_admin_config_subnet_small(self): - admin_subnet = "192.168.205.0/32" admin_start_address = "192.168.205.2" admin_end_address = "192.168.205.50" admin_gateway_address = "192.168.205.1" - six.assertRaisesRegex(self, - Exception, - "Subnet too small*", - psd_common.validate_admin_network_config, - admin_subnet, - admin_start_address, - admin_end_address, - admin_gateway_address, - existing_networks=None, - operation=None) + six.assertRaisesRegex( + self, + Exception, + "Subnet too small*", + psd_common.validate_admin_network_config, + admin_subnet, + admin_start_address, + admin_end_address, + admin_gateway_address, + existing_networks=None, + operation=None, + ) def test_validate_admin_config_start_address_outOfSubnet(self): - admin_subnet = "192.168.205.0/28" admin_start_address = "192.168.205.200" admin_end_address = "192.168.205.50" admin_gateway_address = "192.168.205.1" - six.assertRaisesRegex(self, - Exception, - "Address must be in subnet*", - psd_common.validate_admin_network_config, - admin_subnet, - admin_start_address, - admin_end_address, - admin_gateway_address, - existing_networks=None, - operation=None) + six.assertRaisesRegex( + self, + Exception, + "Address must be in subnet*", + psd_common.validate_admin_network_config, + admin_subnet, + admin_start_address, + admin_end_address, + admin_gateway_address, + existing_networks=None, + operation=None, + ) def 
test_validate_admin_config_end_address_outOfSubnet(self): - admin_subnet = "192.168.205.0/28" admin_start_address = "192.168.205.1" admin_end_address = "192.168.205.50" admin_gateway_address = "192.168.205.1" - six.assertRaisesRegex(self, - Exception, - "Address must be in subnet*", - psd_common.validate_admin_network_config, - admin_subnet, - admin_start_address, - admin_end_address, - admin_gateway_address, - existing_networks=None, - operation=None) + six.assertRaisesRegex( + self, + Exception, + "Address must be in subnet*", + psd_common.validate_admin_network_config, + admin_subnet, + admin_start_address, + admin_end_address, + admin_gateway_address, + existing_networks=None, + operation=None, + ) def test_validate_admin_config_gateway_address_outOfSubnet(self): - admin_subnet = "192.168.205.0/28" admin_start_address = "192.168.205.1" admin_end_address = "192.168.205.12" admin_gateway_address = "192.168.205.50" - six.assertRaisesRegex(self, - Exception, - "Address must be in subnet*", - psd_common.validate_admin_network_config, - admin_subnet, - admin_start_address, - admin_end_address, - admin_gateway_address, - existing_networks=None, - operation=None) + six.assertRaisesRegex( + self, + Exception, + "Address must be in subnet*", + psd_common.validate_admin_network_config, + admin_subnet, + admin_start_address, + admin_end_address, + admin_gateway_address, + existing_networks=None, + operation=None, + ) diff --git a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_system_peer.py b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_system_peer.py index 7bebeb481..59818b515 100644 --- a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_system_peer.py +++ b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_system_peer.py @@ -1,15 +1,15 @@ -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # +import uuid + import mock from six.moves import http_client -import uuid from dcmanager.db.sqlalchemy import api as db_api from dcmanager.rpc import client as rpc_client - from dcmanager.tests.unit.api import test_root_controller as testroot from dcmanager.tests.unit.api.v1.controllers.mixins import APIMixin from dcmanager.tests.unit.api.v1.controllers.mixins import DeleteMixin diff --git a/distributedcloud/dcmanager/tests/unit/audit/test_firmware_audit_manager.py b/distributedcloud/dcmanager/tests/unit/audit/test_firmware_audit_manager.py index 5664ea383..0e355fc5f 100644 --- a/distributedcloud/dcmanager/tests/unit/audit/test_firmware_audit_manager.py +++ b/distributedcloud/dcmanager/tests/unit/audit/test_firmware_audit_manager.py @@ -1,27 +1,23 @@ -# Copyright (c) 2017-2023 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# import mock - from oslo_config import cfg -import sys - from dccommon import consts as dccommon_consts - -sys.modules['fm_core'] = mock.Mock() - from dcmanager.audit import firmware_audit from dcmanager.audit import patch_audit from dcmanager.audit import subcloud_audit_manager diff --git a/distributedcloud/dcmanager/tests/unit/audit/test_kube_audit_manager.py b/distributedcloud/dcmanager/tests/unit/audit/test_kube_audit_manager.py index 09ee1ce89..9c65ef317 100644 --- a/distributedcloud/dcmanager/tests/unit/audit/test_kube_audit_manager.py +++ b/distributedcloud/dcmanager/tests/unit/audit/test_kube_audit_manager.py @@ -1,26 +1,28 @@ -# Copyright (c) 2017-2023 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# -import mock import uuid +import mock + from dccommon import consts as dccommon_consts from dcmanager.audit import firmware_audit from dcmanager.audit import kubernetes_audit from dcmanager.audit import patch_audit from dcmanager.audit import subcloud_audit_manager - from dcmanager.tests import base from dcmanager.tests import utils diff --git a/distributedcloud/dcmanager/tests/unit/audit/test_patch_audit_manager.py b/distributedcloud/dcmanager/tests/unit/audit/test_patch_audit_manager.py index 966359c21..0896ded03 100644 --- a/distributedcloud/dcmanager/tests/unit/audit/test_patch_audit_manager.py +++ b/distributedcloud/dcmanager/tests/unit/audit/test_patch_audit_manager.py @@ -1,27 +1,23 @@ -# Copyright (c) 2017-2023 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# import mock - from oslo_config import cfg -import sys - from dccommon import consts as dccommon_consts - -sys.modules['fm_core'] = mock.Mock() - from dcmanager.audit import patch_audit from dcmanager.audit import subcloud_audit_manager from dcmanager.tests import base @@ -389,7 +385,7 @@ class TestPatchAudit(base.DCManagerTestCase): subcloud_region=base.SUBCLOUD_4['region_name'], endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD, sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC), - ] + ] self.fake_dcmanager_state_api.update_subcloud_endpoint_status.\ assert_has_calls(expected_calls) @@ -447,7 +443,8 @@ class TestPatchAudit(base.DCManagerTestCase): am = subcloud_audit_manager.SubcloudAuditManager() am.patch_audit = pm mock_patching_client.side_effect = FakePatchingClientInSync - mock_sysinv_client.side_effect = FakeSysinvClientOneLoadUnmatchedSoftwareVersion + mock_sysinv_client.side_effect = ( + FakeSysinvClientOneLoadUnmatchedSoftwareVersion) do_load_audit = True patch_audit_data = self.get_patch_audit_data(am) diff --git a/distributedcloud/dcmanager/tests/unit/audit/test_service.py b/distributedcloud/dcmanager/tests/unit/audit/test_service.py index 70fc4adc7..cafe35427 100644 --- a/distributedcloud/dcmanager/tests/unit/audit/test_service.py +++ b/distributedcloud/dcmanager/tests/unit/audit/test_service.py @@ -1,22 +1,25 @@ -# Copyright (c) 2020-2021 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # +from oslo_config import cfg + from dcmanager.audit import service from dcmanager.common import scheduler from dcmanager.tests import base from dcmanager.tests import utils -from oslo_config import cfg CONF = cfg.CONF diff --git a/distributedcloud/dcmanager/tests/unit/audit/test_subcloud_audit_manager.py b/distributedcloud/dcmanager/tests/unit/audit/test_subcloud_audit_manager.py index 287dbc661..cc72145c6 100644 --- a/distributedcloud/dcmanager/tests/unit/audit/test_subcloud_audit_manager.py +++ b/distributedcloud/dcmanager/tests/unit/audit/test_subcloud_audit_manager.py @@ -1,30 +1,30 @@ -# Copyright (c) 2017-2023 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # -import mock - import sys +import mock + from dccommon import consts as dccommon_consts - -sys.modules['fm_core'] = mock.Mock() - from dcmanager.audit import subcloud_audit_manager from dcmanager.db.sqlalchemy import api as db_api - from dcmanager.tests import base +sys.modules['fm_core'] = mock.Mock() + class FakeAuditWorkerAPI(object): diff --git a/distributedcloud/dcmanager/tests/unit/audit/test_subcloud_audit_worker_manager.py b/distributedcloud/dcmanager/tests/unit/audit/test_subcloud_audit_worker_manager.py index 4cfbe67f3..509a1377c 100644 --- a/distributedcloud/dcmanager/tests/unit/audit/test_subcloud_audit_worker_manager.py +++ b/distributedcloud/dcmanager/tests/unit/audit/test_subcloud_audit_worker_manager.py @@ -1,24 +1,26 @@ -# Copyright (c) 2017-2023 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # import copy -import mock import random - import sys -sys.modules['fm_core'] = mock.Mock() +import mock + +from keystoneauth1 import exceptions as keystone_exceptions from dccommon import consts as dccommon_consts from dcmanager.audit import subcloud_audit_manager @@ -26,9 +28,9 @@ from dcmanager.audit import subcloud_audit_worker_manager from dcmanager.common import consts from dcmanager.common import scheduler from dcmanager.db.sqlalchemy import api as db_api - from dcmanager.tests import base -from keystoneauth1 import exceptions as keystone_exceptions + +sys.modules['fm_core'] = mock.Mock() class FakeDCManagerAPI(object): @@ -434,13 +436,15 @@ class TestAuditWorkerManager(base.DCManagerTestCase): do_kube_rootca_update_audit) # Verify the subcloud was set to online - self.fake_dcmanager_state_api.update_subcloud_availability.assert_called_with( - mock.ANY, subcloud.name, subcloud.region_name, - dccommon_consts.AVAILABILITY_ONLINE, False, 0) + self.fake_dcmanager_state_api.update_subcloud_availability.\ + assert_called_with( + mock.ANY, subcloud.name, subcloud.region_name, + dccommon_consts.AVAILABILITY_ONLINE, False, 0 + ) # Verify the _update_subcloud_audit_fail_count is not called with mock.patch.object(wm, 
'_update_subcloud_audit_fail_count') as \ - mock_update_subcloud_audit_fail_count: + mock_update_subcloud_audit_fail_count: mock_update_subcloud_audit_fail_count.assert_not_called() # Verify the openstack endpoints were not updated @@ -514,13 +518,15 @@ class TestAuditWorkerManager(base.DCManagerTestCase): do_kube_rootca_update_audit) # Verify the subcloud was set to online - self.fake_dcmanager_state_api.update_subcloud_availability.assert_called_with( - mock.ANY, subcloud.name, subcloud.region_name, - dccommon_consts.AVAILABILITY_ONLINE, False, 0) + self.fake_dcmanager_state_api.update_subcloud_availability.\ + assert_called_with( + mock.ANY, subcloud.name, subcloud.region_name, + dccommon_consts.AVAILABILITY_ONLINE, False, 0 + ) # Verify the _update_subcloud_audit_fail_count is not called with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \ - mock_update_subcloud_audit_fail_count: + mock_update_subcloud_audit_fail_count: mock_update_subcloud_audit_fail_count.assert_not_called() # Verify the openstack endpoints were not added @@ -569,28 +575,24 @@ class TestAuditWorkerManager(base.DCManagerTestCase): # Convert to dict like what would happen calling via RPC # Note: the other data should also be converted... 
patch_audit_data = patch_audit_data.to_dict() - wm._audit_subcloud(subcloud, - update_subcloud_state, - do_audit_openstack, - patch_audit_data, - firmware_audit_data, - kubernetes_audit_data, - kube_rootca_update_audit_data, - software_audit_data, - do_patch_audit, - do_load_audit, - do_firmware_audit, - do_kubernetes_audit, - do_kube_rootca_update_audit) + wm._audit_subcloud( + subcloud, update_subcloud_state, do_audit_openstack, patch_audit_data, + firmware_audit_data, kubernetes_audit_data, + kube_rootca_update_audit_data, software_audit_data, do_patch_audit, + do_load_audit, do_firmware_audit, do_kubernetes_audit, + do_kube_rootca_update_audit + ) # Verify the subcloud was set to online - self.fake_dcmanager_state_api.update_subcloud_availability.assert_called_with( - mock.ANY, subcloud.name, subcloud.region_name, - dccommon_consts.AVAILABILITY_ONLINE, False, 0) + self.fake_dcmanager_state_api.update_subcloud_availability.\ + assert_called_with( + mock.ANY, subcloud.name, subcloud.region_name, + dccommon_consts.AVAILABILITY_ONLINE, False, 0 + ) # Verify the _update_subcloud_audit_fail_count is not called with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \ - mock_update_subcloud_audit_fail_count: + mock_update_subcloud_audit_fail_count: mock_update_subcloud_audit_fail_count.assert_not_called() # Verify the openstack endpoints were not added @@ -644,7 +646,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase): # Verify the _update_subcloud_audit_fail_count is not called with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \ - mock_update_subcloud_audit_fail_count: + mock_update_subcloud_audit_fail_count: mock_update_subcloud_audit_fail_count.assert_not_called() # Verify the openstack endpoints were not added @@ -683,13 +685,15 @@ class TestAuditWorkerManager(base.DCManagerTestCase): do_kube_rootca_update_audit=False) # Verify the subcloud state was updated even though no change - 
self.fake_dcmanager_state_api.update_subcloud_availability.assert_called_with( - mock.ANY, subcloud.name, subcloud.region_name, - dccommon_consts.AVAILABILITY_ONLINE, True, None) + self.fake_dcmanager_state_api.update_subcloud_availability.\ + assert_called_with( + mock.ANY, subcloud.name, subcloud.region_name, + dccommon_consts.AVAILABILITY_ONLINE, True, None + ) # Verify the _update_subcloud_audit_fail_count is not called with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \ - mock_update_subcloud_audit_fail_count: + mock_update_subcloud_audit_fail_count: mock_update_subcloud_audit_fail_count.assert_not_called() # Verify the openstack endpoints were not updated @@ -748,18 +752,17 @@ class TestAuditWorkerManager(base.DCManagerTestCase): do_kube_rootca_update_audit) # Convert to dict like what would happen calling via RPC patch_audit_data = patch_audit_data.to_dict() - wm._audit_subcloud(subcloud, update_subcloud_state=False, - do_audit_openstack=False, - patch_audit_data=patch_audit_data, - firmware_audit_data=firmware_audit_data, - kubernetes_audit_data=kubernetes_audit_data, - kube_rootca_update_audit_data=kube_rootca_update_audit_data, - software_audit_data=software_audit_data, - do_patch_audit=do_patch_audit, - do_load_audit=do_load_audit, - do_firmware_audit=do_firmware_audit, - do_kubernetes_audit=do_kubernetes_audit, - do_kube_rootca_update_audit=do_kube_rootca_update_audit) + wm._audit_subcloud( + subcloud, update_subcloud_state=False, do_audit_openstack=False, + patch_audit_data=patch_audit_data, + firmware_audit_data=firmware_audit_data, + kubernetes_audit_data=kubernetes_audit_data, + kube_rootca_update_audit_data=kube_rootca_update_audit_data, + software_audit_data=software_audit_data, + do_patch_audit=do_patch_audit, do_load_audit=do_load_audit, + do_firmware_audit=do_firmware_audit, + do_kubernetes_audit=do_kubernetes_audit, + do_kube_rootca_update_audit=do_kube_rootca_update_audit) # Verify alarm update is called once 
self.fake_alarm_aggr.update_alarm_summary.assert_called_once_with( @@ -787,7 +790,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase): self.assertEqual(subcloud.audit_fail_count, audit_fail_count) # Verify the update_subcloud_availability was not called - self.fake_dcmanager_state_api.update_subcloud_availability.assert_not_called() + self.fake_dcmanager_state_api.update_subcloud_availability.\ + assert_not_called() # Update the DB like dcmanager would do. subcloud = db_api.subcloud_update( @@ -796,18 +800,17 @@ class TestAuditWorkerManager(base.DCManagerTestCase): audit_fail_count=audit_fail_count) # Audit the subcloud again - wm._audit_subcloud(subcloud, update_subcloud_state=False, - do_audit_openstack=False, - patch_audit_data=patch_audit_data, - firmware_audit_data=firmware_audit_data, - kubernetes_audit_data=kubernetes_audit_data, - kube_rootca_update_audit_data=kube_rootca_update_audit_data, - software_audit_data=software_audit_data, - do_patch_audit=do_patch_audit, - do_load_audit=do_load_audit, - do_firmware_audit=do_firmware_audit, - do_kubernetes_audit=do_kubernetes_audit, - do_kube_rootca_update_audit=do_kube_rootca_update_audit) + wm._audit_subcloud( + subcloud, update_subcloud_state=False, do_audit_openstack=False, + patch_audit_data=patch_audit_data, + firmware_audit_data=firmware_audit_data, + kubernetes_audit_data=kubernetes_audit_data, + kube_rootca_update_audit_data=kube_rootca_update_audit_data, + software_audit_data=software_audit_data, + do_patch_audit=do_patch_audit, do_load_audit=do_load_audit, + do_firmware_audit=do_firmware_audit, + do_kubernetes_audit=do_kubernetes_audit, + do_kube_rootca_update_audit=do_kube_rootca_update_audit) audit_fail_count = audit_fail_count + 1 @@ -816,7 +819,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase): self.assertEqual(subcloud.audit_fail_count, audit_fail_count) # Verify the update_subcloud_availability was not called - 
self.fake_dcmanager_state_api.update_subcloud_availability.assert_not_called() + self.fake_dcmanager_state_api.update_subcloud_availability.\ + assert_not_called() # Verify alarm update is called only once self.fake_alarm_aggr.update_alarm_summary.assert_called_once() @@ -865,18 +869,17 @@ class TestAuditWorkerManager(base.DCManagerTestCase): do_kube_rootca_update_audit) # Convert to dict like what would happen calling via RPC patch_audit_data = patch_audit_data.to_dict() - wm._audit_subcloud(subcloud, update_subcloud_state=False, - do_audit_openstack=True, - patch_audit_data=patch_audit_data, - firmware_audit_data=firmware_audit_data, - kubernetes_audit_data=kubernetes_audit_data, - kube_rootca_update_audit_data=kube_rootca_update_audit_data, - software_audit_data=software_audit_data, - do_patch_audit=do_patch_audit, - do_load_audit=do_load_audit, - do_firmware_audit=do_firmware_audit, - do_kubernetes_audit=do_kubernetes_audit, - do_kube_rootca_update_audit=do_kube_rootca_update_audit) + wm._audit_subcloud( + subcloud, update_subcloud_state=False, do_audit_openstack=True, + patch_audit_data=patch_audit_data, + firmware_audit_data=firmware_audit_data, + kubernetes_audit_data=kubernetes_audit_data, + kube_rootca_update_audit_data=kube_rootca_update_audit_data, + software_audit_data=software_audit_data, + do_patch_audit=do_patch_audit, do_load_audit=do_load_audit, + do_firmware_audit=do_firmware_audit, + do_kubernetes_audit=do_kubernetes_audit, + do_kube_rootca_update_audit=do_kube_rootca_update_audit) # Verify the subcloud state was not updated self.fake_dcmanager_state_api.update_subcloud_availability.\ @@ -884,7 +887,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase): # Verify the _update_subcloud_audit_fail_count is not called with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \ - mock_update_subcloud_audit_fail_count: + mock_update_subcloud_audit_fail_count: mock_update_subcloud_audit_fail_count.assert_not_called() # Verify the openstack 
endpoints were not updated @@ -911,7 +914,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase): @mock.patch.object(subcloud_audit_worker_manager.db_api, 'subcloud_audits_end_audit') def test_online_subcloud_audit_not_skipping_while_installing( - self, mock_subcloud_audits_end_audit, mock_thread_start): + self, mock_subcloud_audits_end_audit, mock_thread_start + ): subcloud = self.create_subcloud_static(self.ctx, name='subcloud1') self.assertIsNotNone(subcloud) @@ -973,18 +977,17 @@ class TestAuditWorkerManager(base.DCManagerTestCase): do_kube_rootca_update_audit) # Convert to dict like what would happen calling via RPC patch_audit_data = patch_audit_data.to_dict() - wm._audit_subcloud(subcloud, update_subcloud_state=False, - do_audit_openstack=False, - patch_audit_data=patch_audit_data, - firmware_audit_data=firmware_audit_data, - kubernetes_audit_data=kubernetes_audit_data, - kube_rootca_update_audit_data=kube_rootca_update_audit_data, - software_audit_data=software_audit_data, - do_patch_audit=do_patch_audit, - do_load_audit=do_load_audit, - do_firmware_audit=do_firmware_audit, - do_kubernetes_audit=do_kubernetes_audit, - do_kube_rootca_update_audit=do_kube_rootca_update_audit) + wm._audit_subcloud( + subcloud, update_subcloud_state=False, do_audit_openstack=False, + patch_audit_data=patch_audit_data, + firmware_audit_data=firmware_audit_data, + kubernetes_audit_data=kubernetes_audit_data, + kube_rootca_update_audit_data=kube_rootca_update_audit_data, + software_audit_data=software_audit_data, + do_patch_audit=do_patch_audit, do_load_audit=do_load_audit, + do_firmware_audit=do_firmware_audit, + do_kubernetes_audit=do_kubernetes_audit, + do_kube_rootca_update_audit=do_kube_rootca_update_audit) # Verify that the subcloud was updated to offline audit_fail_count = 2 @@ -997,7 +1000,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase): @mock.patch.object(subcloud_audit_worker_manager.db_api, 'subcloud_audits_end_audit') def 
test_offline_subcloud_audit_skip_while_installing( - self, mock_subcloud_audits_end_audit, mock_thread_start): + self, mock_subcloud_audits_end_audit, mock_thread_start): subcloud = self.create_subcloud_static(self.ctx, name='subcloud1') self.assertIsNotNone(subcloud) @@ -1059,18 +1062,17 @@ class TestAuditWorkerManager(base.DCManagerTestCase): do_kube_rootca_update_audit) # Convert to dict like what would happen calling via RPC patch_audit_data = patch_audit_data.to_dict() - wm._audit_subcloud(subcloud, update_subcloud_state=False, - do_audit_openstack=True, - patch_audit_data=patch_audit_data, - firmware_audit_data=firmware_audit_data, - kubernetes_audit_data=kubernetes_audit_data, - kube_rootca_update_audit_data=kube_rootca_update_audit_data, - software_audit_data=software_audit_data, - do_patch_audit=do_patch_audit, - do_load_audit=do_load_audit, - do_firmware_audit=do_firmware_audit, - do_kubernetes_audit=do_kubernetes_audit, - do_kube_rootca_update_audit=do_kube_rootca_update_audit) + wm._audit_subcloud( + subcloud, update_subcloud_state=False, do_audit_openstack=True, + patch_audit_data=patch_audit_data, + firmware_audit_data=firmware_audit_data, + kubernetes_audit_data=kubernetes_audit_data, + kube_rootca_update_audit_data=kube_rootca_update_audit_data, + software_audit_data=software_audit_data, + do_patch_audit=do_patch_audit, do_load_audit=do_load_audit, + do_firmware_audit=do_firmware_audit, + do_kubernetes_audit=do_kubernetes_audit, + do_kube_rootca_update_audit=do_kube_rootca_update_audit) # Verify the audit fail count was updated in the DB. 
subcloud = db_api.subcloud_get(self.ctx, subcloud.id) @@ -1153,7 +1155,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase): # Verify the _update_subcloud_audit_fail_count is not called with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \ - mock_update_subcloud_audit_fail_count: + mock_update_subcloud_audit_fail_count: mock_update_subcloud_audit_fail_count.assert_not_called() # Verify the openstack endpoints were added @@ -1218,7 +1220,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase): # Verify the _update_subcloud_audit_fail_count is not called with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \ - mock_update_subcloud_audit_fail_count: + mock_update_subcloud_audit_fail_count: mock_update_subcloud_audit_fail_count.assert_not_called() # Verify the openstack endpoints were removed @@ -1282,7 +1284,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase): # Verify the _update_subcloud_audit_fail_count is not called with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \ - mock_update_subcloud_audit_fail_count: + mock_update_subcloud_audit_fail_count: mock_update_subcloud_audit_fail_count.assert_not_called() # Verify the openstack endpoints were removed @@ -1368,7 +1370,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase): # Verify the _update_subcloud_audit_fail_count is not called with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \ - mock_update_subcloud_audit_fail_count: + mock_update_subcloud_audit_fail_count: mock_update_subcloud_audit_fail_count.assert_not_called() # Verify firmware audit is not called diff --git a/distributedcloud/dcmanager/tests/unit/common/test_phased_subcloud_deploy.py b/distributedcloud/dcmanager/tests/unit/common/test_phased_subcloud_deploy.py index 15f14c07d..916f29422 100644 --- a/distributedcloud/dcmanager/tests/unit/common/test_phased_subcloud_deploy.py +++ b/distributedcloud/dcmanager/tests/unit/common/test_phased_subcloud_deploy.py @@ -1,15 +1,17 @@ # -# 
Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -import mock import os +import mock + @mock.patch.object(os, 'listdir') -def test_check_deploy_files_in_alternate_location_with_all_file_exists(self, mock_os_isdir, mock_os_listdir): +def test_check_deploy_files_in_alternate_location_with_all_file_exists( + self, mock_os_isdir, mock_os_listdir): payload = {} mock_os_isdir.return_value = True mock_os_listdir.return_value = ['deploy-chart-fake-deployment-manager.tgz', @@ -20,7 +22,8 @@ def test_check_deploy_files_in_alternate_location_with_all_file_exists(self, moc self.assertEqual(response, True) -def test_check_deploy_files_in_alternate_location_with_deploy_chart_not_exists(self, mock_os_isdir, mock_os_listdir): +def test_check_deploy_files_in_alternate_location_with_deploy_chart_not_exists( + self, mock_os_isdir, mock_os_listdir): payload = {} mock_os_isdir.return_value = True mock_os_listdir.return_value = ['deploy-chart-fake.tgz', @@ -31,7 +34,8 @@ def test_check_deploy_files_in_alternate_location_with_deploy_chart_not_exists(s self.assertEqual(response, False) -def test_check_deploy_files_in_alternate_location_with_deploy_overrides_not_exists(self, mock_os_isdir, mock_os_listdir): +def test_check_deploy_files_in_alternate_location_with_deploy_overrides_not_exists( + self, mock_os_isdir, mock_os_listdir): payload = {} mock_os_isdir.return_value = True mock_os_listdir.return_value = ['deploy-chart-fake-deployment-manager.tgz', @@ -42,7 +46,8 @@ def test_check_deploy_files_in_alternate_location_with_deploy_overrides_not_exis self.assertEqual(response, False) -def test_check_deploy_files_in_alternate_location_with_deploy_playbook_not_exists(self, mock_os_isdir, mock_os_listdir): +def test_check_deploy_files_in_alternate_location_with_deploy_playbook_not_exists( + self, mock_os_isdir, mock_os_listdir): payload = {} mock_os_isdir.return_value = True mock_os_listdir.return_value = 
['deploy-chart-fake-deployment-manager.tgz', diff --git a/distributedcloud/dcmanager/tests/unit/db/test_subcloud_alarms.py b/distributedcloud/dcmanager/tests/unit/db/test_subcloud_alarms.py index 32b1ec0c6..28159c0b0 100644 --- a/distributedcloud/dcmanager/tests/unit/db/test_subcloud_alarms.py +++ b/distributedcloud/dcmanager/tests/unit/db/test_subcloud_alarms.py @@ -1,21 +1,24 @@ -# Copyright (c) 2020-2021 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# + from oslo_db import exception as db_exception from dcmanager.common import consts from dcmanager.common import exceptions as exception -from dcmanager.db import api as api +from dcmanager.db import api from dcmanager.db.sqlalchemy import api as db_api from dcmanager.tests import base diff --git a/distributedcloud/dcmanager/tests/unit/db/test_subcloud_audits.py b/distributedcloud/dcmanager/tests/unit/db/test_subcloud_audits.py index 7bff94a2d..65cb10cf5 100644 --- a/distributedcloud/dcmanager/tests/unit/db/test_subcloud_audits.py +++ b/distributedcloud/dcmanager/tests/unit/db/test_subcloud_audits.py @@ -1,15 +1,17 @@ -# Copyright (c) 2021-2022 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2021-2022, 2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# import datetime @@ -18,7 +20,7 @@ from oslo_db import exception as db_exception from oslo_utils import uuidutils from dcmanager.common import exceptions as exception -from dcmanager.db import api as api +from dcmanager.db import api from dcmanager.db.sqlalchemy import api as db_api from dcmanager.tests import base @@ -26,7 +28,6 @@ get_engine = api.get_engine class DBAPISubcloudAuditsTest(base.DCManagerTestCase): - @staticmethod def create_subcloud(ctxt, name, **kwargs): values = { @@ -59,100 +60,112 @@ class DBAPISubcloudAuditsTest(base.DCManagerTestCase): def test_subcloud_audits_get(self): # Test the SubcloudAudits created when we created subcloud2 in setup. result = db_api.subcloud_audits_get(self.ctx, 2) - self.assertEqual(result['subcloud_id'], 2) - self.assertEqual(result['audit_started_at'], datetime.datetime(1, 1, 1, 0, 0)) - self.assertEqual(result['audit_finished_at'], datetime.datetime(1, 1, 1, 0, 0)) - self.assertEqual(result['patch_audit_requested'], False) - self.assertEqual(result['load_audit_requested'], False) - self.assertEqual(result['firmware_audit_requested'], False) - self.assertEqual(result['kubernetes_audit_requested'], False) - self.assertEqual(result['kube_rootca_update_audit_requested'], False) - self.assertEqual(result['spare_audit_requested'], False) - self.assertEqual(result['spare2_audit_requested'], False) - self.assertEqual(result['reserved'], None) + self.assertEqual(result["subcloud_id"], 2) + self.assertEqual( + result["audit_started_at"], datetime.datetime(1, 1, 1, 0, 0) + ) + self.assertEqual( + result["audit_finished_at"], datetime.datetime(1, 1, 1, 0, 0) + ) + self.assertEqual(result["patch_audit_requested"], False) + self.assertEqual(result["load_audit_requested"], False) + self.assertEqual(result["firmware_audit_requested"], False) + self.assertEqual(result["kubernetes_audit_requested"], False) + self.assertEqual(result["kube_rootca_update_audit_requested"], False) + self.assertEqual(result["spare_audit_requested"], 
False) + self.assertEqual(result["spare2_audit_requested"], False) + self.assertEqual(result["reserved"], None) def test_subcloud_audits_get_not_found(self): - self.assertRaises(exception.SubcloudNotFound, - db_api.subcloud_audits_get, - self.ctx, '4') + self.assertRaises( + exception.SubcloudNotFound, db_api.subcloud_audits_get, self.ctx, "4" + ) def test_subcloud_alarms_create_duplicate(self): # There's already an entry for subcloud2, try adding another. - self.assertRaises(db_exception.DBDuplicateEntry, - db_api.subcloud_audits_create, - self.ctx, 2) + self.assertRaises( + db_exception.DBDuplicateEntry, db_api.subcloud_audits_create, self.ctx, 2 + ) def test_subcloud_audits_get_all(self): subcloud_audits = db_api.subcloud_audits_get_all(self.ctx) self.assertEqual(len(subcloud_audits), 3) - self.assertEqual(subcloud_audits[0]['subcloud_id'], 1) - self.assertEqual(subcloud_audits[1]['subcloud_id'], 2) - self.assertEqual(subcloud_audits[2]['subcloud_id'], 3) + self.assertEqual(subcloud_audits[0]["subcloud_id"], 1) + self.assertEqual(subcloud_audits[1]["subcloud_id"], 2) + self.assertEqual(subcloud_audits[2]["subcloud_id"], 3) def test_subcloud_alarms_delete(self): result = db_api.subcloud_audits_get(self.ctx, 2) - db_api.subcloud_destroy(self.ctx, result['subcloud_id']) - self.assertRaises(exception.SubcloudNotFound, - db_api.subcloud_audits_get, - self.ctx, result['subcloud_id']) + db_api.subcloud_destroy(self.ctx, result["subcloud_id"]) + self.assertRaises( + exception.SubcloudNotFound, + db_api.subcloud_audits_get, + self.ctx, + result["subcloud_id"], + ) def test_subcloud_audits_update(self): result = db_api.subcloud_audits_get(self.ctx, 1) - self.assertEqual(result['patch_audit_requested'], False) + self.assertEqual(result["patch_audit_requested"], False) result = db_api.subcloud_audits_get(self.ctx, 2) - self.assertEqual(result['patch_audit_requested'], False) - values = {'patch_audit_requested': True} + self.assertEqual(result["patch_audit_requested"], 
False) + values = {"patch_audit_requested": True} result = db_api.subcloud_audits_update(self.ctx, 2, values) - self.assertEqual(result['patch_audit_requested'], True) + self.assertEqual(result["patch_audit_requested"], True) result = db_api.subcloud_audits_get(self.ctx, 1) - self.assertEqual(result['patch_audit_requested'], False) + self.assertEqual(result["patch_audit_requested"], False) result = db_api.subcloud_audits_get(self.ctx, 2) - self.assertEqual(result['patch_audit_requested'], True) + self.assertEqual(result["patch_audit_requested"], True) def test_subcloud_audits_update_all(self): subcloud_audits = db_api.subcloud_audits_get_all(self.ctx) for audit in subcloud_audits: - self.assertEqual(audit['patch_audit_requested'], False) - self.assertEqual(audit['load_audit_requested'], False) - values = {'patch_audit_requested': True, - 'load_audit_requested': True} + self.assertEqual(audit["patch_audit_requested"], False) + self.assertEqual(audit["load_audit_requested"], False) + values = {"patch_audit_requested": True, "load_audit_requested": True} result = db_api.subcloud_audits_update_all(self.ctx, values) self.assertEqual(result, 3) subcloud_audits = db_api.subcloud_audits_get_all(self.ctx) for audit in subcloud_audits: - self.assertEqual(audit['patch_audit_requested'], True) - self.assertEqual(audit['load_audit_requested'], True) + self.assertEqual(audit["patch_audit_requested"], True) + self.assertEqual(audit["load_audit_requested"], True) def test_subcloud_audits_get_all_need_audit(self): current_time = datetime.datetime.utcnow() - last_audit_threshold = current_time - datetime.timedelta( - seconds=1000) + last_audit_threshold = current_time - datetime.timedelta(seconds=1000) audits = db_api.subcloud_audits_get_all_need_audit( - self.ctx, last_audit_threshold) + self.ctx, last_audit_threshold + ) # They should all need audits. 
self.assertEqual(len(audits), 3) # Update subcloud1 to show it's been audited recently and # check it doesn't come back as needing an audit. db_api.subcloud_audits_end_audit(self.ctx, 1, []) audits = db_api.subcloud_audits_get_all_need_audit( - self.ctx, last_audit_threshold) + self.ctx, last_audit_threshold + ) subcloud_ids = [audit.subcloud_id for audit in audits] self.assertEqual(len(subcloud_ids), 2) self.assertNotIn(1, subcloud_ids) # Set one of the special audits to make sure it overrides. - values = {'patch_audit_requested': True} + values = {"patch_audit_requested": True} db_api.subcloud_audits_update(self.ctx, 1, values) audits = db_api.subcloud_audits_get_all_need_audit( - self.ctx, last_audit_threshold) + self.ctx, last_audit_threshold + ) self.assertEqual(len(audits), 3) def test_subcloud_audits_start_and_end(self): audit = db_api.subcloud_audits_get_and_start_audit(self.ctx, 3) - self.assertTrue((datetime.datetime.utcnow() - audit.audit_started_at) < - datetime.timedelta(seconds=1)) + self.assertTrue( + (datetime.datetime.utcnow() - audit.audit_started_at) + < datetime.timedelta(seconds=1) + ) audit = db_api.subcloud_audits_end_audit(self.ctx, 3, []) - self.assertTrue((datetime.datetime.utcnow() - audit.audit_finished_at) < - datetime.timedelta(seconds=1)) + self.assertTrue( + (datetime.datetime.utcnow() - audit.audit_finished_at) + < datetime.timedelta(seconds=1) + ) self.assertFalse(audit.state_update_requested) def test_subcloud_audits_fix_expired(self): @@ -165,36 +178,40 @@ class DBAPISubcloudAuditsTest(base.DCManagerTestCase): # but with the 'finished' timestamp recent. 
db_api.subcloud_audits_end_audit(self.ctx, 2, []) db_api.subcloud_audits_get_and_start_audit(self.ctx, 2) - last_audit_threshold = (datetime.datetime.utcnow() - - datetime.timedelta(seconds=100)) + last_audit_threshold = datetime.datetime.utcnow() - datetime.timedelta( + seconds=100 + ) count = db_api.subcloud_audits_fix_expired_audits( - self.ctx, last_audit_threshold) + self.ctx, last_audit_threshold + ) self.assertEqual(count, 1) # Check that for the one that was updated we didn't trigger sub-audits. result = db_api.subcloud_audits_get(self.ctx, 1) - self.assertEqual(result['patch_audit_requested'], False) + self.assertEqual(result["patch_audit_requested"], False) def test_subcloud_audits_fix_expired_trigger_audits(self): # Set the 'start' timestamp later than the 'finished' timestamp # but with the 'finished' timestamp long ago. db_api.subcloud_audits_get_and_start_audit(self.ctx, 1) - last_audit_threshold = (datetime.datetime.utcnow() - - datetime.timedelta(seconds=100)) + last_audit_threshold = datetime.datetime.utcnow() - datetime.timedelta( + seconds=100 + ) # Fix up expired audits and trigger subaudits. count = db_api.subcloud_audits_fix_expired_audits( - self.ctx, last_audit_threshold, trigger_audits=True) + self.ctx, last_audit_threshold, trigger_audits=True + ) self.assertEqual(count, 1) # For the fixed-up audits, subaudits should be requested. 
result = db_api.subcloud_audits_get(self.ctx, 1) - self.assertEqual(result['patch_audit_requested'], True) - self.assertEqual(result['firmware_audit_requested'], True) - self.assertEqual(result['load_audit_requested'], True) - self.assertEqual(result['kubernetes_audit_requested'], True) - self.assertEqual(result['kube_rootca_update_audit_requested'], True) + self.assertEqual(result["patch_audit_requested"], True) + self.assertEqual(result["firmware_audit_requested"], True) + self.assertEqual(result["load_audit_requested"], True) + self.assertEqual(result["kubernetes_audit_requested"], True) + self.assertEqual(result["kube_rootca_update_audit_requested"], True) # For the not-fixed-up audits, subaudits should not be requested. result = db_api.subcloud_audits_get(self.ctx, 2) - self.assertEqual(result['patch_audit_requested'], False) - self.assertEqual(result['firmware_audit_requested'], False) - self.assertEqual(result['load_audit_requested'], False) - self.assertEqual(result['kubernetes_audit_requested'], False) - self.assertEqual(result['kube_rootca_update_audit_requested'], False) + self.assertEqual(result["patch_audit_requested"], False) + self.assertEqual(result["firmware_audit_requested"], False) + self.assertEqual(result["load_audit_requested"], False) + self.assertEqual(result["kubernetes_audit_requested"], False) + self.assertEqual(result["kube_rootca_update_audit_requested"], False) diff --git a/distributedcloud/dcmanager/tests/unit/db/test_subcloud_db_api.py b/distributedcloud/dcmanager/tests/unit/db/test_subcloud_db_api.py index 1b5fe0e42..eb3572043 100644 --- a/distributedcloud/dcmanager/tests/unit/db/test_subcloud_db_api.py +++ b/distributedcloud/dcmanager/tests/unit/db/test_subcloud_db_api.py @@ -1,5 +1,5 @@ # Copyright (c) 2015 Ericsson AB -# Copyright (c) 2017-2023 Wind River Systems, Inc. +# Copyright (c) 2017-2024 Wind River Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -14,13 +14,16 @@ # License for the specific language governing permissions and limitations # under the License. # + from oslo_db import exception as db_exception +from sqlalchemy.engine import Engine +from sqlalchemy import event from dccommon import consts as dccommon_consts from dcmanager.common import config from dcmanager.common import consts from dcmanager.common import exceptions -from dcmanager.db import api as api +from dcmanager.db import api from dcmanager.db.sqlalchemy import api as db_api from dcmanager.tests import base from dcmanager.tests import utils @@ -28,11 +31,6 @@ from dcmanager.tests import utils config.register_options() get_engine = api.get_engine -# Enable foreign key support in sqlite - see: -# http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html -from sqlalchemy.engine import Engine -from sqlalchemy import event - @event.listens_for(Engine, "connect") def set_sqlite_pragma(dbapi_connection, connection_record): @@ -42,7 +40,6 @@ def set_sqlite_pragma(dbapi_connection, connection_record): class DBAPISubcloudTest(base.DCManagerTestCase): - @staticmethod def create_subcloud_static(ctxt, **kwargs): values = { @@ -88,8 +85,8 @@ class DBAPISubcloudTest(base.DCManagerTestCase): @staticmethod def create_subcloud_status(ctxt, **kwargs): values = { - 'subcloud_id': 1, - 'endpoint_type': "sysinv", + "subcloud_id": 1, + "endpoint_type": "sysinv", } values.update(kwargs) return db_api.subcloud_status_create(ctxt, **values) @@ -97,11 +94,11 @@ class DBAPISubcloudTest(base.DCManagerTestCase): @staticmethod def create_sw_update_strategy(ctxt, **kwargs): values = { - 'type': consts.SW_UPDATE_TYPE_PATCH, - 'state': consts.SW_UPDATE_STATE_INITIAL, - 'subcloud_apply_type': consts.SUBCLOUD_APPLY_TYPE_PARALLEL, - 'max_parallel_subclouds': 10, - 'stop_on_failure': True, + "type": consts.SW_UPDATE_TYPE_PATCH, + "state": consts.SW_UPDATE_STATE_INITIAL, + "subcloud_apply_type": 
consts.SUBCLOUD_APPLY_TYPE_PARALLEL, + "max_parallel_subclouds": 10, + "stop_on_failure": True, } values.update(kwargs) return db_api.sw_update_strategy_create(ctxt, **values) @@ -109,10 +106,10 @@ class DBAPISubcloudTest(base.DCManagerTestCase): @staticmethod def create_strategy_step(ctxt, **kwargs): values = { - 'subcloud_id': 1, - 'stage': 1, - 'state': consts.STRATEGY_STATE_INITIAL, - 'details': "The details" + "subcloud_id": 1, + "stage": 1, + "state": consts.STRATEGY_STATE_INITIAL, + "details": "The details", } values.update(kwargs) return db_api.strategy_step_create(ctxt, **values) @@ -123,7 +120,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase): def test_create_subcloud(self): fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) - name = fake_subcloud['name'] + name = fake_subcloud["name"] subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) @@ -135,13 +132,15 @@ class DBAPISubcloudTest(base.DCManagerTestCase): fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) - fake_subcloud2 = utils.create_subcloud_dict( - base.SUBCLOUD_SAMPLE_DATA_0) - fake_subcloud2['management-start-ip'] = "2.3.4.6" - fake_subcloud2['management-end-ip'] = "2.3.4.7" - self.assertRaises(db_exception.DBDuplicateEntry, - self.create_subcloud, - self.ctx, fake_subcloud2) + fake_subcloud2 = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) + fake_subcloud2["management-start-ip"] = "2.3.4.6" + fake_subcloud2["management-end-ip"] = "2.3.4.7" + self.assertRaises( + db_exception.DBDuplicateEntry, + self.create_subcloud, + self.ctx, + fake_subcloud2, + ) def test_create_multiple_subclouds(self): name1 = 'testname1' @@ -193,7 +192,8 @@ class DBAPISubcloudTest(base.DCManagerTestCase): admin_gateway_ip = '192.168.102.1' rehomed = True updated = db_api.subcloud_update( - self.ctx, subcloud.id, + self.ctx, + subcloud.id, 
management_state=management_state, availability_status=availability_status, software_version=software_version, @@ -237,13 +237,13 @@ class DBAPISubcloudTest(base.DCManagerTestCase): db_api.subcloud_destroy(self.ctx, subcloud.id) - self.assertRaises(exceptions.SubcloudNotFound, - db_api.subcloud_get, - self.ctx, subcloud.id) + self.assertRaises( + exceptions.SubcloudNotFound, db_api.subcloud_get, self.ctx, subcloud.id + ) def test_subcloud_get_by_name(self): fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) - name = fake_subcloud['name'] + name = fake_subcloud["name"] subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) @@ -252,101 +252,119 @@ class DBAPISubcloudTest(base.DCManagerTestCase): self.assertEqual(name, by_name.name) def test_subcloud_get_by_non_existing_name(self): - name = 'testname' - self.assertRaises(exceptions.SubcloudNameNotFound, - db_api.subcloud_get_by_name, - self.ctx, name) + name = "testname" + self.assertRaises( + exceptions.SubcloudNameNotFound, + db_api.subcloud_get_by_name, + self.ctx, + name, + ) def test_create_subcloud_status(self): fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) - endpoint_type = 'testendpoint' + endpoint_type = "testendpoint" subcloud_status = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type) + self.ctx, endpoint_type=endpoint_type + ) self.assertIsNotNone(subcloud_status) - new_subcloud_status = db_api.subcloud_status_get(self.ctx, - subcloud.id, - endpoint_type) + new_subcloud_status = db_api.subcloud_status_get( + self.ctx, subcloud.id, endpoint_type + ) self.assertIsNotNone(new_subcloud_status) self.assertEqual(endpoint_type, new_subcloud_status.endpoint_type) - self.assertEqual(dccommon_consts.SYNC_STATUS_UNKNOWN, - new_subcloud_status.sync_status) + self.assertEqual( + dccommon_consts.SYNC_STATUS_UNKNOWN, 
new_subcloud_status.sync_status + ) def test_create_multiple_subcloud_statuses(self): fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) - default_subcloud_statuses = db_api.subcloud_status_get_all(self.ctx, - subcloud.id) + default_subcloud_statuses = db_api.subcloud_status_get_all( + self.ctx, subcloud.id + ) num_default_subcloud_statuses = len(default_subcloud_statuses) - self.assertEqual(num_default_subcloud_statuses, - len(dccommon_consts.ENDPOINT_TYPES_LIST)) + self.assertEqual( + num_default_subcloud_statuses, len(dccommon_consts.ENDPOINT_TYPES_LIST) + ) - endpoint_type1 = 'testendpoint1' + endpoint_type1 = "testendpoint1" subcloud_status1 = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type1) + self.ctx, endpoint_type=endpoint_type1 + ) self.assertIsNotNone(subcloud_status1) - endpoint_type2 = 'testendpoint2' + endpoint_type2 = "testendpoint2" subcloud_status2 = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type2) + self.ctx, endpoint_type=endpoint_type2 + ) self.assertIsNotNone(subcloud_status2) - endpoint_type3 = 'testendpoint3' + endpoint_type3 = "testendpoint3" subcloud_status3 = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type3) + self.ctx, endpoint_type=endpoint_type3 + ) self.assertIsNotNone(subcloud_status3) - new_subcloud_statuses = db_api.subcloud_status_get_all(self.ctx, - subcloud.id) + new_subcloud_statuses = db_api.subcloud_status_get_all(self.ctx, subcloud.id) self.assertIsNotNone(new_subcloud_statuses) - self.assertEqual(num_default_subcloud_statuses + 3, - len(new_subcloud_statuses)) - self.assertEqual(endpoint_type1, - new_subcloud_statuses[num_default_subcloud_statuses] - .endpoint_type) + self.assertEqual( + num_default_subcloud_statuses + 3, len(new_subcloud_statuses) + ) + self.assertEqual( + endpoint_type1, + 
new_subcloud_statuses[num_default_subcloud_statuses].endpoint_type, + ) self.assertEqual( num_default_subcloud_statuses + 1, - new_subcloud_statuses[num_default_subcloud_statuses].id) + new_subcloud_statuses[num_default_subcloud_statuses].id, + ) self.assertEqual( endpoint_type2, - new_subcloud_statuses[num_default_subcloud_statuses + - 1].endpoint_type) + new_subcloud_statuses[num_default_subcloud_statuses + 1].endpoint_type, + ) self.assertEqual( num_default_subcloud_statuses + 2, - new_subcloud_statuses[num_default_subcloud_statuses + 1].id) + new_subcloud_statuses[num_default_subcloud_statuses + 1].id, + ) self.assertEqual( endpoint_type3, - new_subcloud_statuses[num_default_subcloud_statuses + - 2].endpoint_type) + new_subcloud_statuses[num_default_subcloud_statuses + 2].endpoint_type, + ) self.assertEqual( num_default_subcloud_statuses + 3, - new_subcloud_statuses[num_default_subcloud_statuses + 2].id) + new_subcloud_statuses[num_default_subcloud_statuses + 2].id, + ) def test_update_subcloud_status(self): fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) - endpoint_type = 'testendpoint' + endpoint_type = "testendpoint" subcloud_status = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type) + self.ctx, endpoint_type=endpoint_type + ) self.assertIsNotNone(subcloud_status) sync_status = dccommon_consts.SYNC_STATUS_IN_SYNC - updated = db_api.subcloud_status_update(self.ctx, subcloud.id, - endpoint_type=endpoint_type, - sync_status=sync_status) + updated = db_api.subcloud_status_update( + self.ctx, + subcloud.id, + endpoint_type=endpoint_type, + sync_status=sync_status, + ) self.assertIsNotNone(updated) self.assertEqual(sync_status, updated.sync_status) - updated_subcloud_status = db_api.subcloud_status_get(self.ctx, - subcloud.id, - endpoint_type) + updated_subcloud_status = db_api.subcloud_status_get( + self.ctx, subcloud.id, 
endpoint_type + ) self.assertIsNotNone(updated_subcloud_status) self.assertEqual(endpoint_type, updated_subcloud_status.endpoint_type) self.assertEqual(sync_status, updated_subcloud_status.sync_status) @@ -356,44 +374,50 @@ class DBAPISubcloudTest(base.DCManagerTestCase): subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) - endpoint_type1 = 'testendpoint1' + endpoint_type1 = "testendpoint1" subcloud_status = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type1) + self.ctx, endpoint_type=endpoint_type1 + ) self.assertIsNotNone(subcloud_status) - endpoint_type2 = 'testendpoint2' + endpoint_type2 = "testendpoint2" subcloud_status = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type2) + self.ctx, endpoint_type=endpoint_type2 + ) self.assertIsNotNone(subcloud_status) - endpoint_type3 = 'testendpoint3' + endpoint_type3 = "testendpoint3" subcloud_status = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type3) + self.ctx, endpoint_type=endpoint_type3 + ) self.assertIsNotNone(subcloud_status) sync_status = dccommon_consts.SYNC_STATUS_IN_SYNC endpoint_type_list = [endpoint_type1, endpoint_type2] - db_api.subcloud_status_update_endpoints(self.ctx, subcloud.id, - endpoint_type_list=endpoint_type_list, - sync_status=sync_status) + db_api.subcloud_status_update_endpoints( + self.ctx, + subcloud.id, + endpoint_type_list=endpoint_type_list, + sync_status=sync_status, + ) - updated_endpoint1_status = db_api.subcloud_status_get(self.ctx, - subcloud.id, - endpoint_type1) + updated_endpoint1_status = db_api.subcloud_status_get( + self.ctx, subcloud.id, endpoint_type1 + ) self.assertIsNotNone(updated_endpoint1_status) self.assertEqual(endpoint_type1, updated_endpoint1_status.endpoint_type) self.assertEqual(sync_status, updated_endpoint1_status.sync_status) - updated_endpoint2_status = db_api.subcloud_status_get(self.ctx, - subcloud.id, - endpoint_type2) + updated_endpoint2_status = 
db_api.subcloud_status_get( + self.ctx, subcloud.id, endpoint_type2 + ) self.assertIsNotNone(updated_endpoint2_status) self.assertEqual(endpoint_type2, updated_endpoint2_status.endpoint_type) self.assertEqual(sync_status, updated_endpoint2_status.sync_status) - updated_endpoint3_status = db_api.subcloud_status_get(self.ctx, - subcloud.id, - endpoint_type3) + updated_endpoint3_status = db_api.subcloud_status_get( + self.ctx, subcloud.id, endpoint_type3 + ) self.assertIsNotNone(updated_endpoint3_status) self.assertEqual(endpoint_type3, updated_endpoint3_status.endpoint_type) self.assertNotEqual(sync_status, updated_endpoint3_status.sync_status) @@ -403,118 +427,146 @@ class DBAPISubcloudTest(base.DCManagerTestCase): subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) - endpoint_type1 = 'testendpoint1' + endpoint_type1 = "testendpoint1" subcloud_status = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type1) + self.ctx, endpoint_type=endpoint_type1 + ) self.assertIsNotNone(subcloud_status) - endpoint_type2 = 'testendpoint2' + endpoint_type2 = "testendpoint2" sync_status = dccommon_consts.SYNC_STATUS_IN_SYNC endpoint_type_list = [endpoint_type2] - self.assertRaises(exceptions.SubcloudStatusNotFound, - db_api.subcloud_status_update_endpoints, - self.ctx, subcloud.id, - endpoint_type_list, sync_status) + self.assertRaises( + exceptions.SubcloudStatusNotFound, + db_api.subcloud_status_update_endpoints, + self.ctx, + subcloud.id, + endpoint_type_list, + sync_status, + ) def test_delete_subcloud_status(self): fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) - endpoint_type = 'testendpoint' + endpoint_type = "testendpoint" subcloud_status = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type) + self.ctx, endpoint_type=endpoint_type + ) self.assertIsNotNone(subcloud_status) 
db_api.subcloud_status_destroy_all(self.ctx, subcloud.id) - self.assertRaises(exceptions.SubcloudStatusNotFound, - db_api.subcloud_status_get, - self.ctx, subcloud.id, endpoint_type) + self.assertRaises( + exceptions.SubcloudStatusNotFound, + db_api.subcloud_status_get, + self.ctx, + subcloud.id, + endpoint_type, + ) def test_cascade_delete_subcloud_status(self): fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) - endpoint_type = 'testendpoint' + endpoint_type = "testendpoint" subcloud_status = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type) + self.ctx, endpoint_type=endpoint_type + ) self.assertIsNotNone(subcloud_status) db_api.subcloud_destroy(self.ctx, subcloud.id) - self.assertRaises(exceptions.SubcloudNotFound, - db_api.subcloud_get, - self.ctx, subcloud.id) - self.assertRaises(exceptions.SubcloudStatusNotFound, - db_api.subcloud_status_get, - self.ctx, subcloud.id, endpoint_type) + self.assertRaises( + exceptions.SubcloudNotFound, db_api.subcloud_get, self.ctx, subcloud.id + ) + self.assertRaises( + exceptions.SubcloudStatusNotFound, + db_api.subcloud_status_get, + self.ctx, + subcloud.id, + endpoint_type, + ) def test_subcloud_status_get_all_by_name(self): fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) - name = fake_subcloud['name'] + name = fake_subcloud["name"] subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) - default_subcloud_statuses = db_api.subcloud_status_get_all(self.ctx, - subcloud.id) + default_subcloud_statuses = db_api.subcloud_status_get_all( + self.ctx, subcloud.id + ) num_default_subcloud_statuses = len(default_subcloud_statuses) - self.assertEqual(num_default_subcloud_statuses, - len(dccommon_consts.ENDPOINT_TYPES_LIST)) + self.assertEqual( + num_default_subcloud_statuses, len(dccommon_consts.ENDPOINT_TYPES_LIST) + ) - endpoint_type1 = 
'testendpoint1' + endpoint_type1 = "testendpoint1" subcloud_status1 = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type1) + self.ctx, endpoint_type=endpoint_type1 + ) self.assertIsNotNone(subcloud_status1) - endpoint_type2 = 'testendpoint2' + endpoint_type2 = "testendpoint2" subcloud_status2 = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type2) + self.ctx, endpoint_type=endpoint_type2 + ) self.assertIsNotNone(subcloud_status2) - endpoint_type3 = 'testendpoint3' + endpoint_type3 = "testendpoint3" subcloud_status3 = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type3) + self.ctx, endpoint_type=endpoint_type3 + ) self.assertIsNotNone(subcloud_status3) new_subcloud_statuses = db_api.subcloud_status_get_all_by_name( - self.ctx, name) + self.ctx, name + ) self.assertIsNotNone(new_subcloud_statuses) - self.assertEqual(num_default_subcloud_statuses + 3, - len(new_subcloud_statuses)) + self.assertEqual( + num_default_subcloud_statuses + 3, len(new_subcloud_statuses) + ) self.assertEqual( endpoint_type1, - new_subcloud_statuses[num_default_subcloud_statuses].endpoint_type) + new_subcloud_statuses[num_default_subcloud_statuses].endpoint_type, + ) self.assertEqual( num_default_subcloud_statuses + 1, - new_subcloud_statuses[num_default_subcloud_statuses + 0].id) + new_subcloud_statuses[num_default_subcloud_statuses + 0].id, + ) self.assertEqual( endpoint_type2, - new_subcloud_statuses[num_default_subcloud_statuses + - 1].endpoint_type) + new_subcloud_statuses[num_default_subcloud_statuses + 1].endpoint_type, + ) self.assertEqual( num_default_subcloud_statuses + 2, - new_subcloud_statuses[num_default_subcloud_statuses + 1].id) + new_subcloud_statuses[num_default_subcloud_statuses + 1].id, + ) self.assertEqual( endpoint_type3, - new_subcloud_statuses[num_default_subcloud_statuses + - 2].endpoint_type) + new_subcloud_statuses[num_default_subcloud_statuses + 2].endpoint_type, + ) self.assertEqual( num_default_subcloud_statuses 
+ 3, - new_subcloud_statuses[num_default_subcloud_statuses + 2].id) + new_subcloud_statuses[num_default_subcloud_statuses + 2].id, + ) def test_subcloud_status_get_all_by_non_existing_name(self): fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) subcloud = self.create_subcloud(self.ctx, fake_subcloud) self.assertIsNotNone(subcloud) - endpoint_type1 = 'testendpoint1' + endpoint_type1 = "testendpoint1" subcloud_status1 = self.create_subcloud_status( - self.ctx, endpoint_type=endpoint_type1) + self.ctx, endpoint_type=endpoint_type1 + ) self.assertIsNotNone(subcloud_status1) new_subcloud_statuses = db_api.subcloud_status_get_all_by_name( - self.ctx, 'thisnameisnotknown') + self.ctx, "thisnameisnotknown" + ) self.assertEqual([], new_subcloud_statuses) def test_create_sw_update_strategy(self): @@ -524,28 +576,30 @@ class DBAPISubcloudTest(base.DCManagerTestCase): subcloud_apply_type=consts.SUBCLOUD_APPLY_TYPE_SERIAL, max_parallel_subclouds=42, stop_on_failure=False, - state=consts.SW_UPDATE_STATE_APPLYING + state=consts.SW_UPDATE_STATE_APPLYING, ) self.assertIsNotNone(sw_update_strategy) new_sw_update_strategy = db_api.sw_update_strategy_get(self.ctx) self.assertIsNotNone(new_sw_update_strategy) - self.assertEqual(consts.SW_UPDATE_TYPE_UPGRADE, - new_sw_update_strategy.type) - self.assertEqual(consts.SUBCLOUD_APPLY_TYPE_SERIAL, - new_sw_update_strategy.subcloud_apply_type) + self.assertEqual(consts.SW_UPDATE_TYPE_UPGRADE, new_sw_update_strategy.type) + self.assertEqual( + consts.SUBCLOUD_APPLY_TYPE_SERIAL, + new_sw_update_strategy.subcloud_apply_type, + ) self.assertEqual(42, new_sw_update_strategy.max_parallel_subclouds) self.assertEqual(False, new_sw_update_strategy.stop_on_failure) - self.assertEqual(consts.SW_UPDATE_STATE_APPLYING, - new_sw_update_strategy.state) + self.assertEqual( + consts.SW_UPDATE_STATE_APPLYING, new_sw_update_strategy.state + ) def test_create_sw_update_strategy_duplicate(self): sw_update_strategy = 
self.create_sw_update_strategy(self.ctx) self.assertIsNotNone(sw_update_strategy) - self.assertRaises(db_exception.DBDuplicateEntry, - self.create_sw_update_strategy, - self.ctx) + self.assertRaises( + db_exception.DBDuplicateEntry, self.create_sw_update_strategy, self.ctx + ) def test_update_sw_update_strategy(self): sw_update_strategy = self.create_sw_update_strategy(self.ctx) @@ -565,56 +619,53 @@ class DBAPISubcloudTest(base.DCManagerTestCase): db_api.sw_update_strategy_destroy(self.ctx) - self.assertRaises(exceptions.NotFound, - db_api.sw_update_strategy_get, - self.ctx) + self.assertRaises( + exceptions.NotFound, db_api.sw_update_strategy_get, self.ctx + ) def test_create_strategy_step(self): - name = 'testname' + name = "testname" subcloud = self.create_subcloud_static(self.ctx, name=name) self.assertIsNotNone(subcloud) strategy_step = self.create_strategy_step( - self.ctx, stage=1, details="Bart was here") + self.ctx, stage=1, details="Bart was here" + ) self.assertIsNotNone(strategy_step) - new_strategy_step = db_api.strategy_step_get(self.ctx, - subcloud.id) + new_strategy_step = db_api.strategy_step_get(self.ctx, subcloud.id) self.assertIsNotNone(new_strategy_step) self.assertEqual(1, new_strategy_step.stage) - self.assertEqual(consts.STRATEGY_STATE_INITIAL, - new_strategy_step.state) + self.assertEqual(consts.STRATEGY_STATE_INITIAL, new_strategy_step.state) self.assertEqual("Bart was here", new_strategy_step.details) - new_strategy_step = db_api.strategy_step_get_by_name(self.ctx, - subcloud.name) + new_strategy_step = db_api.strategy_step_get_by_name(self.ctx, subcloud.name) self.assertIsNotNone(new_strategy_step) self.assertEqual(1, new_strategy_step.stage) - self.assertEqual(consts.STRATEGY_STATE_INITIAL, - new_strategy_step.state) + self.assertEqual(consts.STRATEGY_STATE_INITIAL, new_strategy_step.state) self.assertEqual("Bart was here", new_strategy_step.details) def test_strategy_step_get_all(self): - subcloud1 = 
self.create_subcloud_static(self.ctx, - name='subcloud one') + subcloud1 = self.create_subcloud_static(self.ctx, name="subcloud one") self.assertIsNotNone(subcloud1) - subcloud2 = self.create_subcloud_static(self.ctx, - name='subcloud two') + subcloud2 = self.create_subcloud_static(self.ctx, name="subcloud two") self.assertIsNotNone(subcloud2) - subcloud3 = self.create_subcloud_static(self.ctx, - name='subcloud three') + subcloud3 = self.create_subcloud_static(self.ctx, name="subcloud three") self.assertIsNotNone(subcloud3) strategy_step_stage1 = self.create_strategy_step( - self.ctx, subcloud_id=1, stage=1) + self.ctx, subcloud_id=1, stage=1 + ) self.assertIsNotNone(strategy_step_stage1) strategy_step_stage2 = self.create_strategy_step( - self.ctx, subcloud_id=2, stage=2) + self.ctx, subcloud_id=2, stage=2 + ) self.assertIsNotNone(strategy_step_stage2) strategy_step_stage3 = self.create_strategy_step( - self.ctx, subcloud_id=3, stage=2) + self.ctx, subcloud_id=3, stage=2 + ) self.assertIsNotNone(strategy_step_stage3) new_strategy = db_api.strategy_step_get_all(self.ctx) @@ -624,21 +675,22 @@ class DBAPISubcloudTest(base.DCManagerTestCase): self.assertEqual(1, new_strategy[0].id) self.assertEqual(1, new_strategy[0].stage) - self.assertEqual('subcloud one', new_strategy[0].subcloud.name) + self.assertEqual("subcloud one", new_strategy[0].subcloud.name) self.assertEqual(2, new_strategy[1].id) self.assertEqual(2, new_strategy[1].stage) - self.assertEqual('subcloud two', new_strategy[1].subcloud.name) + self.assertEqual("subcloud two", new_strategy[1].subcloud.name) self.assertEqual(3, new_strategy[2].id) self.assertEqual(2, new_strategy[2].stage) - self.assertEqual('subcloud three', new_strategy[2].subcloud.name) + self.assertEqual("subcloud three", new_strategy[2].subcloud.name) def test_update_strategy_step(self): - name = 'testname' + name = "testname" subcloud = self.create_subcloud_static(self.ctx, name=name) self.assertIsNotNone(subcloud) strategy_step = 
self.create_strategy_step( - self.ctx, stage=1, details="Bart was here") + self.ctx, stage=1, details="Bart was here" + ) self.assertIsNotNone(strategy_step) updated = db_api.strategy_step_update( @@ -646,28 +698,27 @@ class DBAPISubcloudTest(base.DCManagerTestCase): subcloud.id, stage=2, state=consts.STRATEGY_STATE_COMPLETE, - details="New details" + details="New details", ) self.assertIsNotNone(updated) self.assertEqual(2, updated.stage) self.assertEqual(consts.STRATEGY_STATE_COMPLETE, updated.state) self.assertEqual("New details", updated.details) - updated_strategy_step = db_api.strategy_step_get(self.ctx, - subcloud.id) + updated_strategy_step = db_api.strategy_step_get(self.ctx, subcloud.id) self.assertIsNotNone(updated_strategy_step) self.assertEqual(2, updated_strategy_step.stage) - self.assertEqual(consts.STRATEGY_STATE_COMPLETE, - updated_strategy_step.state) + self.assertEqual(consts.STRATEGY_STATE_COMPLETE, updated_strategy_step.state) self.assertEqual("New details", updated_strategy_step.details) def test_delete_strategy_step(self): - name = 'testname' + name = "testname" subcloud = self.create_subcloud_static(self.ctx, name=name) self.assertIsNotNone(subcloud) strategy_step = self.create_strategy_step( - self.ctx, stage=1, details="Bart was here") + self.ctx, stage=1, details="Bart was here" + ) self.assertIsNotNone(strategy_step) db_api.strategy_step_destroy_all(self.ctx) @@ -675,19 +726,23 @@ class DBAPISubcloudTest(base.DCManagerTestCase): self.assertEqual([], new_strategy) def test_cascade_delete_strategy_step(self): - name = 'testname' + name = "testname" subcloud = self.create_subcloud_static(self.ctx, name=name) self.assertIsNotNone(subcloud) strategy_step = self.create_strategy_step( - self.ctx, stage=1, details="Bart was here") + self.ctx, stage=1, details="Bart was here" + ) self.assertIsNotNone(strategy_step) db_api.subcloud_destroy(self.ctx, subcloud.id) - self.assertRaises(exceptions.SubcloudNotFound, - db_api.subcloud_get, - self.ctx, 
subcloud.id) + self.assertRaises( + exceptions.SubcloudNotFound, db_api.subcloud_get, self.ctx, subcloud.id + ) - self.assertRaises(exceptions.StrategyStepNotFound, - db_api.strategy_step_get, - self.ctx, subcloud.id) + self.assertRaises( + exceptions.StrategyStepNotFound, + db_api.strategy_step_get, + self.ctx, + subcloud.id, + ) diff --git a/distributedcloud/dcmanager/tests/unit/manager/test_service.py b/distributedcloud/dcmanager/tests/unit/manager/test_service.py index 7ec7ef570..30e841399 100644 --- a/distributedcloud/dcmanager/tests/unit/manager/test_service.py +++ b/distributedcloud/dcmanager/tests/unit/manager/test_service.py @@ -1,29 +1,33 @@ -# Copyright (c) 2017-2023 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# -import mock import os.path import sys -sys.modules['fm_core'] = mock.Mock() + +import mock + +from oslo_config import cfg +from oslo_utils import uuidutils from dcmanager.common import scheduler from dcmanager.manager import service from dcmanager.tests import base from dcmanager.tests import utils -from oslo_config import cfg -from oslo_utils import uuidutils +sys.modules['fm_core'] = mock.Mock() CONF = cfg.CONF FAKE_USER = utils.UUID1 FAKE_JOB = utils.UUID2 @@ -100,7 +104,9 @@ class TestDCManagerService(base.DCManagerTestCase): self.context, subcloud_id=1, management_state='testmgmtstatus') mock_subcloud_manager().update_subcloud.assert_called_once_with( - self.context, 1, 'testmgmtstatus', None, None, None, None, None, None, None, None, None) + self.context, 1, 'testmgmtstatus', None, None, None, None, None, None, + None, None, None + ) @mock.patch.object(service, 'SubcloudManager') @mock.patch.object(service, 'rpc_messaging') diff --git a/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py b/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py index 9920a00da..78873e42f 100644 --- a/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py +++ b/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py @@ -1,34 +1,33 @@ -# Copyright (c) 2017-2023 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # + import base64 import collections import copy import datetime import json import os +from os import path as os_path +import sys +import threading import mock - -from os import path as os_path from oslo_concurrency import lockutils from oslo_utils import timeutils - -import sys - -sys.modules['fm_core'] = mock.Mock() - -import threading +from tsconfig.tsconfig import SW_VERSION from dccommon import consts as dccommon_consts from dccommon.drivers.openstack import dcmanager_v1 @@ -46,11 +45,11 @@ from dcmanager.tests import base from dcmanager.tests.unit.common import fake_subcloud from dcmanager.tests.unit.manager import test_system_peer_manager from dcmanager.tests import utils -from tsconfig.tsconfig import SW_VERSION +sys.modules['fm_core'] = mock.Mock() + +ANS_PATH = dccommon_consts.ANSIBLE_OVERRIDES_PATH FAKE_PREVIOUS_SW_VERSION = '21.12' - - FAKE_ADMIN_USER_ID = 1 FAKE_SYSINV_USER_ID = 2 FAKE_DCMANAGER_USER_ID = 3 @@ -486,7 +485,8 @@ class TestSubcloudManager(base.DCManagerTestCase): sm.subcloud_deploy_install(self.ctx, subcloud.id, payload=fake_payload) mock_compose_install_command.assert_called_once_with( subcloud_name, - cutils.get_ansible_filename(subcloud_name, consts.INVENTORY_FILE_POSTFIX), + 
cutils.get_ansible_filename(subcloud_name, + consts.INVENTORY_FILE_POSTFIX), FAKE_PREVIOUS_SW_VERSION) # Verify subcloud was updated with correct values @@ -650,13 +650,13 @@ class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_PRE_CONFIG) - fake_payload = {"sysadmin_password": "testpass", - "deploy_playbook": "test_playbook.yaml", - "deploy_overrides": "test_overrides.yaml", - "deploy_chart": "test_chart.yaml", - "deploy_config": "subcloud1.yaml", - consts.BOOTSTRAP_ADDRESS: - fake_subcloud.FAKE_BOOTSTRAP_VALUE[consts.BOOTSTRAP_ADDRESS]} + fake_payload = { + "sysadmin_password": "testpass", "deploy_playbook": "test_playbook.yaml", + "deploy_overrides": "test_overrides.yaml", + "deploy_chart": "test_chart.yaml", "deploy_config": "subcloud1.yaml", + consts.BOOTSTRAP_ADDRESS: + fake_subcloud.FAKE_BOOTSTRAP_VALUE[consts.BOOTSTRAP_ADDRESS] + } sm = subcloud_manager.SubcloudManager() sm.subcloud_deploy_config(self.ctx, subcloud.id, @@ -676,7 +676,8 @@ class TestSubcloudManager(base.DCManagerTestCase): def test_subcloud_deploy_resume( self, mock_run_playbook, mock_get_playbook_for_software_version, mock_update_yml, mock_keyring, mock_create_subcloud_inventory, - mock_prepare_for_deployment, mock_run_subcloud_install): + mock_prepare_for_deployment, mock_run_subcloud_install + ): mock_get_playbook_for_software_version.return_value = "22.12" mock_keyring.get_password.return_value = "testpass" @@ -814,17 +815,12 @@ class TestSubcloudManager(base.DCManagerTestCase): @mock.patch.object(subcloud_manager, 'keyring') def test_add_subcloud_with_migration_option( - self, mock_keyring, - mock_write_subcloud_ansible_config, - mock_create_subcloud_inventory, - mock_create_addn_hosts, - mock_get_cached_regionone_data, - mock_sysinv_client, - mock_keystone_client, - mock_delete_subcloud_inventory, - mock_create_intermediate_ca_cert, - mock_compose_rehome_command, - mock_run_playbook): + self, mock_keyring, 
mock_write_subcloud_ansible_config, + mock_create_subcloud_inventory, mock_create_addn_hosts, + mock_get_cached_regionone_data, mock_sysinv_client, mock_keystone_client, + mock_delete_subcloud_inventory, mock_create_intermediate_ca_cert, + mock_compose_rehome_command, mock_run_playbook + ): values = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) values['deploy_status'] = consts.DEPLOY_STATE_NONE values['migrate'] = 'true' @@ -871,7 +867,8 @@ class TestSubcloudManager(base.DCManagerTestCase): @mock.patch.object(subcloud_manager, 'OpenStackDriver') @mock.patch.object(subcloud_manager, 'SysinvClient') - @mock.patch.object(subcloud_manager.SubcloudManager, '_get_cached_regionone_data') + @mock.patch.object(subcloud_manager.SubcloudManager, + '_get_cached_regionone_data') def test_add_subcloud_create_failed(self, mock_get_cached_regionone_data, mock_sysinv_client, @@ -900,10 +897,11 @@ class TestSubcloudManager(base.DCManagerTestCase): @mock.patch.object(subcloud_manager, 'keyring') @mock.patch.object(subcloud_manager, 'OpenStackDriver') @mock.patch.object(subcloud_manager, 'SysinvClient') - @mock.patch.object(subcloud_manager.SubcloudManager, '_get_cached_regionone_data') + @mock.patch.object(subcloud_manager.SubcloudManager, + '_get_cached_regionone_data') def test_add_subcloud_with_migrate_option_prep_failed( - self, mock_get_cached_regionone_data, mock_sysinv_client, - mock_keystone_client, mock_keyring): + self, mock_get_cached_regionone_data, mock_sysinv_client, + mock_keystone_client, mock_keyring): values = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0) values['migrate'] = 'true' @@ -930,7 +928,8 @@ class TestSubcloudManager(base.DCManagerTestCase): @mock.patch.object(subcloud_manager.SubcloudManager, '_delete_subcloud_cert') - @mock.patch.object(subcloud_manager.SubcloudManager, '_get_cached_regionone_data') + @mock.patch.object(subcloud_manager.SubcloudManager, + '_get_cached_regionone_data') @mock.patch.object(subcloud_manager, 
'SysinvClient') @mock.patch.object(subcloud_manager, 'OpenStackDriver') @mock.patch.object(subcloud_manager.SubcloudManager, @@ -961,9 +960,11 @@ class TestSubcloudManager(base.DCManagerTestCase): self.ctx, name='subcloud1', deploy_status=consts.DEPLOY_STATE_DONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE + ) fake_dcmanager_notification = FakeDCManagerNotifications() @@ -1090,9 +1091,11 @@ class TestSubcloudManager(base.DCManagerTestCase): self.ctx, name='subcloud1', deploy_status=consts.DEPLOY_STATE_DONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE + ) sm = subcloud_manager.SubcloudManager() fake_dcmanager_cermon_api = FakeDCManagerNotifications() @@ -1168,9 +1171,10 @@ class TestSubcloudManager(base.DCManagerTestCase): self.ctx, name='subcloud1', deploy_status=consts.DEPLOY_STATE_DONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE) sm = subcloud_manager.SubcloudManager() self.assertRaises(exceptions.SubcloudNotOnline, sm.update_subcloud, self.ctx, @@ -1182,9 +1186,10 @@ class TestSubcloudManager(base.DCManagerTestCase): self.ctx, name='subcloud1', deploy_status=consts.DEPLOY_STATE_DONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE) fake_dcmanager_cermon_api = FakeDCManagerNotifications() @@ -1217,9 +1222,10 @@ class TestSubcloudManager(base.DCManagerTestCase): self.ctx, 
name='subcloud1', deploy_status=consts.DEPLOY_STATE_DONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE) fake_dcmanager_cermon_api = FakeDCManagerNotifications() @@ -1507,16 +1513,20 @@ class TestSubcloudManager(base.DCManagerTestCase): self.assertIsNotNone(subcloud) # Set the subcloud to online/managed - db_api.subcloud_update(self.ctx, subcloud.id, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - availability_status=dccommon_consts.AVAILABILITY_ONLINE) + db_api.subcloud_update( + self.ctx, subcloud.id, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + availability_status=dccommon_consts.AVAILABILITY_ONLINE + ) ssm = subcloud_state_manager.SubcloudStateManager() with mock.patch.object(db_api, "subcloud_update") as subcloud_update_mock: - ssm.update_subcloud_availability(self.ctx, subcloud.region_name, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - update_state_only=True) + ssm.update_subcloud_availability( + self.ctx, subcloud.region_name, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + update_state_only=True + ) # Verify that the subcloud was not updated subcloud_update_mock.assert_not_called() @@ -1584,9 +1594,11 @@ class TestSubcloudManager(base.DCManagerTestCase): self.assertIsNotNone(subcloud) # Set the subcloud to online/managed - db_api.subcloud_update(self.ctx, subcloud.id, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - availability_status=dccommon_consts.AVAILABILITY_ONLINE) + db_api.subcloud_update( + self.ctx, + subcloud.id, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + availability_status=dccommon_consts.AVAILABILITY_ONLINE) ssm = subcloud_state_manager.SubcloudStateManager() @@ -1659,10 +1671,12 @@ class TestSubcloudManager(base.DCManagerTestCase): ssm = subcloud_state_manager.SubcloudStateManager() # Set the 
subcloud to online/managed - db_api.subcloud_update(self.ctx, subcloud.id, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - first_identity_sync_complete=True, - availability_status=dccommon_consts.AVAILABILITY_ONLINE) + db_api.subcloud_update( + self.ctx, + subcloud.id, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + first_identity_sync_complete=True, + availability_status=dccommon_consts.AVAILABILITY_ONLINE) # Update identity endpoints statuses endpoint = dccommon_consts.ENDPOINT_TYPE_IDENTITY @@ -1684,7 +1698,8 @@ class TestSubcloudManager(base.DCManagerTestCase): trigger_subcloud_audits = \ self.fake_dcmanager_audit_api.trigger_subcloud_audits.call_count - # Update identity to new status and get the count of the trigger again + # Update identity to new status and get the count of the trigger + # again ssm.update_subcloud_endpoint_status( self.ctx, subcloud_region=subcloud.region_name, endpoint_type=endpoint, @@ -1757,29 +1772,26 @@ class TestSubcloudManager(base.DCManagerTestCase): def test_get_ansible_filename(self): filename = cutils.get_ansible_filename('subcloud1', consts.INVENTORY_FILE_POSTFIX) - self.assertEqual(filename, - f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml') + self.assertEqual( + filename, + f'{ANS_PATH}/subcloud1_inventory.yml' + ) def test_compose_install_command(self): sm = subcloud_manager.SubcloudManager() install_command = sm.compose_install_command( 'subcloud1', - f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml', + f'{ANS_PATH}/subcloud1_inventory.yml', FAKE_PREVIOUS_SW_VERSION) self.assertEqual( install_command, - [ - 'ansible-playbook', - dccommon_consts.ANSIBLE_SUBCLOUD_INSTALL_PLAYBOOK, - '-i', f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml', - '--limit', 'subcloud1', - '-e', f"@{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1/install_values.yml", - '-e', "install_release_version=%s" % FAKE_PREVIOUS_SW_VERSION, - '-e', "rvmc_config_file=%s" % - 
os.path.join(dccommon_consts.ANSIBLE_OVERRIDES_PATH, - 'subcloud1', - dccommon_consts.RVMC_CONFIG_FILE_NAME) - ] + ['ansible-playbook', dccommon_consts.ANSIBLE_SUBCLOUD_INSTALL_PLAYBOOK, + '-i', f'{ANS_PATH}/subcloud1_inventory.yml', '--limit', 'subcloud1', + '-e', f"@{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1/" + 'install_values.yml', '-e', "install_release_version=%s" + % FAKE_PREVIOUS_SW_VERSION, '-e', "rvmc_config_file=%s" % os.path.join( + dccommon_consts.ANSIBLE_OVERRIDES_PATH, 'subcloud1', + dccommon_consts.RVMC_CONFIG_FILE_NAME)] ) @mock.patch('os.path.isfile') @@ -1800,7 +1812,8 @@ class TestSubcloudManager(base.DCManagerTestCase): cutils.get_playbook_for_software_version( subcloud_manager.ANSIBLE_SUBCLOUD_PLAYBOOK, FAKE_PREVIOUS_SW_VERSION), - '-i', f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml', + '-i', + f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml', '--limit', '%s' % subcloud_name, '-e', str("override_files_dir='%s' region_name=%s") % (dccommon_consts.ANSIBLE_OVERRIDES_PATH, subcloud_region), @@ -1817,14 +1830,14 @@ class TestSubcloudManager(base.DCManagerTestCase): "deploy_config": "subcloud1.yaml"} config_command = sm.compose_config_command( 'subcloud1', - f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml', + f'{ANS_PATH}/subcloud1_inventory.yml', fake_payload) self.assertEqual( config_command, [ 'ansible-playbook', 'test_playbook.yaml', '-e', - f'@{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_deploy_values.yml', '-i', - f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml', + f'@{ANS_PATH}/subcloud1_deploy_values.yml', '-i', + f'{ANS_PATH}/subcloud1_inventory.yml', '--limit', 'subcloud1' ] ) @@ -1854,10 +1867,10 @@ class TestSubcloudManager(base.DCManagerTestCase): cutils.get_playbook_for_software_version( subcloud_manager.ANSIBLE_SUBCLOUD_REHOME_PLAYBOOK, FAKE_PREVIOUS_SW_VERSION), - '-i', f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml', - 
'--limit', subcloud_name, - '--timeout', subcloud_manager.REHOME_PLAYBOOK_TIMEOUT, - '-e', extra_vars + '-i', + f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml', + '--limit', subcloud_name, '--timeout', + subcloud_manager.REHOME_PLAYBOOK_TIMEOUT, '-e', extra_vars ] ) @@ -1881,7 +1894,8 @@ class TestSubcloudManager(base.DCManagerTestCase): cutils.get_playbook_for_software_version( subcloud_manager.ANSIBLE_SUBCLOUD_REHOME_PLAYBOOK, SW_VERSION), - '-i', f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml', + '-i', + f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_inventory.yml', '--limit', subcloud_name, '--timeout', subcloud_manager.REHOME_PLAYBOOK_TIMEOUT, '-e', str("override_files_dir='%s' region_name=%s") % @@ -2232,12 +2246,13 @@ class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_DONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN) sm = subcloud_manager.SubcloudManager() sm.create_subcloud_backups(self.ctx, payload=values) @@ -2249,7 +2264,8 @@ class TestSubcloudManager(base.DCManagerTestCase): mock_run_playbook.assert_called_once() mock_is_healthy.assert_called_once() - # Verify that subcloud has the correct deploy status consts.PRESTAGE_STATE_PACKAGES + # Verify that subcloud has the correct deploy status + # consts.PRESTAGE_STATE_PACKAGES updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name) self.assertEqual(consts.BACKUP_STATE_PRE_BACKUP, updated_subcloud.backup_status) @@ -2264,19 +2280,21 @@ class 
TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_NONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN) sm = subcloud_manager.SubcloudManager() sm.create_subcloud_backups(self.ctx, payload=values) mock_parallel_group_operation.assert_called_once() - # Verify that subcloud has the correct deploy status consts.PRESTAGE_STATE_PACKAGES + # Verify that subcloud has the correct deploy status + # consts.PRESTAGE_STATE_PACKAGES updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name) self.assertEqual(consts.BACKUP_STATE_VALIDATE_FAILED, updated_subcloud.backup_status) @@ -2291,19 +2309,21 @@ class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_NONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN) sm = subcloud_manager.SubcloudManager() sm.create_subcloud_backups(self.ctx, payload=values) mock_parallel_group_operation.assert_called_once() - # Verify that subcloud has the correct deploy status consts.PRESTAGE_STATE_PACKAGES + # Verify that subcloud has the correct deploy status + # consts.PRESTAGE_STATE_PACKAGES updated_subcloud 
= db_api.subcloud_get_by_name(self.ctx, subcloud.name) self.assertEqual(consts.BACKUP_STATE_VALIDATE_FAILED, updated_subcloud.backup_status) @@ -2318,19 +2338,21 @@ class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_NONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN) sm = subcloud_manager.SubcloudManager() sm.create_subcloud_backups(self.ctx, payload=values) mock_parallel_group_operation.assert_called_once() - # Verify that subcloud has the correct deploy status consts.PRESTAGE_STATE_PACKAGES + # Verify that subcloud has the correct deploy status + # consts.PRESTAGE_STATE_PACKAGES updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name) self.assertEqual(consts.BACKUP_STATE_VALIDATE_FAILED, updated_subcloud.backup_status) @@ -2346,19 +2368,22 @@ class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_NONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN) sm = subcloud_manager.SubcloudManager() - sm.delete_subcloud_backups(self.ctx, payload=values, release_version=release_version) + sm.delete_subcloud_backups(self.ctx, 
payload=values, + release_version=release_version) mock_parallel_group_operation.assert_called_once() - # Verify that subcloud has the correct deploy status consts.PRESTAGE_STATE_PACKAGES + # Verify that subcloud has the correct deploy status + # consts.PRESTAGE_STATE_PACKAGES updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name) self.assertEqual(consts.BACKUP_STATE_UNKNOWN, updated_subcloud.backup_status) @@ -2374,19 +2399,22 @@ class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_NONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - backup_datetime=None, - backup_status=consts.BACKUP_STATE_UNKNOWN) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + backup_datetime=None, + backup_status=consts.BACKUP_STATE_UNKNOWN) sm = subcloud_manager.SubcloudManager() - sm.delete_subcloud_backups(self.ctx, payload=values, release_version=release_version) + sm.delete_subcloud_backups(self.ctx, payload=values, + release_version=release_version) mock_parallel_group_operation.assert_called_once() - # Verify that subcloud has the correct deploy status consts.PRESTAGE_STATE_PACKAGES + # Verify that subcloud has the correct deploy status + # consts.PRESTAGE_STATE_PACKAGES updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name) self.assertEqual(consts.BACKUP_STATE_UNKNOWN, updated_subcloud.backup_status) @@ -2405,11 +2433,11 @@ class TestSubcloudManager(base.DCManagerTestCase): @mock.patch.object(cutils, 'create_subcloud_inventory') @mock.patch.object(subcloud_manager, 'OpenStackDriver') def test_backup_create_subcloud( - self, mock_keystone_client, - mock_create_subcloud_inventory, mock_compose_backup_command, - mock_clear_subcloud_failure_alarm, mock_run_playbook, - 
mock_oam_address, mock_keyring, mock_create_backup_file, - mock_delete_subcloud_inventory, mock_is_healthy): + self, mock_keystone_client, + mock_create_subcloud_inventory, mock_compose_backup_command, + mock_clear_subcloud_failure_alarm, mock_run_playbook, + mock_oam_address, mock_keyring, mock_create_backup_file, + mock_delete_subcloud_inventory, mock_is_healthy): subcloud = self.create_subcloud_static( self.ctx, @@ -2419,7 +2447,7 @@ class TestSubcloudManager(base.DCManagerTestCase): values = copy.copy(FAKE_BACKUP_CREATE_LOAD_1) override_file = os_path.join( - dccommon_consts.ANSIBLE_OVERRIDES_PATH, subcloud.name + "_backup_create_values.yml" + ANS_PATH, subcloud.name + "_backup_create_values.yml" ) mock_create_backup_file.return_value = override_file @@ -2448,7 +2476,7 @@ class TestSubcloudManager(base.DCManagerTestCase): @mock.patch.object(subcloud_manager.SubcloudManager, '_create_subcloud_inventory_file') def test_backup_create_subcloud_fail_to_create( - self, mock_create_subcloud_inventory_file): + self, mock_create_subcloud_inventory_file): subcloud = self.create_subcloud_static( self.ctx, @@ -2478,10 +2506,10 @@ class TestSubcloudManager(base.DCManagerTestCase): '_create_backup_overrides_file') def test_delete_subcloud_backup( self, mock_create_backup_overrides_file, - mock_compose_backup_delete_command, - mock_run_playbook, mock_keystone_client, + mock_compose_backup_delete_command, mock_run_playbook, mock_keystone_client, mock_oam_address, mock_create_subcloud_inventory, - mock_delete_subcloud_inventory): + mock_delete_subcloud_inventory + ): subcloud = self.create_subcloud_static( self.ctx, @@ -2492,13 +2520,15 @@ class TestSubcloudManager(base.DCManagerTestCase): RELEASE_VERSION = '22.12' override_file = os_path.join( - dccommon_consts.ANSIBLE_OVERRIDES_PATH, subcloud.name + "_backup_delete_values.yml" + ANS_PATH, subcloud.name + "_backup_delete_values.yml" ) mock_create_backup_overrides_file.return_value = override_file sm = 
subcloud_manager.SubcloudManager() sm._delete_subcloud_backup( - self.ctx, payload=values, release_version=RELEASE_VERSION, subcloud=subcloud) + self.ctx, payload=values, + release_version=RELEASE_VERSION, subcloud=subcloud + ) mock_create_backup_overrides_file.assert_called_once() mock_compose_backup_delete_command.assert_called_once() @@ -2539,13 +2569,15 @@ class TestSubcloudManager(base.DCManagerTestCase): RELEASE_VERSION = '22.12' override_file = os_path.join( - dccommon_consts.ANSIBLE_OVERRIDES_PATH, subcloud.name + "_backup_delete_values.yml" + dccommon_consts.ANSIBLE_OVERRIDES_PATH, + subcloud.name + "_backup_delete_values.yml" ) mock_create_subcloud_inventory_file.return_value = override_file sm = subcloud_manager.SubcloudManager() sm._delete_subcloud_backup( - self.ctx, payload=values, release_version=RELEASE_VERSION, subcloud=subcloud) + self.ctx, payload=values, + release_version=RELEASE_VERSION, subcloud=subcloud) mock_create_subcloud_inventory_file.assert_called_once() mock_compose_backup_delete_command.assert_called_once() @@ -2582,10 +2614,11 @@ class TestSubcloudManager(base.DCManagerTestCase): mock_isdir): values = copy.copy(FAKE_PRESTAGE_PAYLOAD) - subcloud = self.create_subcloud_static(self.ctx, - name='subcloud1', - deploy_status=consts.DEPLOY_STATE_NONE, - software_version=FAKE_SUBCLOUD_SW_VERSION) + subcloud = self.create_subcloud_static( + self.ctx, + name='subcloud1', + deploy_status=consts.DEPLOY_STATE_NONE, + software_version=FAKE_SUBCLOUD_SW_VERSION) mock_run_ansible.return_value = None mock_get_filename_by_prefix.return_value = 'prestage_images_list.txt' @@ -2601,10 +2634,13 @@ class TestSubcloudManager(base.DCManagerTestCase): self.assertEqual(mock_run_ansible.call_count, 2) # Verify the "image_list_file" was passed to the prestage image playbook # for the remote prestage - self.assertIn('image_list_file', mock_run_ansible.call_args_list[1].args[1][5]) + self.assertIn('image_list_file', + mock_run_ansible.call_args_list[1].args[1][5]) 
# Verify the prestage request release was passed to the playbooks - self.assertIn(FAKE_PRESTAGE_RELEASE, mock_run_ansible.call_args_list[0].args[1][5]) - self.assertIn(FAKE_PRESTAGE_RELEASE, mock_run_ansible.call_args_list[1].args[1][5]) + self.assertIn(FAKE_PRESTAGE_RELEASE, + mock_run_ansible.call_args_list[0].args[1][5]) + self.assertIn(FAKE_PRESTAGE_RELEASE, + mock_run_ansible.call_args_list[1].args[1][5]) @mock.patch.object(os_path, 'isdir') @mock.patch.object(cutils, 'get_filename_by_prefix') @@ -2614,10 +2650,11 @@ class TestSubcloudManager(base.DCManagerTestCase): mock_isdir): values = copy.copy(FAKE_PRESTAGE_PAYLOAD) - subcloud = self.create_subcloud_static(self.ctx, - name='subcloud1', - deploy_status=consts.DEPLOY_STATE_NONE, - software_version=FAKE_SUBCLOUD_SW_VERSION) + subcloud = self.create_subcloud_static( + self.ctx, + name='subcloud1', + deploy_status=consts.DEPLOY_STATE_NONE, + software_version=FAKE_SUBCLOUD_SW_VERSION) mock_run_ansible.return_value = None mock_get_filename_by_prefix.return_value = None @@ -2644,10 +2681,11 @@ class TestSubcloudManager(base.DCManagerTestCase): mock_isdir): values = copy.copy(FAKE_PRESTAGE_PAYLOAD) - subcloud = self.create_subcloud_static(self.ctx, - name='subcloud1', - deploy_status=consts.DEPLOY_STATE_NONE, - software_version=FAKE_PRESTAGE_RELEASE) + subcloud = self.create_subcloud_static( + self.ctx, + name='subcloud1', + deploy_status=consts.DEPLOY_STATE_NONE, + software_version=FAKE_PRESTAGE_RELEASE) mock_run_ansible.return_value = None mock_get_filename_by_prefix.return_value = 'prestage_images_list.txt' @@ -2679,10 +2717,11 @@ class TestSubcloudManager(base.DCManagerTestCase): mock_isdir): values = copy.copy(FAKE_PRESTAGE_PAYLOAD) - subcloud = self.create_subcloud_static(self.ctx, - name='subcloud1', - deploy_status=consts.DEPLOY_STATE_NONE, - software_version=FAKE_PRESTAGE_RELEASE) + subcloud = self.create_subcloud_static( + self.ctx, + name='subcloud1', + deploy_status=consts.DEPLOY_STATE_NONE, + 
software_version=FAKE_PRESTAGE_RELEASE) mock_run_ansible.return_value = None mock_get_filename_by_prefix.return_value = None @@ -2701,8 +2740,10 @@ class TestSubcloudManager(base.DCManagerTestCase): self.assertTrue( 'image_list_file' not in mock_run_ansible.call_args_list[1].args[1][5]) # Verify the prestage request release was passed to the playbooks - self.assertIn(FAKE_PRESTAGE_RELEASE, mock_run_ansible.call_args_list[0].args[1][5]) - self.assertIn(FAKE_PRESTAGE_RELEASE, mock_run_ansible.call_args_list[1].args[1][5]) + self.assertIn(FAKE_PRESTAGE_RELEASE, + mock_run_ansible.call_args_list[0].args[1][5]) + self.assertIn(FAKE_PRESTAGE_RELEASE, + mock_run_ansible.call_args_list[1].args[1][5]) @mock.patch.object(prestage, 'prestage_images') @mock.patch.object(prestage, 'prestage_packages') @@ -2715,9 +2756,10 @@ class TestSubcloudManager(base.DCManagerTestCase): mock_prestage_images): values = copy.copy(FAKE_PRESTAGE_PAYLOAD) - subcloud = self.create_subcloud_static(self.ctx, - name='subcloud1', - deploy_status=consts.DEPLOY_STATE_NONE) + subcloud = self.create_subcloud_static( + self.ctx, + name='subcloud1', + deploy_status=consts.DEPLOY_STATE_NONE) prestage._prestage_standalone_thread(self.ctx, subcloud, payload=values) mock_run_ansible.return_value = None mock_prestage_packages.assert_called_once_with(self.ctx, subcloud, values) @@ -2749,8 +2791,9 @@ class TestSubcloudManager(base.DCManagerTestCase): # The expiry timestamp is likely a couple of seconds less than the time # the cache is set when it gets here so check if the expiry is greater than # 59m55s from now. 
- self.assertGreater(cached_regionone_data['expiry'], - datetime.datetime.utcnow() + datetime.timedelta(seconds=3595)) + self.assertGreater( + cached_regionone_data['expiry'], + datetime.datetime.utcnow() + datetime.timedelta(seconds=3595)) cached_regionone_data = sm._get_cached_regionone_data( mock_keystone_client, mock_sysinv_client) expiry2 = cached_regionone_data['expiry'] @@ -2776,13 +2819,15 @@ class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_DONE) - data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace('\'', '"') + data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace( + '\'', '"') - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - data_install=data_install) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + data_install=data_install) sm = subcloud_manager.SubcloudManager() sm.restore_subcloud_backups(self.ctx, payload=values) @@ -2814,13 +2859,15 @@ class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_NONE) - data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace('\'', '"') + data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace( + '\'', '"') - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED, - data_install=data_install) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED, + data_install=data_install) sm = subcloud_manager.SubcloudManager() return_log = sm.restore_subcloud_backups(self.ctx, payload=values) @@ -2847,13 +2894,15 @@ 
class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_NONE) - data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace('\'', '"') + data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace( + '\'', '"') - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED, - data_install=data_install) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED, + data_install=data_install) sm = subcloud_manager.SubcloudManager() sm.restore_subcloud_backups(self.ctx, payload=values) @@ -2875,10 +2924,11 @@ class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_NONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE, - management_state=dccommon_consts.MANAGEMENT_MANAGED) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + management_state=dccommon_consts.MANAGEMENT_MANAGED) sm = subcloud_manager.SubcloudManager() return_log = sm.restore_subcloud_backups(self.ctx, payload=values) @@ -2911,7 +2961,8 @@ class TestSubcloudManager(base.DCManagerTestCase): mock_subcloud_install.return_value = True mock_run_restore_playbook.return_value = True - data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace('\'', '"') + data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace( + '\'', '"') values = copy.copy(FAKE_BACKUP_RESTORE_LOAD_WITH_INSTALL) values['with_install'] = True @@ -2921,10 +2972,11 @@ class TestSubcloudManager(base.DCManagerTestCase): data_install=data_install, deploy_status=consts.DEPLOY_STATE_DONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - 
availability_status=dccommon_consts.AVAILABILITY_ONLINE, - management_state=dccommon_consts.MANAGEMENT_UNMANAGED) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + management_state=dccommon_consts.MANAGEMENT_UNMANAGED) sm = subcloud_manager.SubcloudManager() sm.restore_subcloud_backups(self.ctx, payload=values) @@ -2995,7 +3047,7 @@ class TestSubcloudManager(base.DCManagerTestCase): self.ctx, name="sub_migrateable", deploy_status=consts.DEPLOY_STATE_SECONDARY - ) + ) db_api.subcloud_update( self.ctx, subcloud.id, @@ -3006,7 +3058,7 @@ class TestSubcloudManager(base.DCManagerTestCase): self.ctx, name="sub_no_rehome_data", deploy_status=consts.DEPLOY_STATE_SECONDARY - ) + ) db_api.subcloud_update( self.ctx, subcloud.id, @@ -3015,7 +3067,7 @@ class TestSubcloudManager(base.DCManagerTestCase): subcloud = self.create_subcloud_static( self.ctx, name="sub_no_secondary" - ) + ) db_api.subcloud_update( self.ctx, subcloud.id, @@ -3025,7 +3077,7 @@ class TestSubcloudManager(base.DCManagerTestCase): subcloud = self.create_subcloud_static( self.ctx, name="sub_no_saved_payload" - ) + ) db_api.subcloud_update( self.ctx, subcloud.id, @@ -3044,7 +3096,8 @@ class TestSubcloudManager(base.DCManagerTestCase): @mock.patch.object(subcloud_manager.SubcloudManager, '_unmanage_system_peer_subcloud') def test_migrate_manage_subcloud_called_unmanage_peer_subcloud( - self, mock_unmanage_system_peer_subcloud): + self, mock_unmanage_system_peer_subcloud + ): sm = subcloud_manager.SubcloudManager() system_peer_test = test_system_peer_manager.TestSystemPeerManager system_peer = system_peer_test.create_system_peer_static(self.ctx) @@ -3056,7 +3109,8 @@ class TestSubcloudManager(base.DCManagerTestCase): @mock.patch.object(subcloud_manager.SubcloudManager, '_unmanage_system_peer_subcloud') def test_migrate_manage_subcloud_not_called_unmanage_peer_subcloud( - self, mock_unmanage_system_peer_subcloud): + self, 
mock_unmanage_system_peer_subcloud + ): sm = subcloud_manager.SubcloudManager() subcloud = self.create_subcloud_static(self.ctx) # Give empty system peers @@ -3068,7 +3122,8 @@ class TestSubcloudManager(base.DCManagerTestCase): @mock.patch.object(system_peer_manager.SystemPeerManager, 'get_peer_dc_client') def test_unmanage_system_peer_subcloud_ret_false( - self, mock_get_peer_dc_client): + self, mock_get_peer_dc_client + ): sm = subcloud_manager.SubcloudManager() system_peer_test = test_system_peer_manager.TestSystemPeerManager system_peer = system_peer_test.create_system_peer_static(self.ctx) @@ -3114,7 +3169,8 @@ class TestSubcloudManager(base.DCManagerTestCase): # Call add_subcloud method with the test data sm.add_subcloud(mock.MagicMock(), 1, values) - # Assert that the rehome_subcloud and run_deploy_phases methods were not called + # Assert that the rehome_subcloud and run_deploy_phases methods were not + # called mock_rehome_subcloud.assert_not_called() mock_run_deploy_phases.assert_not_called() @@ -3126,15 +3182,17 @@ class TestSubcloudManager(base.DCManagerTestCase): def test_update_subcloud_bootstrap_values(self): fake_bootstrap_values = "{'name': 'TestSubcloud', 'system_mode': 'simplex'}" - fake_result = '{"saved_payload": {"name": "TestSubcloud", "system_mode": "simplex"}}' + fake_result = ('{"saved_payload": {"name": "TestSubcloud", ' + '"system_mode": "simplex"}}') subcloud = self.create_subcloud_static( self.ctx, name='subcloud1', deploy_status=consts.DEPLOY_STATE_DONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE) fake_dcmanager_cermon_api = FakeDCManagerNotifications() @@ -3163,9 +3221,10 @@ class TestSubcloudManager(base.DCManagerTestCase): name='subcloud1', deploy_status=consts.DEPLOY_STATE_DONE) - db_api.subcloud_update(self.ctx, - subcloud.id, - 
availability_status=dccommon_consts.AVAILABILITY_ONLINE) + db_api.subcloud_update( + self.ctx, + subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE) fake_dcmanager_cermon_api = FakeDCManagerNotifications() diff --git a/distributedcloud/dcmanager/tests/unit/manager/test_system_peer_manager.py b/distributedcloud/dcmanager/tests/unit/manager/test_system_peer_manager.py index 0ce0b4ffc..346bfda16 100644 --- a/distributedcloud/dcmanager/tests/unit/manager/test_system_peer_manager.py +++ b/distributedcloud/dcmanager/tests/unit/manager/test_system_peer_manager.py @@ -1,13 +1,14 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # import json -import mock import uuid +import mock + from dccommon import exceptions as dccommon_exceptions from dcmanager.db.sqlalchemy import api as db_api from dcmanager.manager import system_peer_manager @@ -432,8 +433,8 @@ class TestSystemPeerManager(base.DCManagerTestCase): @mock.patch.object(system_peer_manager, 'SysinvClient') @mock.patch.object(system_peer_manager, 'DcmanagerClient') def test_delete_peer_group_association_peer_site_association_not_exsit( - self, mock_dc_client, mock_sysinv_client, mock_keystone_client, - mock_utils): + self, mock_dc_client, mock_sysinv_client, mock_keystone_client, mock_utils + ): mock_keystone_client().keystone_client = FakeKeystoneClient() mock_sysinv_client.return_value = FakeSysinvClient() mock_dc_client.return_value = FakeDcmanagerClient() diff --git a/distributedcloud/dcmanager/tests/unit/objects/test_base.py b/distributedcloud/dcmanager/tests/unit/objects/test_base.py index 504e600b1..9900c7d92 100644 --- a/distributedcloud/dcmanager/tests/unit/objects/test_base.py +++ b/distributedcloud/dcmanager/tests/unit/objects/test_base.py @@ -1,5 +1,5 @@ # Copyright (c) 2015 Ericsson AB. -# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc. 
+# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,12 +16,11 @@ # import mock +from oslo_versionedobjects import fields as obj_fields from dcmanager.objects import base as obj_base from dcmanager.tests import base -from oslo_versionedobjects import fields as obj_fields - class TestBaseObject(base.DCManagerTestCase): def test_base_class(self): diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/fakes.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/fakes.py index f0213ceb3..ae1bb7592 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/fakes.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/fakes.py @@ -1,15 +1,16 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -import mock + import uuid +import mock + from dccommon import consts as dccommon_consts from dcmanager.common import consts - PREVIOUS_PREVIOUS_VERSION = '01.23' PREVIOUS_VERSION = '12.34' UPGRADED_VERSION = '56.78' diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/firmware/test_finishing_vim_strategy.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/firmware/test_finishing_vim_strategy.py index 0e9727acb..63e14a901 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/firmware/test_finishing_vim_strategy.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/firmware/test_finishing_vim_strategy.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2022 Wind River Systems, Inc. +# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import mock from dccommon.drivers.openstack import vim @@ -10,15 +11,16 @@ from dcmanager.common import consts from dcmanager.orchestrator.states.firmware import finishing_fw_update from dcmanager.tests.unit.fakes import FakeVimStrategy -from dcmanager.tests.unit.orchestrator.states.firmware.test_base \ - import TestFwUpdateState +from dcmanager.tests.unit.orchestrator.states.firmware.test_base import \ + TestFwUpdateState STRATEGY_APPLIED = FakeVimStrategy(state=vim.STATE_APPLIED) -@mock.patch("dcmanager.orchestrator.states.firmware.finishing_fw_update.DEFAULT_MAX_FAILED_QUERIES", - 3) -@mock.patch("dcmanager.orchestrator.states.firmware.finishing_fw_update.DEFAULT_FAILED_SLEEP", 1) +@mock.patch("dcmanager.orchestrator.states.firmware." + "finishing_fw_update.DEFAULT_MAX_FAILED_QUERIES", 3) +@mock.patch("dcmanager.orchestrator.states.firmware." + "finishing_fw_update.DEFAULT_FAILED_SLEEP", 1) class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState): def setUp(self): @@ -40,7 +42,8 @@ class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState): self.sysinv_client.get_hosts = mock.MagicMock() self.sysinv_client.get_host_device_list = mock.MagicMock() - p = mock.patch.object(finishing_fw_update.FinishingFwUpdateState, 'align_subcloud_status') + p = mock.patch.object(finishing_fw_update.FinishingFwUpdateState, + 'align_subcloud_status') self.mock_align = p.start() self.addCleanup(p.stop) diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/firmware/test_importing_firmware.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/firmware/test_importing_firmware.py index 92d9e0cce..199fd64eb 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/firmware/test_importing_firmware.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/firmware/test_importing_firmware.py @@ -1,69 +1,81 @@ # -# Copyright (c) 2020, 2022 Wind River Systems, Inc. 
+# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -import mock + import uuid -from dcmanager.common import consts +import mock +from dcmanager.common import consts from dcmanager.tests.unit.orchestrator.states.fakes import FakeController from dcmanager.tests.unit.orchestrator.states.fakes import FakeDevice from dcmanager.tests.unit.orchestrator.states.fakes import FakeDeviceImage from dcmanager.tests.unit.orchestrator.states.fakes import FakeDeviceLabel -from dcmanager.tests.unit.orchestrator.states.firmware.test_base \ - import TestFwUpdateState +from dcmanager.tests.unit.orchestrator.states.firmware.test_base import \ + TestFwUpdateState -VENDOR_1 = '1001' -VENDOR_2 = '2002' -VENDOR_3 = '3003' +VENDOR_1 = "1001" +VENDOR_2 = "2002" +VENDOR_3 = "3003" -VENDOR_DEVICE_1 = '9009' +VENDOR_DEVICE_1 = "9009" FAKE_SUBCLOUD_CONTROLLER = FakeController() -FAKE_SUBCLOUD_DEVICE = FakeDevice(str(uuid.uuid4()), - pvendor_id=VENDOR_1, - pdevice_id=VENDOR_DEVICE_1) -FAKE_SUBCLOUD_LABEL = FakeDeviceLabel(label_key='abc', - label_value='123', - pcidevice_uuid=FAKE_SUBCLOUD_DEVICE.uuid) -FAKE_ALL_LABEL = [{}, ] +FAKE_SUBCLOUD_DEVICE = FakeDevice( + str(uuid.uuid4()), pvendor_id=VENDOR_1, pdevice_id=VENDOR_DEVICE_1 +) +FAKE_SUBCLOUD_LABEL = FakeDeviceLabel( + label_key="abc", label_value="123", pcidevice_uuid=FAKE_SUBCLOUD_DEVICE.uuid +) +FAKE_ALL_LABEL = [ + {}, +] # These three enabled images are for three different devices -FAKE_IMAGE_1 = FakeDeviceImage(str(uuid.uuid4()), - pci_vendor=VENDOR_1, - pci_device=VENDOR_DEVICE_1, - applied=True, - applied_labels=FAKE_ALL_LABEL) -FAKE_IMAGE_2 = FakeDeviceImage(str(uuid.uuid4()), - pci_vendor=VENDOR_2, - applied=True, - applied_labels=FAKE_ALL_LABEL) -FAKE_IMAGE_3 = FakeDeviceImage(str(uuid.uuid4()), - pci_vendor=VENDOR_3, - applied=True, - applied_labels=FAKE_ALL_LABEL) +FAKE_IMAGE_1 = FakeDeviceImage( + str(uuid.uuid4()), + pci_vendor=VENDOR_1, + pci_device=VENDOR_DEVICE_1, + 
applied=True, + applied_labels=FAKE_ALL_LABEL, +) +FAKE_IMAGE_2 = FakeDeviceImage( + str(uuid.uuid4()), + pci_vendor=VENDOR_2, + applied=True, + applied_labels=FAKE_ALL_LABEL, +) +FAKE_IMAGE_3 = FakeDeviceImage( + str(uuid.uuid4()), + pci_vendor=VENDOR_3, + applied=True, + applied_labels=FAKE_ALL_LABEL, +) EMPTY_DEVICE_IMAGES = [] -THREE_DEVICE_IMAGES = [FAKE_IMAGE_1, FAKE_IMAGE_2, FAKE_IMAGE_3, ] +THREE_DEVICE_IMAGES = [ + FAKE_IMAGE_1, + FAKE_IMAGE_2, + FAKE_IMAGE_3, +] class TestFwUpdateImportingFirmwareStage(TestFwUpdateState): - def setUp(self): super(TestFwUpdateImportingFirmwareStage, self).setUp() # set the next state in the chain (when this state is successful) - self.on_success_state = \ - consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY + self.on_success_state = consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY # Add the subcloud being processed by this unit test self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_IMPORTING_FIRMWARE) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_IMPORTING_FIRMWARE + ) # Add mock API endpoints for sysinv client calls invcked by this state self.sysinv_client.get_device_images = mock.MagicMock() @@ -73,16 +85,19 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState): self.sysinv_client.upload_device_image = mock.MagicMock() # get_hosts is only called on subcloud self.sysinv_client.get_hosts = mock.MagicMock() - self.sysinv_client.get_hosts.return_value = \ - [FAKE_SUBCLOUD_CONTROLLER, ] + self.sysinv_client.get_hosts.return_value = [ + FAKE_SUBCLOUD_CONTROLLER, + ] # get_host_device_list is only called on subcloud self.sysinv_client.get_host_device_list = mock.MagicMock() - self.sysinv_client.get_host_device_list.return_value = \ - [FAKE_SUBCLOUD_DEVICE, ] + self.sysinv_client.get_host_device_list.return_value = [ + 
FAKE_SUBCLOUD_DEVICE, + ] # the labels for the device on the subcloud self.sysinv_client.get_device_label_list = mock.MagicMock() - self.sysinv_client.get_device_label_list.return_value = \ - [FAKE_SUBCLOUD_LABEL, ] + self.sysinv_client.get_device_label_list.return_value = [ + FAKE_SUBCLOUD_LABEL, + ] def test_importing_firmware_empty_system_controller(self): """Test importing firmware step when system controller has no FW""" @@ -91,7 +106,8 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState): # second query is subcloud self.sysinv_client.get_device_images.side_effect = [ EMPTY_DEVICE_IMAGES, - THREE_DEVICE_IMAGES, ] + THREE_DEVICE_IMAGES, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -106,10 +122,11 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState): self.sysinv_client.apply_device_image.assert_not_called() # Successful promotion to next state - self.assert_step_updated(self.strategy_step.subcloud_id, - self.on_success_state) + self.assert_step_updated( + self.strategy_step.subcloud_id, self.on_success_state + ) - @mock.patch('os.path.isfile') + @mock.patch("os.path.isfile") def test_importing_firmware_empty_subcloud(self, mock_isfile): """Test importing firmware step when subcloud has no FW""" @@ -118,7 +135,8 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState): # second query is subcloud self.sysinv_client.get_device_images.side_effect = [ THREE_DEVICE_IMAGES, - EMPTY_DEVICE_IMAGES, ] + EMPTY_DEVICE_IMAGES, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -135,8 +153,9 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState): self.assertEqual(1, self.sysinv_client.apply_device_image.call_count) # Successful promotion to next state - self.assert_step_updated(self.strategy_step.subcloud_id, - self.on_success_state) + self.assert_step_updated( + self.strategy_step.subcloud_id, 
self.on_success_state + ) def test_importing_firmware_skips(self): """Test importing firmware skips when subcloud matches controller.""" @@ -146,7 +165,8 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState): # Both are the same self.sysinv_client.get_device_images.side_effect = [ THREE_DEVICE_IMAGES, - THREE_DEVICE_IMAGES, ] + THREE_DEVICE_IMAGES, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -156,5 +176,6 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState): self.sysinv_client.upload_device_image.assert_not_called() # On success, should have moved to the next state - self.assert_step_updated(self.strategy_step.subcloud_id, - self.on_success_state) + self.assert_step_updated( + self.strategy_step.subcloud_id, self.on_success_state + ) diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/kube/test_creating_vim_kube_upgrade_strategy.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/kube/test_creating_vim_kube_upgrade_strategy.py index 677991c60..a8a77c25b 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/kube/test_creating_vim_kube_upgrade_strategy.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/kube/test_creating_vim_kube_upgrade_strategy.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2021 Wind River Systems, Inc. +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import mock from dccommon.drivers.openstack import vim @@ -36,21 +37,21 @@ KUBE_VERSION_LIST = [ version='v1.2.5', target=False, state='available'), - ] +] KUBE_VERSION_LIST_SC = [ FakeKubeVersion(obj_id=1, version='v1.2.5', target=True, state='active') - ] +] KUBE_VERSION_LIST_SC_2 = [ FakeKubeVersion(obj_id=1, version='v1.2.4', target=True, state='active') - ] +] class TestCreatingVIMKubeUpgradeStrategyStage(CreatingVIMStrategyStageMixin, diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/kube/test_pre_check.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/kube/test_pre_check.py index 7973ce815..e4ace1001 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/kube/test_pre_check.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/kube/test_pre_check.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020, 2022 Wind River Systems, Inc. +# Copyright (c) 2020, 2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import mock from dcmanager.common.consts import DEPLOY_STATE_DONE @@ -72,7 +73,7 @@ KUBE_VERSION_LIST = [ version='v1.2.5', target=False, state='available'), - ] +] KUBE_VERSION_LIST_2 = [ FakeKubeVersion(obj_id=1, @@ -87,7 +88,7 @@ KUBE_VERSION_LIST_2 = [ version='v1.2.6', target=False, state='available'), - ] +] class TestKubeUpgradePreCheckStage(TestKubeUpgradeState): @@ -100,8 +101,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK) # mock there not being a kube upgrade in progress self.sysinv_client.get_kube_upgrades = mock.MagicMock() @@ -109,7 +110,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState): self.fm_client.get_alarms = mock.MagicMock() self.sysinv_client.get_kube_upgrade_health = mock.MagicMock() - self.sysinv_client.get_kube_upgrade_health.return_value = KUBERNETES_UPGRADE_HEALTH_RESPONSE_SUCCESS + self.sysinv_client.get_kube_upgrade_health.return_value = ( + KUBERNETES_UPGRADE_HEALTH_RESPONSE_SUCCESS) # mock the get_kube_versions calls self.sysinv_client.get_kube_versions = mock.MagicMock() @@ -151,8 +153,10 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState): self.subcloud.id, deploy_status=DEPLOY_STATE_DONE) - self.fm_client.get_alarms.return_value = [MEMORY_THRESHOLD_ALARM, KUBERNETES_UPGRADE_ALARM] - self.sysinv_client.get_kube_upgrade_health.return_value = KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_ALARM + self.fm_client.get_alarms.return_value = [MEMORY_THRESHOLD_ALARM, + KUBERNETES_UPGRADE_ALARM] + self.sysinv_client.get_kube_upgrade_health.return_value = ( + KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_ALARM) 
self.sysinv_client.get_kube_upgrades.return_value = [FakeKubeUpgrade()] self.sysinv_client.get_kube_versions.return_value = [ FakeKubeVersion(obj_id=1, @@ -165,15 +169,18 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState): self.assert_step_updated(self.strategy_step.subcloud_id, STRATEGY_STATE_FAILED) - def test_pre_check_subcloud_failed_health_check_with_allowed_management_alarms(self): + def test_pre_check_subcloud_failed_health_check_with_allowed_management_alarms( + self): """Test pre check step where subcloud has management affecting alarms""" db_api.subcloud_update(self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE) - self.fm_client.get_alarms.return_value = [CONFIG_OUT_OF_DATE_ALARM, KUBERNETES_UPGRADE_ALARM] - self.sysinv_client.get_kube_upgrade_health.return_value = KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_ALARM + self.fm_client.get_alarms.return_value = [CONFIG_OUT_OF_DATE_ALARM, + KUBERNETES_UPGRADE_ALARM] + self.sysinv_client.get_kube_upgrade_health.return_value = ( + KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_ALARM) self.sysinv_client.get_kube_upgrades.return_value = [FakeKubeUpgrade()] self.sysinv_client.get_kube_versions.return_value = [ FakeKubeVersion(obj_id=1, @@ -183,8 +190,9 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState): ] self.worker.perform_state_action(self.strategy_step) self.sysinv_client.get_kube_upgrade_health.assert_called_once() - self.assert_step_updated(self.strategy_step.subcloud_id, - STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY) + self.assert_step_updated( + self.strategy_step.subcloud_id, + STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY) def test_pre_check_subcloud_failed_health_check_with_non_management_alarms(self): """Test pre check step where subcloud has non-management affecting alarms""" @@ -193,7 +201,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState): self.subcloud.id, deploy_status=DEPLOY_STATE_DONE) - 
self.sysinv_client.get_kube_upgrade_health.return_value = KUBERNETES_UPGRADE_HEALTH_RESPONSE_NON_MGMT_AFFECTING_ALARM + self.sysinv_client.get_kube_upgrade_health.return_value = ( + KUBERNETES_UPGRADE_HEALTH_RESPONSE_NON_MGMT_AFFECTING_ALARM) self.sysinv_client.get_kube_upgrades.return_value = [FakeKubeUpgrade()] self.sysinv_client.get_kube_versions.return_value = [ FakeKubeVersion(obj_id=1, @@ -204,8 +213,9 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState): self.worker.perform_state_action(self.strategy_step) self.sysinv_client.get_kube_upgrade_health.assert_called_once() - self.assert_step_updated(self.strategy_step.subcloud_id, - STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY) + self.assert_step_updated( + self.strategy_step.subcloud_id, + STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY) def test_pre_check_no_sys_controller_active_version(self): """Test pre check step where system controller has no active version diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_creating_vim_patch_strategy.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_creating_vim_patch_strategy.py index 8437f2836..ab12e074d 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_creating_vim_patch_strategy.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_creating_vim_patch_strategy.py @@ -1,10 +1,13 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + from collections import namedtuple +import mock + from dccommon.drivers.openstack import vim from dcmanager.common import consts from dcmanager.tests.unit.fakes import FakeVimStrategy @@ -12,7 +15,6 @@ from dcmanager.tests.unit.orchestrator.states.patch.test_base import \ TestPatchState from dcmanager.tests.unit.orchestrator.states.test_creating_vim_strategy import \ CreatingVIMStrategyStageMixin -import mock BuildPhase = namedtuple("BuildPhase", "reason") diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_finishing_patch_strategy.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_finishing_patch_strategy.py index f7f4f6356..297b9255c 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_finishing_patch_strategy.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_finishing_patch_strategy.py @@ -1,14 +1,16 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + +import mock + from dcmanager.common import consts from dcmanager.orchestrator.orch_thread import OrchThread from dcmanager.tests.unit.orchestrator.states.fakes import FakeLoad from dcmanager.tests.unit.orchestrator.states.patch.test_base import \ TestPatchState -import mock REGION_ONE_PATCHES = {"DC.1": {"sw_version": "20.12", "repostate": "Applied", diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_pre_check.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_pre_check.py index 1f7177305..2aa5977c5 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_pre_check.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_pre_check.py @@ -1,14 +1,16 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + +import mock + from dcmanager.common import consts from dcmanager.orchestrator.states.patch.pre_check import IGNORED_ALARMS_IDS from dcmanager.tests.unit.orchestrator.states.fakes import FakeAlarm from dcmanager.tests.unit.orchestrator.states.patch.test_base import \ TestPatchState -import mock class TestPatchPreCheckStage(TestPatchState): diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_updating_patches.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_updating_patches.py index c7bdacfdf..23eb4e266 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_updating_patches.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/patch/test_updating_patches.py @@ -1,17 +1,19 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + from os import path as os_path +import mock + from dcmanager.common import consts from dcmanager.orchestrator.orch_thread import OrchThread from dcmanager.tests.unit.common import fake_strategy from dcmanager.tests.unit.orchestrator.states.fakes import FakeLoad from dcmanager.tests.unit.orchestrator.states.patch.test_base import \ TestPatchState -import mock REGION_ONE_PATCHES = {"DC.1": {"sw_version": "20.12", "repostate": "Applied", diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/prestage/test_states.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/prestage/test_states.py index b19443414..1d75d04c2 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/prestage/test_states.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/prestage/test_states.py @@ -1,30 +1,28 @@ # -# Copyright (c) 2022-2023 Wind River Systems, Inc. +# Copyright (c) 2022-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # import base64 -import mock import threading -from dcmanager.common import consts -from dcmanager.common import exceptions +import mock +from dcmanager.common import consts from dcmanager.common.consts import DEPLOY_STATE_DONE from dcmanager.common.consts import STRATEGY_STATE_COMPLETE from dcmanager.common.consts import STRATEGY_STATE_FAILED from dcmanager.common.consts import STRATEGY_STATE_PRESTAGE_IMAGES from dcmanager.common.consts import STRATEGY_STATE_PRESTAGE_PACKAGES from dcmanager.common.consts import STRATEGY_STATE_PRESTAGE_PRE_CHECK - +from dcmanager.common import exceptions from dcmanager.db.sqlalchemy import api as db_api - from dcmanager.tests.unit.common import fake_strategy from dcmanager.tests.unit.orchestrator.test_base import TestSwUpdate OAM_FLOATING_IP = "10.10.10.12" -FAKE_PASSWORD = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') +FAKE_PASSWORD = (base64.b64encode("testpass".encode("utf-8"))).decode("ascii") class TestPrestage(TestSwUpdate): @@ -44,29 +42,30 @@ class TestPrestagePreCheckState(TestPrestage): # The subcloud is online, managed with deploy_state 'installed' self.subcloud = self.setup_subcloud() - p = mock.patch('dcmanager.common.prestage.validate_prestage') + p = mock.patch("dcmanager.common.prestage.validate_prestage") self.mock_prestage_subcloud = p.start() self.mock_prestage_subcloud.return_value = OAM_FLOATING_IP self.addCleanup(p.stop) - t = mock.patch.object(threading.Thread, 'start') + t = mock.patch.object(threading.Thread, "start") self.mock_thread_start = t.start() self.addCleanup(t.stop) # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, STRATEGY_STATE_PRESTAGE_PRE_CHECK) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, STRATEGY_STATE_PRESTAGE_PRE_CHECK + ) def test_prestage_prepare_no_extra_args(self): next_state = STRATEGY_STATE_FAILED # Update the subcloud to 
have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE + ) self.strategy = fake_strategy.create_fake_strategy( - self.ctx, - self.DEFAULT_STRATEGY_TYPE) + self.ctx, self.DEFAULT_STRATEGY_TYPE + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -77,63 +76,68 @@ class TestPrestagePreCheckState(TestPrestage): def test_prestage_prepare_validate_failed(self): next_state = STRATEGY_STATE_FAILED # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE + ) - self.mock_prestage_subcloud.side_effect = exceptions.PrestagePreCheckFailedException( - subcloud=None, orch_skip=False, details="test") + self.mock_prestage_subcloud.side_effect = ( + exceptions.PrestagePreCheckFailedException( + subcloud=None, orch_skip=False, details="test" + ) + ) - extra_args = {"sysadmin_password": FAKE_PASSWORD, - "force": False, - 'oam_floating_ip': OAM_FLOATING_IP} + extra_args = { + "sysadmin_password": FAKE_PASSWORD, + "force": False, + "oam_floating_ip": OAM_FLOATING_IP, + } self.strategy = fake_strategy.create_fake_strategy( - self.ctx, - self.DEFAULT_STRATEGY_TYPE, - extra_args=extra_args) + self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) - new_strategy_step = db_api.strategy_step_get(self.ctx, - self.subcloud.id) + new_strategy_step = db_api.strategy_step_get(self.ctx, self.subcloud.id) # Verify the transition to the expected next state self.assert_step_updated(self.strategy_step.subcloud_id, next_state) # The strategy step details field should be updated with the Exception 
string - self.assertTrue('test' in str(new_strategy_step.details)) + self.assertTrue("test" in str(new_strategy_step.details)) def test_prestage_prepare_validate_failed_skipped(self): next_state = STRATEGY_STATE_COMPLETE # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE + ) - self.mock_prestage_subcloud.side_effect = exceptions.PrestagePreCheckFailedException( - subcloud=None, orch_skip=True, details="test") + self.mock_prestage_subcloud.side_effect = ( + exceptions.PrestagePreCheckFailedException( + subcloud=None, orch_skip=True, details="test" + ) + ) - extra_args = {"sysadmin_password": FAKE_PASSWORD, - "force": False, - 'oam_floating_ip': OAM_FLOATING_IP} + extra_args = { + "sysadmin_password": FAKE_PASSWORD, + "force": False, + "oam_floating_ip": OAM_FLOATING_IP, + } self.strategy = fake_strategy.create_fake_strategy( - self.ctx, - self.DEFAULT_STRATEGY_TYPE, - extra_args=extra_args) + self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) - new_strategy_step = db_api.strategy_step_get(self.ctx, - self.subcloud.id) + new_strategy_step = db_api.strategy_step_get(self.ctx, self.subcloud.id) # Verify the transition to the expected next state self.assert_step_updated(self.strategy_step.subcloud_id, next_state) # The strategy step details field should be updated with the Exception string - self.assertTrue('test' in str(new_strategy_step.details)) + self.assertTrue("test" in str(new_strategy_step.details)) class TestPrestagePackageState(TestPrestage): - def setUp(self): super(TestPrestagePackageState, self).setUp() @@ -141,32 +145,31 @@ class TestPrestagePackageState(TestPrestage): # The subcloud is online, managed with deploy_state 'installed' self.subcloud = 
self.setup_subcloud() - p = mock.patch('dcmanager.common.prestage.prestage_packages') + p = mock.patch("dcmanager.common.prestage.prestage_packages") self.mock_prestage_packages = p.start() self.addCleanup(p.stop) # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, STRATEGY_STATE_PRESTAGE_PACKAGES) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, STRATEGY_STATE_PRESTAGE_PACKAGES + ) def test_prestage_prestage_package(self): - next_state = STRATEGY_STATE_PRESTAGE_IMAGES # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE + ) - oam_floating_ip_dict = { - self.subcloud.name: OAM_FLOATING_IP + oam_floating_ip_dict = {self.subcloud.name: OAM_FLOATING_IP} + extra_args = { + "sysadmin_password": FAKE_PASSWORD, + "force": False, + "oam_floating_ip_dict": oam_floating_ip_dict, } - extra_args = {"sysadmin_password": FAKE_PASSWORD, - "force": False, - "oam_floating_ip_dict": oam_floating_ip_dict} self.strategy = fake_strategy.create_fake_strategy( - self.ctx, - self.DEFAULT_STRATEGY_TYPE, - extra_args=extra_args) + self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -176,7 +179,6 @@ class TestPrestagePackageState(TestPrestage): class TestPrestageImagesState(TestPrestage): - def setUp(self): super(TestPrestageImagesState, self).setUp() @@ -184,32 +186,31 @@ class TestPrestageImagesState(TestPrestage): # The subcloud is online, managed with deploy_state 'installed' self.subcloud = self.setup_subcloud() - p = mock.patch('dcmanager.common.prestage.prestage_images') + p = mock.patch("dcmanager.common.prestage.prestage_images") self.mock_prestage_packages = p.start() 
self.addCleanup(p.stop) # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, STRATEGY_STATE_PRESTAGE_IMAGES) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, STRATEGY_STATE_PRESTAGE_IMAGES + ) def test_prestage_prestage_images(self): - next_state = STRATEGY_STATE_COMPLETE # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE + ) - oam_floating_ip_dict = { - self.subcloud.name: OAM_FLOATING_IP + oam_floating_ip_dict = {self.subcloud.name: OAM_FLOATING_IP} + extra_args = { + "sysadmin_password": FAKE_PASSWORD, + "force": False, + "oam_floating_ip_dict": oam_floating_ip_dict, } - extra_args = {"sysadmin_password": FAKE_PASSWORD, - "force": False, - "oam_floating_ip_dict": oam_floating_ip_dict} self.strategy = fake_strategy.create_fake_strategy( - self.ctx, - self.DEFAULT_STRATEGY_TYPE, - extra_args=extra_args) + self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_base.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_base.py index 200089c94..a958e571b 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_base.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_base.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -26,7 +26,8 @@ class TestSoftwareOrchestrator(TestSwUpdate): return_value=self.software_client, ) self.sysinv_cache_client_mock = mock.patch( - "%s.get_sysinv_client" % CACHE_CLIENT_PATH, return_value=self.sysinv_client + "%s.get_sysinv_client" % CACHE_CLIENT_PATH, + return_value=self.sysinv_client ) self.software_cache_client_mock.start() self.sysinv_cache_client_mock.start() diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_lock_controller.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_lock_controller.py index 299387d5b..ecd845798 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_lock_controller.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_lock_controller.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import itertools import mock @@ -43,14 +44,17 @@ class TestSwLockSimplexStage(TestSoftwareOrchestrator): self.setup_fake_controllers('controller-0') def setup_fake_controllers(self, host_name): - self.CONTROLLER_UNLOCKED = FakeController(hostname=host_name, - administrative=consts.ADMIN_UNLOCKED) + self.CONTROLLER_UNLOCKED = FakeController( + hostname=host_name, administrative=consts.ADMIN_UNLOCKED + ) self.CONTROLLER_LOCKED = FakeController(hostname=host_name, administrative=consts.ADMIN_LOCKED) - self.CONTROLLER_LOCKING = FakeController(hostname=host_name, - administrative=consts.ADMIN_UNLOCKED, - ihost_action='lock', - task='Locking') + self.CONTROLLER_LOCKING = FakeController( + hostname=host_name, + administrative=consts.ADMIN_UNLOCKED, + ihost_action='lock', + task='Locking' + ) self.CONTROLLER_LOCKING_FAILED = \ FakeController(hostname=host_name, administrative=consts.ADMIN_UNLOCKED, diff --git 
a/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_upload.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_upload.py index 70f12ac34..9c14fe778 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_upload.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/software/test_upload.py @@ -1,15 +1,19 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -import mock + import os + +import mock + from oslo_config import cfg from dcmanager.common import consts from dcmanager.orchestrator.states.software.upload import UploadState -from dcmanager.tests.unit.orchestrator.states.software.test_base import TestSoftwareOrchestrator +from dcmanager.tests.unit.orchestrator.states.software.test_base \ + import TestSoftwareOrchestrator REGION_ONE_RELEASES = {"DC_20.12.1": {"sw_version": "20.12.1", "state": "deployed"}, @@ -77,10 +81,10 @@ class TestUploadState(TestSoftwareOrchestrator): # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) - self.software_client.upload.assert_called_once_with([consts.RELEASE_VAULT_DIR + - '/20.12/DC_20.12.3.patch', - consts.RELEASE_VAULT_DIR + - '/20.12/DC_20.12.4.patch']) + self.software_client.upload.assert_called_once_with([ + consts.RELEASE_VAULT_DIR + '/20.12/DC_20.12.3.patch', + consts.RELEASE_VAULT_DIR + '/20.12/DC_20.12.4.patch' + ]) # On success, the state should transition to the next state self.assert_step_updated(self.strategy_step.subcloud_id, @@ -98,10 +102,10 @@ class TestUploadState(TestSoftwareOrchestrator): # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) - self.software_client.upload.assert_called_once_with([consts.RELEASE_VAULT_DIR + - '/20.12/DC_20.12.3.patch', - consts.RELEASE_VAULT_DIR + - '/20.12/DC_20.12.4.patch']) + 
self.software_client.upload.assert_called_once_with([ + consts.RELEASE_VAULT_DIR + '/20.12/DC_20.12.3.patch', + consts.RELEASE_VAULT_DIR + '/20.12/DC_20.12.4.patch' + ]) # On success, the state should transition to the next state self.assert_step_updated(self.strategy_step.subcloud_id, @@ -152,15 +156,16 @@ class TestUploadState(TestSoftwareOrchestrator): mock_read_from_cache.side_effect = [REGION_ONE_RELEASES_2, False] mock_is_dir.return_value = True mock_listdir.return_value = ["DC_22.12.0.iso", "DC_22.12.0.sig"] - self.software_client.query.side_effect = [SUBCLOUD_RELEASES, REGION_ONE_RELEASES_2] + self.software_client.query.side_effect = [ + SUBCLOUD_RELEASES, REGION_ONE_RELEASES_2] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) - self.software_client.upload.assert_called_once_with([consts.RELEASE_VAULT_DIR + - '/22.12/DC_22.12.0.iso', - consts.RELEASE_VAULT_DIR + - '/22.12/DC_22.12.0.sig']) + self.software_client.upload.assert_called_once_with([ + consts.RELEASE_VAULT_DIR + '/22.12/DC_22.12.0.iso', + consts.RELEASE_VAULT_DIR + '/22.12/DC_22.12.0.sig' + ]) # On success, the state should transition to the next state self.assert_step_updated(self.strategy_step.subcloud_id, @@ -183,10 +188,10 @@ class TestUploadState(TestSoftwareOrchestrator): # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) - self.software_client.upload.assert_called_once_with([consts.RELEASE_VAULT_DIR + - '/22.12/DC_22.12.0.iso', - consts.RELEASE_VAULT_DIR + - '/22.12/DC_22.12.0.sig']) + self.software_client.upload.assert_called_once_with([ + consts.RELEASE_VAULT_DIR + '/22.12/DC_22.12.0.iso', + consts.RELEASE_VAULT_DIR + '/22.12/DC_22.12.0.sig' + ]) # On success, the state should transition to the next state self.assert_step_updated(self.strategy_step.subcloud_id, @@ -209,12 +214,11 @@ class TestUploadState(TestSoftwareOrchestrator): # invoke the strategy state operation 
on the orch thread self.worker.perform_state_action(self.strategy_step) - self.software_client.upload.assert_called_once_with([consts.RELEASE_VAULT_DIR + - '/22.12/DC_22.12.1.patch', - consts.RELEASE_VAULT_DIR + - '/22.12/DC_22.12.0.iso', - consts.RELEASE_VAULT_DIR + - '/22.12/DC_22.12.0.sig']) + self.software_client.upload.assert_called_once_with([ + consts.RELEASE_VAULT_DIR + '/22.12/DC_22.12.1.patch', + consts.RELEASE_VAULT_DIR + '/22.12/DC_22.12.0.iso', + consts.RELEASE_VAULT_DIR + '/22.12/DC_22.12.0.sig' + ]) # On success, the state should transition to the next state self.assert_step_updated(self.strategy_step.subcloud_id, diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/test_applying_vim_strategy.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/test_applying_vim_strategy.py index 749c0fcc4..5ee4d836a 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/test_applying_vim_strategy.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/test_applying_vim_strategy.py @@ -1,18 +1,19 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import itertools + import mock from dccommon.drivers.openstack import vim from dcmanager.common import consts from dcmanager.orchestrator.states import applying_vim_strategy - from dcmanager.tests.unit.fakes import FakeVimStrategy -from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ - import TestSwUpgradeState +from dcmanager.tests.unit.orchestrator.states.upgrade.test_base import \ + TestSwUpgradeState STRATEGY_READY_TO_APPLY = FakeVimStrategy(state=vim.STATE_READY_TO_APPLY) STRATEGY_APPLYING = FakeVimStrategy(state=vim.STATE_APPLYING) @@ -21,20 +22,29 @@ STRATEGY_APPLY_FAILED = FakeVimStrategy(state=vim.STATE_APPLY_FAILED) # Note: although the values of DEFAULT_MAX_WAIT_ATTEMPTS and WAIT_INTERVAL of -# "dcmanager.orchestrator.states.applying_vim_strategy" are patched in the lines below, -# the default values of parameters "wait_attempts" and "wait_interval" of method -# "ApplyingVIMStrategyState.__init__" don't change. To fix this, we patch these default -# values in "ApplyingVIMStrategyState.__init__.__defaults__". -@mock.patch("dcmanager.orchestrator.states.applying_vim_strategy." - "DEFAULT_MAX_FAILED_QUERIES", 3) -@mock.patch("dcmanager.orchestrator.states.applying_vim_strategy." - "DEFAULT_MAX_WAIT_ATTEMPTS", 3) -@mock.patch("dcmanager.orchestrator.states.applying_vim_strategy." - "WAIT_INTERVAL", 1) -@mock.patch("dcmanager.orchestrator.states.applying_vim_strategy." - "ApplyingVIMStrategyState.__init__.__defaults__", (3, 1)) +# "dcmanager.orchestrator.states.applying_vim_strategy" are patched in the lines +# below, the default values of parameters "wait_attempts" and "wait_interval" of +# method "ApplyingVIMStrategyState.__init__" don't change. To fix this, we patch +# these default values in "ApplyingVIMStrategyState.__init__.__defaults__". +@mock.patch( + "dcmanager.orchestrator.states.applying_vim_strategy." 
+ "DEFAULT_MAX_FAILED_QUERIES", + 3, +) +@mock.patch( + "dcmanager.orchestrator.states.applying_vim_strategy." + "DEFAULT_MAX_WAIT_ATTEMPTS", + 3, +) +@mock.patch( + "dcmanager.orchestrator.states.applying_vim_strategy.WAIT_INTERVAL", 1 +) +@mock.patch( + "dcmanager.orchestrator.states.applying_vim_strategy." + "ApplyingVIMStrategyState.__init__.__defaults__", + (3, 1), +) class ApplyingVIMStrategyMixin(object): - def set_state(self, state, success_state): self.state = state self.on_success_state = success_state @@ -67,8 +77,9 @@ class ApplyingVIMStrategyMixin(object): self.worker.perform_state_action(self.strategy_step) # Successful promotion to next state - self.assert_step_updated(self.strategy_step.subcloud_id, - self.on_success_state) + self.assert_step_updated( + self.strategy_step.subcloud_id, self.on_success_state + ) def test_applying_vim_strategy_raises_exception(self): """Test applying a VIM strategy that raises an exception""" @@ -77,15 +88,17 @@ class ApplyingVIMStrategyMixin(object): self.vim_client.get_strategy.return_value = STRATEGY_READY_TO_APPLY # raise an exception during apply_strategy - self.vim_client.apply_strategy.side_effect =\ - Exception("HTTPBadRequest: this is a fake exception") + self.vim_client.apply_strategy.side_effect = Exception( + "HTTPBadRequest: this is a fake exception" + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Failure case - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_applying_vim_strategy_fails_apply_immediately(self): """Test applying a VIM strategy that returns a failed result""" @@ -100,8 +113,9 @@ class ApplyingVIMStrategyMixin(object): self.worker.perform_state_action(self.strategy_step) # Failure case - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + 
self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_applying_vim_strategy_fails_apply_later(self): """Test applying a VIM strategy that starts to apply but then fails""" @@ -120,8 +134,9 @@ class ApplyingVIMStrategyMixin(object): self.worker.perform_state_action(self.strategy_step) # Failure case - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_applying_vim_strategy_timeout(self): """Test applying a VIM strategy that times out""" @@ -129,7 +144,11 @@ class ApplyingVIMStrategyMixin(object): # first api query is before the apply # test where it never progresses past 'applying' self.vim_client.get_strategy.side_effect = itertools.chain( - [STRATEGY_READY_TO_APPLY, ], itertools.repeat(STRATEGY_APPLYING)) + [ + STRATEGY_READY_TO_APPLY, + ], + itertools.repeat(STRATEGY_APPLYING), + ) # API calls acts as expected self.vim_client.apply_strategy.return_value = STRATEGY_APPLYING @@ -138,12 +157,15 @@ class ApplyingVIMStrategyMixin(object): self.worker.perform_state_action(self.strategy_step) # verify the max number of queries was attempted (plus 1 before loop) - self.assertEqual(applying_vim_strategy.DEFAULT_MAX_WAIT_ATTEMPTS + 1, - self.vim_client.get_strategy.call_count) + self.assertEqual( + applying_vim_strategy.DEFAULT_MAX_WAIT_ATTEMPTS + 1, + self.vim_client.get_strategy.call_count, + ) # Failure case - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_applying_vim_strategy_already_applying_and_completes(self): """Test applying a VIM strategy while one already is applying""" @@ -162,8 +184,9 @@ class ApplyingVIMStrategyMixin(object): self.vim_client.apply_strategy.assert_not_called() # SUCCESS case - 
self.assert_step_updated(self.strategy_step.subcloud_id, - self.on_success_state) + self.assert_step_updated( + self.strategy_step.subcloud_id, self.on_success_state + ) def test_applying_vim_strategy_already_exists_and_is_broken(self): """Test applying a VIM strategy while a broken strategy exists""" @@ -181,14 +204,17 @@ class ApplyingVIMStrategyMixin(object): self.vim_client.apply_strategy.assert_not_called() # Failure case - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) -class TestSwUpgradeApplyingVIMStrategyStage(ApplyingVIMStrategyMixin, - TestSwUpgradeState): - +class TestSwUpgradeApplyingVIMStrategyStage( + ApplyingVIMStrategyMixin, TestSwUpgradeState +): def setUp(self): super(TestSwUpgradeApplyingVIMStrategyStage, self).setUp() - self.set_state(consts.STRATEGY_STATE_APPLYING_VIM_UPGRADE_STRATEGY, - consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_0) + self.set_state( + consts.STRATEGY_STATE_APPLYING_VIM_UPGRADE_STRATEGY, + consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_0, + ) diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_activating_upgrade.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_activating_upgrade.py index 9fde2d685..0a9aefa53 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_activating_upgrade.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_activating_upgrade.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import itertools import mock @@ -20,12 +21,12 @@ ACTIVATING_FAILED = FakeUpgrade(state='activation-failed') ALREADY_ACTIVATED_UPGRADE = FakeUpgrade(state='activation-complete') -@mock.patch("dcmanager.orchestrator.states.upgrade.activating.DEFAULT_MAX_QUERIES", - 5) -@mock.patch("dcmanager.orchestrator.states.upgrade.activating.DEFAULT_SLEEP_DURATION", - 1) -@mock.patch("dcmanager.orchestrator.states.upgrade.activating.MAX_FAILED_RETRIES", - 3) +@mock.patch( + "dcmanager.orchestrator.states.upgrade.activating.DEFAULT_MAX_QUERIES", 5) +@mock.patch( + "dcmanager.orchestrator.states.upgrade.activating.DEFAULT_SLEEP_DURATION", 1) +@mock.patch( + "dcmanager.orchestrator.states.upgrade.activating.MAX_FAILED_RETRIES", 3) class TestSwUpgradeActivatingStage(TestSwUpgradeState): def setUp(self): @@ -38,8 +39,8 @@ class TestSwUpgradeActivatingStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_ACTIVATING_UPGRADE) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_ACTIVATING_UPGRADE) # Add mock API endpoints for sysinv client calls invoked by this state self.sysinv_client.upgrade_activate = mock.MagicMock() @@ -89,7 +90,9 @@ class TestSwUpgradeActivatingStage(TestSwUpgradeState): # verify the DB update was invoked updated_subcloud = db_api.subcloud_get(self.ctx, self.subcloud.id) - self.assertEqual(updated_subcloud.deploy_status, consts.DEPLOY_STATE_UPGRADE_ACTIVATED) + self.assertEqual( + updated_subcloud.deploy_status, consts.DEPLOY_STATE_UPGRADE_ACTIVATED + ) # On success, the state should be updated to the next state self.assert_step_updated(self.strategy_step.subcloud_id, diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_completing_upgrade.py 
b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_completing_upgrade.py index a5bf0a8d4..17ab4ff29 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_completing_upgrade.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_completing_upgrade.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020, 2022 Wind River Systems, Inc. +# Copyright (c) 2020, 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import mock from dcmanager.common import consts @@ -11,18 +12,18 @@ from dcmanager.orchestrator.states.upgrade import completing from dcmanager.tests.unit.orchestrator.states.fakes import FakeSystem from dcmanager.tests.unit.orchestrator.states.fakes import FakeUpgrade from dcmanager.tests.unit.orchestrator.states.fakes import UPGRADED_VERSION -from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ - import TestSwUpgradeState +from dcmanager.tests.unit.orchestrator.states.upgrade.test_base import \ + TestSwUpgradeState VALID_UPGRADE = FakeUpgrade(state='activation-complete') INVALID_UPGRADE = FakeUpgrade(state='aborting') UPGRADE_COMPLETING = FakeUpgrade(state='completing') -@mock.patch("dcmanager.orchestrator.states.upgrade.completing.DEFAULT_MAX_QUERIES", - 3) -@mock.patch("dcmanager.orchestrator.states.upgrade.completing.DEFAULT_SLEEP_DURATION", - 1) +@mock.patch( + "dcmanager.orchestrator.states.upgrade.completing.DEFAULT_MAX_QUERIES", 3) +@mock.patch( + "dcmanager.orchestrator.states.upgrade.completing.DEFAULT_SLEEP_DURATION", 1) class TestSwUpgradeCompletingStage(TestSwUpgradeState): def setUp(self): @@ -35,8 +36,8 @@ class TestSwUpgradeCompletingStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_COMPLETING_UPGRADE) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, 
consts.STRATEGY_STATE_COMPLETING_UPGRADE) # Add mock API endpoints for sysinv client calls invoked by this state self.sysinv_client.upgrade_complete = mock.MagicMock() diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_deleting_load.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_deleting_load.py index 8fa325a70..792ce2cb5 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_deleting_load.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_deleting_load.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020, 2022 Wind River Systems, Inc. +# Copyright (c) 2020, 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import itertools import mock @@ -13,8 +14,8 @@ from dcmanager.tests.unit.orchestrator.states.fakes import FakeLoad from dcmanager.tests.unit.orchestrator.states.fakes import PREVIOUS_PREVIOUS_VERSION from dcmanager.tests.unit.orchestrator.states.fakes import PREVIOUS_VERSION from dcmanager.tests.unit.orchestrator.states.fakes import UPGRADED_VERSION -from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ - import TestSwUpgradeState +from dcmanager.tests.unit.orchestrator.states.upgrade.test_base import \ + TestSwUpgradeState PREVIOUS_LOAD = FakeLoad(1, software_version=PREVIOUS_VERSION, state='imported') @@ -36,10 +37,10 @@ SUCCESS_DELETE_RESPONSE = { } -@mock.patch("dcmanager.orchestrator.states.upgrade.deleting_load.DEFAULT_MAX_QUERIES", - 3) -@mock.patch("dcmanager.orchestrator.states.upgrade.deleting_load.DEFAULT_SLEEP_DURATION", - 1) +@mock.patch("dcmanager.orchestrator.states.upgrade." + "deleting_load.DEFAULT_MAX_QUERIES", 3) +@mock.patch("dcmanager.orchestrator.states.upgrade." 
+ "deleting_load.DEFAULT_SLEEP_DURATION", 1) class TestSwUpgradeDeletingLoadStage(TestSwUpgradeState): def setUp(self): @@ -52,8 +53,8 @@ class TestSwUpgradeDeletingLoadStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_DELETING_LOAD) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_DELETING_LOAD) # Add mock API endpoints for sysinv client calls invoked by this state self.sysinv_client.get_loads = mock.MagicMock() diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_finishing_patch_strategy.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_finishing_patch_strategy.py index c35a9c2c1..6e019324d 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_finishing_patch_strategy.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_finishing_patch_strategy.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020, 2023 Wind River Systems, Inc. +# Copyright (c) 2020, 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import mock from dcmanager.common import consts @@ -57,8 +58,8 @@ class TestSwUpgradeFinishingPatchStrategyStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_FINISHING_PATCH_STRATEGY) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_FINISHING_PATCH_STRATEGY) # Add mock API endpoints for patching client calls invoked by this state self.patching_client.query = mock.MagicMock() diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_importing_load.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_importing_load.py index 7587f66c0..45ab8f7ae 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_importing_load.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_importing_load.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2022 Wind River Systems, Inc. +# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import itertools import mock @@ -109,8 +110,8 @@ class TestSwUpgradeImportingLoadStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_IMPORTING_LOAD) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_IMPORTING_LOAD) # Mock the get_vault_load_files utility method p = mock.patch( @@ -185,7 +186,7 @@ class TestSwUpgradeImportingLoadStage(TestSwUpgradeState): self.on_success_state) def test_upgrade_sx_subcloud_import_load_vault_load_abort(self): - """Test import_load_metadata retry invoked and strategy continues as expected""" + """Import_load_metadata retry invoked and strategy continues as expected""" system_values = FakeSystem() system_values.system_mode = consts.SYSTEM_MODE_SIMPLEX self.sysinv_client.get_system.return_value = system_values @@ -211,7 +212,7 @@ class TestSwUpgradeImportingLoadStage(TestSwUpgradeState): self.on_success_state) def test_upgrade_subcloud_dx_importing_import_load_retry(self): - """Test importing load on AIO-DX where import_load HTTP error requires retry.""" + """Importing load on AIO-DX where import_load HTTP error requires retry.""" # Simulate the target load has not been imported yet on the subcloud self.sysinv_client.get_loads.return_value = DEST_LOAD_MISSING diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_installing_license.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_installing_license.py index ff6cf1104..e12943761 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_installing_license.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_installing_license.py @@ -1,14 +1,15 @@ # -# Copyright (c) 2020, 2022 Wind River Systems, Inc. 
+# Copyright (c) 2020, 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import mock from dcmanager.common import consts -from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ - import TestSwUpgradeState +from dcmanager.tests.unit.orchestrator.states.upgrade.test_base import \ + TestSwUpgradeState MISSING_LICENSE_RESPONSE = { u'content': u'', @@ -38,8 +39,8 @@ class TestSwUpgradeInstallingLicenseStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_INSTALLING_LICENSE) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_INSTALLING_LICENSE) # Add mock API endpoints for sysinv client calls invoked by this state self.sysinv_client.get_license = mock.MagicMock() diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_lock_controller.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_lock_controller.py index 8f6e710de..77f796c48 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_lock_controller.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_lock_controller.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020, 2022 Wind River Systems, Inc. +# Copyright (c) 2020, 2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import itertools import mock @@ -10,8 +11,8 @@ from dcmanager.common import consts from dcmanager.orchestrator.states import lock_host from dcmanager.tests.unit.orchestrator.states.fakes import FakeController -from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ - import TestSwUpgradeState +from dcmanager.tests.unit.orchestrator.states.upgrade.test_base import \ + TestSwUpgradeState @mock.patch("dcmanager.orchestrator.states.lock_host.DEFAULT_MAX_QUERIES", 3) @@ -38,19 +39,20 @@ class TestSwUpgradeLockSimplexStage(TestSwUpgradeState): self.setup_fake_controllers('controller-0') def setup_fake_controllers(self, host_name): - self.CONTROLLER_UNLOCKED = FakeController(hostname=host_name, - administrative=consts.ADMIN_UNLOCKED) - self.CONTROLLER_LOCKED = FakeController(hostname=host_name, - administrative=consts.ADMIN_LOCKED) - self.CONTROLLER_LOCKING = FakeController(hostname=host_name, - administrative=consts.ADMIN_UNLOCKED, - ihost_action='lock', - task='Locking') - self.CONTROLLER_LOCKING_FAILED = \ - FakeController(hostname=host_name, - administrative=consts.ADMIN_UNLOCKED, - ihost_action='force-swact', - task='Swacting') + self.CONTROLLER_UNLOCKED = FakeController( + hostname=host_name, administrative=consts.ADMIN_UNLOCKED) + self.CONTROLLER_LOCKED = FakeController( + hostname=host_name, administrative=consts.ADMIN_LOCKED) + self.CONTROLLER_LOCKING = FakeController( + hostname=host_name, + administrative=consts.ADMIN_UNLOCKED, + ihost_action='lock', + task='Locking') + self.CONTROLLER_LOCKING_FAILED = FakeController( + hostname=host_name, + administrative=consts.ADMIN_UNLOCKED, + ihost_action='force-swact', + task='Swacting') def test_lock_success(self): """Test the lock command returns a success""" diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_migrating_data.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_migrating_data.py index 
67f31f5f9..1467fbed6 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_migrating_data.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_migrating_data.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020, 2022 Wind River Systems, Inc. +# Copyright (c) 2020, 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import mock from dcmanager.common import consts @@ -10,8 +11,8 @@ from dcmanager.db.sqlalchemy import api as db_api from dcmanager.orchestrator.states.upgrade import migrating_data from dcmanager.tests.unit.orchestrator.states.fakes import FakeController -from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ - import TestSwUpgradeState +from dcmanager.tests.unit.orchestrator.states.upgrade.test_base import \ + TestSwUpgradeState CONTROLLER_0_LOCKED = FakeController(administrative=consts.ADMIN_LOCKED) CONTROLLER_0_UNLOCKING = \ @@ -42,8 +43,8 @@ class TestSwUpgradeMigratingDataStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_MIGRATING_DATA) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_MIGRATING_DATA) # Add mock API endpoints for sysinv client calls invoked by this state self.sysinv_client.get_host = mock.MagicMock() @@ -53,8 +54,8 @@ class TestSwUpgradeMigratingDataStage(TestSwUpgradeState): # Simulate a failed subprocess call to the platform upgrade playbook # on the subcloud. - p = mock.patch( - 'dcmanager.orchestrator.states.upgrade.migrating_data.migrate_subcloud_data') + p = mock.patch("dcmanager.orchestrator.states.upgrade.migrating_data." 
+ "migrate_subcloud_data") self.mock_platform_upgrade_call = p.start() self.mock_platform_upgrade_call.side_effect = Exception("Bad day!") self.addCleanup(p.stop) @@ -71,8 +72,8 @@ class TestSwUpgradeMigratingDataStage(TestSwUpgradeState): # Simulate a successful subprocess call to the platform upgrade playbook # on the subcloud. - p = mock.patch( - 'dcmanager.orchestrator.states.upgrade.migrating_data.migrate_subcloud_data') + p = mock.patch("dcmanager.orchestrator.states.upgrade.migrating_data." + "migrate_subcloud_data") self.mock_platform_upgrade_call = p.start() self.mock_platform_upgrade_call.return_value = 0 self.addCleanup(p.stop) @@ -149,8 +150,8 @@ class TestSwUpgradeMigratingDataStage(TestSwUpgradeState): # Simulate a successful subprocess call to the platform upgrade playbook # on the subcloud. - p = mock.patch( - 'dcmanager.orchestrator.states.upgrade.migrating_data.migrate_subcloud_data') + p = mock.patch("dcmanager.orchestrator.states.upgrade.migrating_data." + "migrate_subcloud_data") self.mock_platform_upgrade_call = p.start() self.mock_platform_upgrade_call.return_value = 0 self.addCleanup(p.stop) @@ -177,8 +178,8 @@ class TestSwUpgradeMigratingDataStage(TestSwUpgradeState): # Simulate a successful subprocess call to the platform upgrade playbook # on the subcloud. - p = mock.patch( - 'dcmanager.orchestrator.states.upgrade.migrating_data.migrate_subcloud_data') + p = mock.patch("dcmanager.orchestrator.states.upgrade.migrating_data." 
+ "migrate_subcloud_data") self.mock_platform_upgrade_call = p.start() self.mock_platform_upgrade_call.return_value = 0 self.addCleanup(p.stop) diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_pre_check.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_pre_check.py index 017d6f4a5..8eb6d5d55 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_pre_check.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_pre_check.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020-2023 Wind River Systems, Inc. +# Copyright (c) 2020-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import mock from dccommon import consts as dccommon_consts @@ -16,139 +17,164 @@ from dcmanager.tests.unit.orchestrator.states.fakes import FakeController from dcmanager.tests.unit.orchestrator.states.fakes import FakeHostFilesystem from dcmanager.tests.unit.orchestrator.states.fakes import FakeSystem from dcmanager.tests.unit.orchestrator.states.fakes import FakeUpgrade -from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ - import TestSwUpgradeState +from dcmanager.tests.unit.orchestrator.states.upgrade.test_base import \ + TestSwUpgradeState CONTROLLER_0_LOCKED = FakeController(administrative=consts.ADMIN_LOCKED) CONTROLLER_0_HOST_FS_SCRATCH_MIN_SIZED = FakeHostFilesystem(size=16) CONTROLLER_0_HOST_FS_SCRATCH_UNDER_SIZED = FakeHostFilesystem(size=15) -CONTROLLER_0_LOCKED_AND_STANDBY = FakeController(administrative=consts.ADMIN_LOCKED, - capabilities={"Personality": "Controller-Standby"}) -CONTROLLER_0_UNLOCKED_AND_STANDBY = FakeController(administrative=consts.ADMIN_UNLOCKED, - capabilities={"Personality": "Controller-Standby"}) -CONTROLLER_0_UNLOCKED_AND_ACTIVE = FakeController(administrative=consts.ADMIN_UNLOCKED) -CONTROLLER_0_NOT_UPGRADED = FakeController(administrative=consts.ADMIN_UNLOCKED, - capabilities={"Personality": "Controller-Standby"}) 
-CONTROLLER_0_UPGRADED_STANDBY = FakeController(administrative=consts.ADMIN_UNLOCKED, - capabilities={"Personality": "Controller-Standby"}, - software_load='56.78') -CONTROLLER_0_UPGRADED_ACTIVE = FakeController(administrative=consts.ADMIN_UNLOCKED, - software_load='56.78') -CONTROLLER_1_LOCKED_AND_STANDBY = FakeController(host_id=2, - hostname='controller-1', - administrative=consts.ADMIN_LOCKED, - capabilities={"Personality": "Controller-Standby"}) -CONTROLLER_1_UNLOCKED_AND_STANDBY = FakeController(host_id=2, - hostname='controller-1', - administrative=consts.ADMIN_UNLOCKED, - capabilities={"Personality": "Controller-Standby"}) -CONTROLLER_1_UNLOCKED_AND_ACTIVE = FakeController(host_id=2, - hostname='controller-1', - administrative=consts.ADMIN_UNLOCKED) -CONTROLLER_1_NOT_UPGRADED = FakeController(host_id=2, - hostname='controller-1', - administrative=consts.ADMIN_UNLOCKED) -CONTROLLER_1_UPGRADED_ACTIVE = FakeController(host_id=2, - hostname='controller-1', - administrative=consts.ADMIN_UNLOCKED, - software_load='56.78') -CONTROLLER_1_UPGRADED_STANDBY = FakeController(host_id=2, - hostname='controller-1', - administrative=consts.ADMIN_UNLOCKED, - software_load='56.78', - capabilities={"Personality": "Controller-Standby"}) -SYSTEM_HEALTH_UPGRADE_RESPONSE_SUCCESS = \ - "System Health:\n" \ - "All hosts are provisioned: [OK]\n" \ - "All hosts are unlocked/enabled: [OK]\n" \ - "All hosts have current configurations: [OK]\n" \ - "All hosts are patch current: [OK]\n" \ - "Ceph Storage Healthy: [OK]\n" \ - "No alarms: [OK]\n" \ - "All kubernetes nodes are ready: [OK]\n" \ - "All kubernetes control plane pods are ready: [OK]\n" \ - "Active kubernetes version is the latest supported version: [OK]\n" \ +CONTROLLER_0_LOCKED_AND_STANDBY = FakeController( + administrative=consts.ADMIN_LOCKED, + capabilities={"Personality": "Controller-Standby"}, +) +CONTROLLER_0_UNLOCKED_AND_STANDBY = FakeController( + administrative=consts.ADMIN_UNLOCKED, + capabilities={"Personality": 
"Controller-Standby"}, +) +CONTROLLER_0_UNLOCKED_AND_ACTIVE = FakeController( + administrative=consts.ADMIN_UNLOCKED +) +CONTROLLER_0_NOT_UPGRADED = FakeController( + administrative=consts.ADMIN_UNLOCKED, + capabilities={"Personality": "Controller-Standby"}, +) +CONTROLLER_0_UPGRADED_STANDBY = FakeController( + administrative=consts.ADMIN_UNLOCKED, + capabilities={"Personality": "Controller-Standby"}, + software_load="56.78", +) +CONTROLLER_0_UPGRADED_ACTIVE = FakeController( + administrative=consts.ADMIN_UNLOCKED, software_load="56.78" +) +CONTROLLER_1_LOCKED_AND_STANDBY = FakeController( + host_id=2, + hostname="controller-1", + administrative=consts.ADMIN_LOCKED, + capabilities={"Personality": "Controller-Standby"}, +) +CONTROLLER_1_UNLOCKED_AND_STANDBY = FakeController( + host_id=2, + hostname="controller-1", + administrative=consts.ADMIN_UNLOCKED, + capabilities={"Personality": "Controller-Standby"}, +) +CONTROLLER_1_UNLOCKED_AND_ACTIVE = FakeController( + host_id=2, hostname="controller-1", administrative=consts.ADMIN_UNLOCKED +) +CONTROLLER_1_NOT_UPGRADED = FakeController( + host_id=2, hostname="controller-1", administrative=consts.ADMIN_UNLOCKED +) +CONTROLLER_1_UPGRADED_ACTIVE = FakeController( + host_id=2, + hostname="controller-1", + administrative=consts.ADMIN_UNLOCKED, + software_load="56.78", +) +CONTROLLER_1_UPGRADED_STANDBY = FakeController( + host_id=2, + hostname="controller-1", + administrative=consts.ADMIN_UNLOCKED, + software_load="56.78", + capabilities={"Personality": "Controller-Standby"}, +) +SYSTEM_HEALTH_UPGRADE_RESPONSE_SUCCESS = ( + "System Health:\n" + "All hosts are provisioned: [OK]\n" + "All hosts are unlocked/enabled: [OK]\n" + "All hosts have current configurations: [OK]\n" + "All hosts are patch current: [OK]\n" + "Ceph Storage Healthy: [OK]\n" + "No alarms: [OK]\n" + "All kubernetes nodes are ready: [OK]\n" + "All kubernetes control plane pods are ready: [OK]\n" + "Active kubernetes version is the latest supported version: 
[OK]\n" "No imported load found. Unable to test further" +) -SYSTEM_HEALTH_UPGRADE_RESPONSE_NON_MGMT_AFFECTING_ALARMS = \ - "System Health:\n" \ - "All hosts are provisioned: [OK]\n" \ - "All hosts are unlocked/enabled: [OK]\n" \ - "All hosts have current configurations: [OK]\n" \ - "All hosts are patch current: [OK]\n" \ - "Ceph Storage Healthy: [OK]\n" \ - "No alarms: [Fail]\n" \ - "[4] alarms found, [0] of which are management affecting\n" \ - "All kubernetes nodes are ready: [OK]\n" \ - "All kubernetes control plane pods are ready: [OK]\n" \ - "Active kubernetes version is the latest supported version: [OK]\n" \ +SYSTEM_HEALTH_UPGRADE_RESPONSE_NON_MGMT_AFFECTING_ALARMS = ( + "System Health:\n" + "All hosts are provisioned: [OK]\n" + "All hosts are unlocked/enabled: [OK]\n" + "All hosts have current configurations: [OK]\n" + "All hosts are patch current: [OK]\n" + "Ceph Storage Healthy: [OK]\n" + "No alarms: [Fail]\n" + "[4] alarms found, [0] of which are management affecting\n" + "All kubernetes nodes are ready: [OK]\n" + "All kubernetes control plane pods are ready: [OK]\n" + "Active kubernetes version is the latest supported version: [OK]\n" "No imported load found. 
Unable to test further" +) -SYSTEM_HEALTH_UPGRADE_RESPONSE_MGMT_AFFECTING_ALARM = \ - "System Health:\n" \ - "All hosts are provisioned: [OK]\n" \ - "All hosts are unlocked/enabled: [OK]\n" \ - "All hosts have current configurations: [OK]\n" \ - "All hosts are patch current: [OK]\n" \ - "Ceph Storage Healthy: [OK]\n" \ - "No alarms: [Fail]\n" \ - "[1] alarms found, [1] of which are management affecting\n" \ - "All kubernetes nodes are ready: [OK]\n" \ - "All kubernetes control plane pods are ready: [OK]\n" \ - "Active kubernetes version is the latest supported version: [OK]\n" \ +SYSTEM_HEALTH_UPGRADE_RESPONSE_MGMT_AFFECTING_ALARM = ( + "System Health:\n" + "All hosts are provisioned: [OK]\n" + "All hosts are unlocked/enabled: [OK]\n" + "All hosts have current configurations: [OK]\n" + "All hosts are patch current: [OK]\n" + "Ceph Storage Healthy: [OK]\n" + "No alarms: [Fail]\n" + "[1] alarms found, [1] of which are management affecting\n" + "All kubernetes nodes are ready: [OK]\n" + "All kubernetes control plane pods are ready: [OK]\n" + "Active kubernetes version is the latest supported version: [OK]\n" "No imported load found. 
Unable to test further" +) -SYSTEM_HEALTH_UPGRADE_RESPONSE_MULTIPLE_FAILED_HEALTH_CHECKS = \ - "System Health:\n" \ - "All hosts are provisioned: [OK]\n" \ - "All hosts are unlocked/enabled: [OK]\n" \ - "All hosts have current configurations: [OK]\n" \ - "All hosts are patch current: [OK]\n" \ - "Ceph Storage Healthy: [OK]\n" \ - "No alarms: [Fail]\n" \ - "[1] alarms found, [0] of which are management affecting\n" \ - "All kubernetes nodes are ready: [Fail]\n" \ - "Kubernetes nodes not ready: controller-0\n" \ - "All kubernetes control plane pods are ready: [Fail]\n" \ - "Kubernetes control plane pods not ready: kube-apiserver-controller-0\n" \ - "Active kubernetes version is the latest supported version: [OK]\n" \ +SYSTEM_HEALTH_UPGRADE_RESPONSE_MULTIPLE_FAILED_HEALTH_CHECKS = ( + "System Health:\n" + "All hosts are provisioned: [OK]\n" + "All hosts are unlocked/enabled: [OK]\n" + "All hosts have current configurations: [OK]\n" + "All hosts are patch current: [OK]\n" + "Ceph Storage Healthy: [OK]\n" + "No alarms: [Fail]\n" + "[1] alarms found, [0] of which are management affecting\n" + "All kubernetes nodes are ready: [Fail]\n" + "Kubernetes nodes not ready: controller-0\n" + "All kubernetes control plane pods are ready: [Fail]\n" + "Kubernetes control plane pods not ready: kube-apiserver-controller-0\n" + "Active kubernetes version is the latest supported version: [OK]\n" "No imported load found. 
Unable to test further" +) -SYSTEM_HEALTH_UPGRADE_RESPONSE_K8S_FAILED_HEALTH_CHECKS = \ - "System Health:\n" \ - "All hosts are provisioned: [OK]\n" \ - "All hosts are unlocked/enabled: [OK]\n" \ - "All hosts have current configurations: [OK]\n" \ - "All hosts are patch current: [OK]\n" \ - "Ceph Storage Healthy: [OK]\n" \ - "No alarms: [OK]\n" \ - "All kubernetes nodes are ready: [Fail]\n" \ - "All kubernetes control plane pods are ready: [OK]\n" \ - "Active kubernetes version is the latest supported version: [OK]\n" \ +SYSTEM_HEALTH_UPGRADE_RESPONSE_K8S_FAILED_HEALTH_CHECKS = ( + "System Health:\n" + "All hosts are provisioned: [OK]\n" + "All hosts are unlocked/enabled: [OK]\n" + "All hosts have current configurations: [OK]\n" + "All hosts are patch current: [OK]\n" + "Ceph Storage Healthy: [OK]\n" + "No alarms: [OK]\n" + "All kubernetes nodes are ready: [Fail]\n" + "All kubernetes control plane pods are ready: [OK]\n" + "Active kubernetes version is the latest supported version: [OK]\n" "No imported load found. Unable to test further" +) -SYSTEM_HEALTH_UPGRADE_RESPONSE_FAILED_ACTIVE_K8S_VERSION_CHECK = \ - "System Health:\n" \ - "All hosts are provisioned: [OK]\n" \ - "All hosts are unlocked/enabled: [OK]\n" \ - "All hosts have current configurations: [OK]\n" \ - "All hosts are patch current: [OK]\n" \ - "Ceph Storage Healthy: [OK]\n" \ - "No alarms: [OK]\n" \ - "All kubernetes nodes are ready: [OK]\n" \ - "All kubernetes control plane pods are ready: [OK]\n" \ - "Active kubernetes version is the latest supported version: [Fail]\n" \ - "Upgrade kubernetes to the latest version: [v1.26.1]. 
See \"system kube-version-list\"\n" \ +SYSTEM_HEALTH_UPGRADE_RESPONSE_FAILED_ACTIVE_K8S_VERSION_CHECK = ( + "System Health:\n" + "All hosts are provisioned: [OK]\n" + "All hosts are unlocked/enabled: [OK]\n" + "All hosts have current configurations: [OK]\n" + "All hosts are patch current: [OK]\n" + "Ceph Storage Healthy: [OK]\n" + "No alarms: [OK]\n" + "All kubernetes nodes are ready: [OK]\n" + "All kubernetes control plane pods are ready: [OK]\n" + "Active kubernetes version is the latest supported version: [Fail]\n" + 'Upgrade kubernetes to the latest version: [v1.26.1]. ' + 'See "system kube-version-list"\n' "No imported load found. Unable to test further" +) -UPGRADE_STARTED = FakeUpgrade(state='started') +UPGRADE_STARTED = FakeUpgrade(state="started") -UPGRADE_ALARM = FakeAlarm('900.005', 'True') -HOST_LOCKED_ALARM = FakeAlarm('200.001', 'True') +UPGRADE_ALARM = FakeAlarm("900.005", "True") +HOST_LOCKED_ALARM = FakeAlarm("200.001", "True") class TestSwUpgradePreCheckStage(TestSwUpgradeState): - def setUp(self): super(TestSwUpgradePreCheckStage, self).setUp() @@ -157,8 +183,9 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_PRE_CHECK) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_PRE_CHECK + ) self.sysinv_client.get_host = mock.MagicMock() self.sysinv_client.get_host_filesystem = mock.MagicMock() @@ -178,15 +205,17 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): """ # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=consts.DEPLOY_STATE_DONE + ) - self.sysinv_client.get_host_filesystem.side_effect = \ - 
[CONTROLLER_0_HOST_FS_SCRATCH_MIN_SIZED] + self.sysinv_client.get_host_filesystem.side_effect = [ + CONTROLLER_0_HOST_FS_SCRATCH_MIN_SIZED + ] - self.sysinv_client.get_system_health_upgrade.return_value = \ + self.sysinv_client.get_system_health_upgrade.return_value = ( SYSTEM_HEALTH_UPGRADE_RESPONSE_SUCCESS + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -198,10 +227,13 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): self.sysinv_client.get_host_filesystem.assert_called() # Verify the expected next state happened (installing license) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_INSTALLING_LICENSE) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_INSTALLING_LICENSE + ) - def test_upgrade_pre_check_subcloud_online_fresh_with_non_management_alarms(self): + def test_upgrade_pre_check_subcloud_online_fresh_with_non_management_alarms( + self, + ): """Test pre check step where the subcloud is online with non mgmt alarms The pre-check should transition in this scenario to the first state @@ -209,15 +241,17 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): """ # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=consts.DEPLOY_STATE_DONE + ) - self.sysinv_client.get_host_filesystem.side_effect = \ - [CONTROLLER_0_HOST_FS_SCRATCH_MIN_SIZED] + self.sysinv_client.get_host_filesystem.side_effect = [ + CONTROLLER_0_HOST_FS_SCRATCH_MIN_SIZED + ] - self.sysinv_client.get_system_health_upgrade.return_value = \ + self.sysinv_client.get_system_health_upgrade.return_value = ( SYSTEM_HEALTH_UPGRADE_RESPONSE_NON_MGMT_AFFECTING_ALARMS + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -229,11 +263,14 
@@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): self.sysinv_client.get_host_filesystem.assert_called() # Verify the expected next state happened (installing license) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_INSTALLING_LICENSE) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_INSTALLING_LICENSE + ) - def test_upgrade_pre_check_subcloud_online_host_locked_upgrade_started_mgmt_alarms(self): - """Test pre check step where the subcloud is online, locked and upgrade has started. + def test_upgrade_pre_check_sc_online_host_locked_upgrade_started_mgmt_alarms( + self, + ): + """Precheck step where the subcloud is online/locked/upgrade has started. The pre-check should move to the next step as the upgrade alarm can be ignored and the host locked alarm can also be ignored if upgrade has @@ -241,23 +278,30 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): """ # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=consts.DEPLOY_STATE_DONE + ) # subcloud is locked self.sysinv_client.get_host.side_effect = [CONTROLLER_0_LOCKED] # upgrade has started - self.sysinv_client.get_upgrades.return_value = [UPGRADE_STARTED, ] + self.sysinv_client.get_upgrades.return_value = [ + UPGRADE_STARTED, + ] - self.sysinv_client.get_system_health_upgrade.return_value = \ + self.sysinv_client.get_system_health_upgrade.return_value = ( SYSTEM_HEALTH_UPGRADE_RESPONSE_MGMT_AFFECTING_ALARM + ) - self.fm_client.get_alarms.return_value = [UPGRADE_ALARM, HOST_LOCKED_ALARM, ] + self.fm_client.get_alarms.return_value = [ + UPGRADE_ALARM, + HOST_LOCKED_ALARM, + ] - self.sysinv_client.get_host_filesystem.side_effect = \ - [CONTROLLER_0_HOST_FS_SCRATCH_MIN_SIZED] + self.sysinv_client.get_host_filesystem.side_effect = [ + 
CONTROLLER_0_HOST_FS_SCRATCH_MIN_SIZED + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -272,11 +316,14 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): self.sysinv_client.get_host_filesystem.assert_called() # Verify the expected next state happened (installing license) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_INSTALLING_LICENSE) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_INSTALLING_LICENSE + ) - def test_upgrade_pre_check_subcloud_online_host_locked_no_upgrade_mgmt_alarms(self): - """Test pre check step where subcloud is online, locked and upgrade has not started. + def test_upgrade_pre_check_subcloud_online_host_locked_no_upgrade_mgmt_alarms( + self, + ): + """Precheck step where subcloud is online/locked/upgrade has not started. The pre-check should raise an exception and transition to the failed state as host locked alarm cannot be skipped if upgrade has @@ -284,19 +331,22 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): """ # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=consts.DEPLOY_STATE_DONE + ) # subcloud is locked self.sysinv_client.get_host.side_effect = [CONTROLLER_0_LOCKED] self.sysinv_client.get_upgrades.return_value = [] - self.sysinv_client.get_system_health_upgrade.return_value = \ + self.sysinv_client.get_system_health_upgrade.return_value = ( SYSTEM_HEALTH_UPGRADE_RESPONSE_MGMT_AFFECTING_ALARM + ) - self.fm_client.get_alarms.return_value = [HOST_LOCKED_ALARM, ] + self.fm_client.get_alarms.return_value = [ + HOST_LOCKED_ALARM, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -308,8 +358,9 @@ class 
TestSwUpgradePreCheckStage(TestSwUpgradeState): self.fm_client.get_alarms.assert_called() # Verify the exception caused the state to go to failed - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_upgrade_pre_check_subcloud_online_multiple_failed_health_checks(self): """Test pre check step where the subcloud is online but is unhealthy @@ -320,12 +371,13 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): """ # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=consts.DEPLOY_STATE_DONE + ) - self.sysinv_client.get_system_health_upgrade.return_value = \ + self.sysinv_client.get_system_health_upgrade.return_value = ( SYSTEM_HEALTH_UPGRADE_RESPONSE_MULTIPLE_FAILED_HEALTH_CHECKS + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -334,8 +386,9 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): self.sysinv_client.get_system_health_upgrade.assert_called() # Verify the exception caused the state to go to failed - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_upgrade_pre_check_subcloud_online_active_k8s_version_check_failed(self): """Test pre check step where the subcloud is online but is unhealthy @@ -346,12 +399,13 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): """ # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=consts.DEPLOY_STATE_DONE + ) - 
self.sysinv_client.get_system_health_upgrade.return_value = \ + self.sysinv_client.get_system_health_upgrade.return_value = ( SYSTEM_HEALTH_UPGRADE_RESPONSE_FAILED_ACTIVE_K8S_VERSION_CHECK + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -360,8 +414,9 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): self.sysinv_client.get_system_health_upgrade.assert_called() # Verify the exception caused the state to go to failed - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_upgrade_pre_check_subcloud_online_failed_health_checks_no_alarms(self): """Test pre check step where the subcloud is online but is unhealthy @@ -372,12 +427,13 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): """ # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=consts.DEPLOY_STATE_DONE + ) - self.sysinv_client.get_system_health_upgrade.return_value = \ + self.sysinv_client.get_system_health_upgrade.return_value = ( SYSTEM_HEALTH_UPGRADE_RESPONSE_K8S_FAILED_HEALTH_CHECKS + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -386,8 +442,9 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): self.sysinv_client.get_system_health_upgrade.assert_called() # Verify the exception caused the state to go to failed - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_upgrade_pre_check_subcloud_online_scratch_undersized(self): """Test pre check step where the subcloud is online undersized scratch @@ -398,15 +455,17 @@ 
class TestSwUpgradePreCheckStage(TestSwUpgradeState): """ # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=consts.DEPLOY_STATE_DONE + ) - self.sysinv_client.get_host_filesystem.side_effect = \ - [CONTROLLER_0_HOST_FS_SCRATCH_UNDER_SIZED] + self.sysinv_client.get_host_filesystem.side_effect = [ + CONTROLLER_0_HOST_FS_SCRATCH_UNDER_SIZED + ] - self.sysinv_client.get_system_health_upgrade.return_value = \ + self.sysinv_client.get_system_health_upgrade.return_value = ( SYSTEM_HEALTH_UPGRADE_RESPONSE_SUCCESS + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -418,8 +477,9 @@ class TestSwUpgradePreCheckStage(TestSwUpgradeState): self.sysinv_client.get_host_filesystem.assert_called() # Verify the exception caused the state to go to failed - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): @@ -427,7 +487,8 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): def test_upgrade_pre_check_subcloud_online_activated(self): """Test pre check step where the subcloud is online and running N+1 load - The pre-check in this scenario should advance directly to 'completing upgrade'. + The pre-check in this scenario should advance directly to + 'completing upgrade'. 
""" # Update the subcloud to have deploy state as "migrated" @@ -449,23 +510,30 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): consts.STRATEGY_STATE_COMPLETING_UPGRADE) def test_upgrade_pre_check_subcloud_online_migrate_failed(self): - """Test pre check step where the subcloud is online following an unlock timeout + """Pre check step where the subcloud is online following an unlock timeout - The pre-check in this scenario should advance directly to 'activating upgrade'. + The pre-check in this scenario should advance directly to + 'activating upgrade'. """ # Update the subcloud to have deploy state as "data-migration-failed" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DATA_MIGRATION_FAILED) + db_api.subcloud_update( + self.ctx, + self.subcloud.id, + deploy_status=consts.DEPLOY_STATE_DATA_MIGRATION_FAILED, + ) - self.sysinv_client.get_system_health_upgrade.return_value = \ + self.sysinv_client.get_system_health_upgrade.return_value = ( SYSTEM_HEALTH_UPGRADE_RESPONSE_MGMT_AFFECTING_ALARM + ) - self.fm_client.get_alarms.return_value = [UPGRADE_ALARM, ] + self.fm_client.get_alarms.return_value = [ + UPGRADE_ALARM, + ] - self.sysinv_client.get_host_filesystem.side_effect = \ - [CONTROLLER_0_HOST_FS_SCRATCH_MIN_SIZED] + self.sysinv_client.get_host_filesystem.side_effect = [ + CONTROLLER_0_HOST_FS_SCRATCH_MIN_SIZED + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -477,18 +545,23 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): self.sysinv_client.get_host_filesystem.assert_called() # verify the DB update was invoked - updated_subcloud = db_api.subcloud_get(self.ctx, - self.subcloud.id) - self.assertEqual(updated_subcloud.deploy_status, consts.DEPLOY_STATE_MIGRATED) + updated_subcloud = db_api.subcloud_get(self.ctx, self.subcloud.id) + self.assertEqual( + updated_subcloud.deploy_status, 
consts.DEPLOY_STATE_MIGRATED + ) # Verify the expected next state happened (activating upgrade) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_ACTIVATING_UPGRADE) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_ACTIVATING_UPGRADE + ) def test_upgrade_pre_check_subcloud_online_migrated(self): - """Test pre check step where the subcloud is online following an activation failure + """Test pre check step where the subcloud is online following an activation - The pre-check in this scenario should advance directly to 'activating upgrade'. + failure + + The pre-check in this scenario should advance directly to + 'activating upgrade'. """ # Update the subcloud to have deploy state as "migrated" @@ -527,15 +600,18 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): ) # Update the subcloud to be online - db_api.subcloud_update(self.ctx, - self.subcloud.id, - availability_status=dccommon_consts.AVAILABILITY_ONLINE) + db_api.subcloud_update( + self.ctx, + self.subcloud.id, + availability_status=dccommon_consts.AVAILABILITY_ONLINE, + ) # Create a fake strategy fake_strategy.create_fake_strategy_step( self.ctx, subcloud_id=self.subcloud.id, - state=consts.STRATEGY_STATE_PRE_CHECK) + state=consts.STRATEGY_STATE_PRE_CHECK, + ) self.strategy_step = db_api.strategy_step_get(self.ctx, self.subcloud.id) @@ -543,8 +619,9 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): self.worker.perform_state_action(self.strategy_step) # Verify the exception caused the state to go to failed - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_upgrade_pre_check_subcloud_online_host_locked_pre_install_failed(self): """Test pre check step where the subcloud is locked and install-failed @@ -556,9 +633,11 @@ class 
TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): """ # Update the subcloud to have deploy state as "pre-install-failed" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_INSTALL_FAILED) + db_api.subcloud_update( + self.ctx, + self.subcloud.id, + deploy_status=consts.DEPLOY_STATE_INSTALL_FAILED, + ) # subcloud is locked self.sysinv_client.get_host.side_effect = [CONTROLLER_0_LOCKED] @@ -567,8 +646,9 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_UPGRADING_SIMPLEX) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_UPGRADING_SIMPLEX + ) def test_upgrade_pre_check_subcloud_online_host_locked_install_failed(self): """Test pre check step where the subcloud is locked and install-failed @@ -580,9 +660,11 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): """ # Update the subcloud to have deploy state as "install-failed" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_INSTALL_FAILED) + db_api.subcloud_update( + self.ctx, + self.subcloud.id, + deploy_status=consts.DEPLOY_STATE_INSTALL_FAILED, + ) # subcloud is locked self.sysinv_client.get_host.side_effect = [CONTROLLER_0_LOCKED] @@ -591,8 +673,9 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_UPGRADING_SIMPLEX) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_UPGRADING_SIMPLEX + ) def test_upgrade_pre_check_subcloud_offline_no_data_install(self): """Test pre check step where the subcloud is offline without data 
install. @@ -609,13 +692,14 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): name=base.SUBCLOUD_2['name'], region_name=base.SUBCLOUD_2['region_name'], data_install=None, - deploy_status=consts.DEPLOY_STATE_INSTALL_FAILED + deploy_status=consts.DEPLOY_STATE_INSTALL_FAILED, ) fake_strategy.create_fake_strategy_step( self.ctx, subcloud_id=self.subcloud.id, - state=consts.STRATEGY_STATE_PRE_CHECK) + state=consts.STRATEGY_STATE_PRE_CHECK, + ) self.strategy_step = db_api.strategy_step_get(self.ctx, self.subcloud.id) @@ -623,8 +707,9 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): self.worker.perform_state_action(self.strategy_step) # Verify the exception caused the state to go to failed - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_upgrade_pre_check_subcloud_jumps_to_migrating(self): """Test pre check step which jumps to the migrating data state @@ -636,17 +721,20 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): # Update the subcloud to have deploy state as "installed", # and availability status as "offline" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_INSTALLED, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE) + db_api.subcloud_update( + self.ctx, + self.subcloud.id, + deploy_status=consts.DEPLOY_STATE_INSTALLED, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (migrating data) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_MIGRATING_DATA) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_MIGRATING_DATA + ) def 
test_upgrade_pre_check_subcloud_jumps_to_activating(self): """Test pre check step which jumps to activating upgrade state @@ -658,10 +746,11 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): # Update the subcloud to have deploy state as "migrated", # and availability status as "offline" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_MIGRATED, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE) + db_api.subcloud_update( + self.ctx, + self.subcloud.id, + deploy_status=consts.DEPLOY_STATE_MIGRATED, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) @@ -680,17 +769,20 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): # Update the subcloud to have deploy state as "data-migration-failed", # and availability status as "offline" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DATA_MIGRATION_FAILED, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE) + db_api.subcloud_update( + self.ctx, + self.subcloud.id, + deploy_status=consts.DEPLOY_STATE_DATA_MIGRATION_FAILED, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_UPGRADING_SIMPLEX) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_UPGRADING_SIMPLEX + ) def test_upgrade_pre_check_subcloud_cannot_proceed(self): """Test pre check step which requires manual intervention to proceed @@ -702,21 +794,23 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage): # Update the subcloud to have deploy state as "bootstrap-failed", # and availability status as "offline" - 
db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_BOOTSTRAP_FAILED, - availability_status=dccommon_consts.AVAILABILITY_OFFLINE) + db_api.subcloud_update( + self.ctx, + self.subcloud.id, + deploy_status=consts.DEPLOY_STATE_BOOTSTRAP_FAILED, + availability_status=dccommon_consts.AVAILABILITY_OFFLINE, + ) # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the exception caused the state to go to failed - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): - def setUp(self): super(TestSwUpgradePreCheckDuplexStage, self).setUp() self.sysinv_client.get_hosts = mock.MagicMock() @@ -725,12 +819,14 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): self.sysinv_client.get_system.return_value = system_values self.sysinv_client.get_upgrades.return_value = [] # Update the subcloud to have deploy state as "complete" - db_api.subcloud_update(self.ctx, - self.subcloud.id, - deploy_status=consts.DEPLOY_STATE_DONE) + db_api.subcloud_update( + self.ctx, self.subcloud.id, deploy_status=consts.DEPLOY_STATE_DONE + ) - def test_upgrade_pre_check_subcloud_online_host_locked_upgrade_started_mgmt_alarms(self): - """Test pre check step where the subcloud is online, locked and upgrade has started + def test_upgrade_pre_check_sc_online_host_locked_upgrade_started_mgmt_alarms( + self, + ): + """Pre check step where the subcloud is online/locked/upgrade has started The pre-check should move to the next step as the upgrade alarm can be ignored and the host locked alarm can also be ignored if upgrade has @@ -739,19 +835,24 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # subcloud's controller-0 is unlocked and active # subcloud's controller-1 is locked 
and standby - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_UNLOCKED_AND_ACTIVE, - CONTROLLER_1_LOCKED_AND_STANDBY] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_UNLOCKED_AND_ACTIVE, + CONTROLLER_1_LOCKED_AND_STANDBY, + ] # upgrade has started - upgrade = FakeUpgrade(state='started') - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + upgrade = FakeUpgrade(state="started") + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (installing license) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_INSTALLING_LICENSE) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_INSTALLING_LICENSE + ) def test_upgrade_pre_check_subcloud_data_migration_failed(self): """Test pre check step where the subcloud's controller-1 is locked and @@ -767,19 +868,24 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is data-migration-failed upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_DATA_MIGRATION_FAILED) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # subcloud's controller-0 is unlocked and active # subcloud's controller-1 is locked and standby - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_UNLOCKED_AND_ACTIVE, - CONTROLLER_1_LOCKED_AND_STANDBY] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_UNLOCKED_AND_ACTIVE, + CONTROLLER_1_LOCKED_AND_STANDBY, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the exception caused the state to go to failed - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, 
consts.STRATEGY_STATE_FAILED + ) def test_upgrade_pre_check_subcloud_in_data_migration_upgrade_state(self): """Test pre check step where the subcloud's controller-1 is locked and @@ -795,14 +901,17 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is data-migration upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_DATA_MIGRATION) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the exception caused the state to go to failed - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_FAILED) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED + ) def test_upgrade_pre_check_jumps_to_unlock_controller_1(self): """Test pre check step which jumps to unlock controller-1 state. @@ -814,19 +923,25 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is upgrading-controllers upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_UPGRADING_CONTROLLERS) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # subcloud's controller-0 is unlocked and active # subcloud's controller-1 is locked and standby - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_UNLOCKED_AND_ACTIVE, - CONTROLLER_1_LOCKED_AND_STANDBY] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_UNLOCKED_AND_ACTIVE, + CONTROLLER_1_LOCKED_AND_STANDBY, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_1) + self.assert_step_updated( + self.strategy_step.subcloud_id, + 
consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_1, + ) def test_upgrade_pre_check_jumps_to_swacting_to_controller_1(self): """Test pre check step which jumps to swacting to controller-1 state. @@ -838,19 +953,25 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is upgrading-controllers upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_UPGRADING_CONTROLLERS) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # subcloud's controller-0 is unlocked and active # subcloud's controller-1 is unlocked and standby - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_UNLOCKED_AND_ACTIVE, - CONTROLLER_1_UNLOCKED_AND_STANDBY] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_UNLOCKED_AND_ACTIVE, + CONTROLLER_1_UNLOCKED_AND_STANDBY, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_1) + self.assert_step_updated( + self.strategy_step.subcloud_id, + consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_1, + ) def test_upgrade_pre_check_jumps_to_creating_vim_strategy(self): """Test pre check step which jumps to creating vim startegy state. 
@@ -862,19 +983,25 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is upgrading-controllers upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_UPGRADING_CONTROLLERS) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # subcloud's controller-0 is unlocked and active # subcloud's controller-1 is unlocked and standby - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_UNLOCKED_AND_STANDBY, - CONTROLLER_1_UNLOCKED_AND_ACTIVE] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_UNLOCKED_AND_STANDBY, + CONTROLLER_1_UNLOCKED_AND_ACTIVE, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_CREATING_VIM_UPGRADE_STRATEGY) + self.assert_step_updated( + self.strategy_step.subcloud_id, + consts.STRATEGY_STATE_CREATING_VIM_UPGRADE_STRATEGY, + ) def test_upgrade_pre_check_subcloud_some_hosts_not_upgraded(self): """Test pre check step which jumps to creating vim strategy state. 
@@ -886,22 +1013,30 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is upgrading-hosts upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_UPGRADING_HOSTS) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # subcloud's controller-0 is unlocked, standby and not upgraded # subcloud's controller-1 is unlocked, active and upgraded - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_NOT_UPGRADED, - CONTROLLER_1_UPGRADED_ACTIVE] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_NOT_UPGRADED, + CONTROLLER_1_UPGRADED_ACTIVE, + ] - self.sysinv_client.get_hosts.return_value = [CONTROLLER_0_NOT_UPGRADED, - CONTROLLER_1_UPGRADED_ACTIVE, ] + self.sysinv_client.get_hosts.return_value = [ + CONTROLLER_0_NOT_UPGRADED, + CONTROLLER_1_UPGRADED_ACTIVE, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_CREATING_VIM_UPGRADE_STRATEGY) + self.assert_step_updated( + self.strategy_step.subcloud_id, + consts.STRATEGY_STATE_CREATING_VIM_UPGRADE_STRATEGY, + ) def test_upgrade_pre_check_jumps_to_swacting_to_controller_0(self): """Test pre check step which jumps to swacting to controller-0 state. 
@@ -914,22 +1049,30 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is upgrading-hosts upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_UPGRADING_HOSTS) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # subcloud's controller-0 is unlocked, standby and upgraded # subcloud's controller-1 is unlocked, active and upgraded - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_UPGRADED_STANDBY, - CONTROLLER_1_UPGRADED_ACTIVE] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_UPGRADED_STANDBY, + CONTROLLER_1_UPGRADED_ACTIVE, + ] - self.sysinv_client.get_hosts.return_value = [CONTROLLER_0_UPGRADED_STANDBY, - CONTROLLER_1_UPGRADED_ACTIVE, ] + self.sysinv_client.get_hosts.return_value = [ + CONTROLLER_0_UPGRADED_STANDBY, + CONTROLLER_1_UPGRADED_ACTIVE, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_0) + self.assert_step_updated( + self.strategy_step.subcloud_id, + consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_0, + ) def test_upgrade_pre_check_jumps_to_activating_upgrade(self): """Test pre check step which jumps to activating upgrade state. 
@@ -942,22 +1085,29 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is upgrading-hosts upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_UPGRADING_HOSTS) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # subcloud's controller-0 is unlocked, active and upgraded # subcloud's controller-1 is unlocked, standby and upgraded - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_UPGRADED_ACTIVE, - CONTROLLER_1_UPGRADED_STANDBY] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_UPGRADED_ACTIVE, + CONTROLLER_1_UPGRADED_STANDBY, + ] - self.sysinv_client.get_hosts.return_value = [CONTROLLER_0_UPGRADED_ACTIVE, - CONTROLLER_1_UPGRADED_STANDBY, ] + self.sysinv_client.get_hosts.return_value = [ + CONTROLLER_0_UPGRADED_ACTIVE, + CONTROLLER_1_UPGRADED_STANDBY, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_ACTIVATING_UPGRADE) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_ACTIVATING_UPGRADE + ) def test_upgrade_pre_check_activation_failed_controller_0_active(self): """Test pre check step which jumps to activating upgrade state. 
@@ -970,19 +1120,24 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is activation-failed upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_ACTIVATION_FAILED) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # subcloud's controller-0 is unlocked, active and upgraded # subcloud's controller-1 is unlocked, standby and upgraded - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_UPGRADED_ACTIVE, - CONTROLLER_1_UPGRADED_STANDBY] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_UPGRADED_ACTIVE, + CONTROLLER_1_UPGRADED_STANDBY, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_ACTIVATING_UPGRADE) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_ACTIVATING_UPGRADE + ) def test_upgrade_pre_check_activation_failed_controller_1_active(self): """Test pre check step which jumps to activating upgrade state. 
@@ -995,19 +1150,25 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is activation-failed upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_ACTIVATION_FAILED) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # subcloud's controller-0 is unlocked, standby and upgraded # subcloud's controller-1 is unlocked, active and upgraded - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_UPGRADED_STANDBY, - CONTROLLER_1_UPGRADED_ACTIVE] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_UPGRADED_STANDBY, + CONTROLLER_1_UPGRADED_ACTIVE, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_0) + self.assert_step_updated( + self.strategy_step.subcloud_id, + consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_0, + ) def test_upgrade_pre_check_jumps_to_completing_upgrade_state(self): """Test pre check step which jumps to completing upgrade state. 
@@ -1020,16 +1181,21 @@ class TestSwUpgradePreCheckDuplexStage(TestSwUpgradePreCheckStage): # upgrade state is activation-complete upgrade = FakeUpgrade(state=consts.UPGRADE_STATE_ACTIVATION_COMPLETE) - self.sysinv_client.get_upgrades.return_value = [upgrade, ] + self.sysinv_client.get_upgrades.return_value = [ + upgrade, + ] # subcloud's controller-0 is unlocked, active and upgraded # subcloud's controller-1 is unlocked, standby and upgraded - self.sysinv_client.get_host.side_effect = [CONTROLLER_0_UPGRADED_ACTIVE, - CONTROLLER_1_UPGRADED_STANDBY] + self.sysinv_client.get_host.side_effect = [ + CONTROLLER_0_UPGRADED_ACTIVE, + CONTROLLER_1_UPGRADED_STANDBY, + ] # invoke the strategy state operation on the orch thread self.worker.perform_state_action(self.strategy_step) # Verify the expected next state happened (upgrading) - self.assert_step_updated(self.strategy_step.subcloud_id, - consts.STRATEGY_STATE_COMPLETING_UPGRADE) + self.assert_step_updated( + self.strategy_step.subcloud_id, consts.STRATEGY_STATE_COMPLETING_UPGRADE + ) diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_starting_upgrade.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_starting_upgrade.py index f15259e7c..90d317ce5 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_starting_upgrade.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_starting_upgrade.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2020, 2022 Wind River Systems, Inc. +# Copyright (c) 2020, 2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import itertools import mock @@ -11,8 +12,8 @@ from dcmanager.orchestrator.states.upgrade import starting_upgrade from dcmanager.tests.unit.orchestrator.states.fakes import FakeSystem from dcmanager.tests.unit.orchestrator.states.fakes import FakeUpgrade -from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ - import TestSwUpgradeState +from dcmanager.tests.unit.orchestrator.states.upgrade.test_base import \ + TestSwUpgradeState UPGRADE_ABORTING = FakeUpgrade(state='aborting') UPGRADE_STARTING = FakeUpgrade(state='starting') @@ -37,8 +38,8 @@ class TestSwUpgradeSimplexStartingUpgradeStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_STARTING_UPGRADE) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_STARTING_UPGRADE) # Add mock API endpoints for sysinv client calls invoked by this state self.sysinv_client.upgrade_start = mock.MagicMock() @@ -209,7 +210,8 @@ class TestSwUpgradeSimplexStartingUpgradeStage(TestSwUpgradeState): ".DEFAULT_MAX_QUERIES", 3) @mock.patch("dcmanager.orchestrator.states.upgrade.starting_upgrade" ".DEFAULT_SLEEP_DURATION", 1) -class TestSwUpgradeDuplexStartingUpgradeStage(TestSwUpgradeSimplexStartingUpgradeStage): +class TestSwUpgradeDuplexStartingUpgradeStage( + TestSwUpgradeSimplexStartingUpgradeStage): def setUp(self): super(TestSwUpgradeDuplexStartingUpgradeStage, self).setUp() diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_swact_controller.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_swact_controller.py index 6cb3d417f..7b61682ef 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_swact_controller.py +++ 
b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_swact_controller.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2021-2022 Wind River Systems, Inc. +# Copyright (c) 2021-2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import itertools import mock @@ -43,10 +44,10 @@ class TestSwUpgradeSwactToController0Stage(TestSwUpgradeState): def setup_fake_controllers(self, host_name): self.CONTROLLER_ACTIVE = FakeController(hostname=host_name) - self.CONTROLLER_STANDBY = FakeController(hostname=host_name, - capabilities={"Personality": "Controller-Standby"}) - self.CONTROLLER_SWACTING = FakeController(hostname=host_name, - task='Swacting') + self.CONTROLLER_STANDBY = FakeController( + hostname=host_name, capabilities={"Personality": "Controller-Standby"}) + self.CONTROLLER_SWACTING = FakeController( + hostname=host_name, task='Swacting') def test_swact_success(self): """Test the swact command returns a success""" @@ -89,7 +90,7 @@ class TestSwUpgradeSwactToController0Stage(TestSwUpgradeState): self.on_success_state) def test_swact_attempt_timeout(self): - """Test swact invoked and fails if timeout before host becomes active controller""" + """Test swact invoked and fails if timeout before host becomes active""" # mock the get_host queries # all remaining queries, the host returns 'Controller-Standby' diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_updating_patches.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_updating_patches.py index 5c6bb2fff..bca488f59 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_updating_patches.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_updating_patches.py @@ -1,13 +1,14 @@ # -# Copyright (c) 2020, 2023 Wind River Systems, Inc. +# Copyright (c) 2020, 2023-2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # -import mock + from os import path as os_path -from dcmanager.common import consts +import mock +from dcmanager.common import consts from dcmanager.tests.unit.orchestrator.states.fakes import FakeLoad from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ import TestSwUpgradeState @@ -97,10 +98,11 @@ class TestSwUpgradeUpdatingPatchesStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_UPDATING_PATCHES) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_UPDATING_PATCHES) - # Add mock API endpoints for patching and sysinv client calls invoked by this state + # Add mock API endpoints for patching and sysinv client calls invoked by + # this state self.patching_client.query = mock.MagicMock() self.sysinv_client.get_loads = mock.MagicMock() self.patching_client.remove = mock.MagicMock() diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_upgrading_duplex.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_upgrading_duplex.py index 28beec5e7..492270a8c 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_upgrading_duplex.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_upgrading_duplex.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020, 2022 Wind River Systems, Inc. +# Copyright (c) 2020, 2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -10,8 +10,8 @@ from dcmanager.common import consts from dcmanager.orchestrator.states.upgrade import upgrading_duplex from dcmanager.tests.unit.orchestrator.states.fakes import FakeUpgrade -from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ - import TestSwUpgradeState +from dcmanager.tests.unit.orchestrator.states.upgrade.test_base import \ + TestSwUpgradeState UPGRADE_ABORTING = FakeUpgrade(state='aborting') UPGRADE_STARTED = FakeUpgrade(state='started') @@ -35,8 +35,8 @@ class TestSwUpgradeUpgradingDuplexStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_UPGRADING_DUPLEX) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_UPGRADING_DUPLEX) # Add mock API endpoints for sysinv client calls invoked by this state self.sysinv_client.get_host = mock.MagicMock() diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_upgrading_simplex.py b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_upgrading_simplex.py index 31dfbac8a..520603308 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_upgrading_simplex.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/states/upgrade/test_upgrading_simplex.py @@ -1,20 +1,19 @@ # -# Copyright (c) 2020, 2022 Wind River Systems, Inc. +# Copyright (c) 2020, 2022, 2024 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # + import mock +from tsconfig.tsconfig import SW_VERSION from dcmanager.common import consts from dcmanager.db.sqlalchemy import api as db_api - from dcmanager.tests.unit.orchestrator.states.fakes import FakeLoad from dcmanager.tests.unit.orchestrator.states.fakes import PREVIOUS_VERSION from dcmanager.tests.unit.orchestrator.states.upgrade.test_base \ import TestSwUpgradeState -from tsconfig.tsconfig import SW_VERSION - # UpgradingSimplexState uses SW_VERSION as the upgraded version check UPGRADED_VERSION = SW_VERSION @@ -38,8 +37,8 @@ class TestSwUpgradeUpgradingSimplexStage(TestSwUpgradeState): self.subcloud = self.setup_subcloud() # Add the strategy_step state being processed by this unit test - self.strategy_step = \ - self.setup_strategy_step(self.subcloud.id, consts.STRATEGY_STATE_UPGRADING_SIMPLEX) + self.strategy_step = self.setup_strategy_step( + self.subcloud.id, consts.STRATEGY_STATE_UPGRADING_SIMPLEX) # simulate get_vault_load_files finding the iso and sig in the vault p = mock.patch('dcmanager.common.utils.get_vault_load_files') diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/test_base.py b/distributedcloud/dcmanager/tests/unit/orchestrator/test_base.py index cf050acda..6f8000e14 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/test_base.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/test_base.py @@ -1,18 +1,19 @@ -# Copyright (c) 2017-2023 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # import mock - from oslo_config import cfg from dccommon import consts as dccommon_consts @@ -21,7 +22,6 @@ from dcmanager.common import context from dcmanager.db.sqlalchemy import api as db_api from dcmanager.orchestrator.states.base import BaseState from dcmanager.orchestrator import sw_update_manager - from dcmanager.tests import base from dcmanager.tests.unit.common import fake_strategy from dcmanager.tests.unit.common import fake_subcloud diff --git a/distributedcloud/dcmanager/tests/unit/orchestrator/test_sw_update_manager.py b/distributedcloud/dcmanager/tests/unit/orchestrator/test_sw_update_manager.py index ccec7d14f..66d20b10c 100644 --- a/distributedcloud/dcmanager/tests/unit/orchestrator/test_sw_update_manager.py +++ b/distributedcloud/dcmanager/tests/unit/orchestrator/test_sw_update_manager.py @@ -1,15 +1,17 @@ -# Copyright (c) 2017-2023 Wind River Systems, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2017-2024 Wind River Systems, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. # import base64 @@ -32,16 +34,16 @@ from dcmanager.tests import base from dcmanager.tests import utils -OAM_FLOATING_IP = '10.10.10.12' +OAM_FLOATING_IP = "10.10.10.12" CONF = cfg.CONF -FAKE_ID = '1' +FAKE_ID = "1" FAKE_SW_UPDATE_DATA = { "type": consts.SW_UPDATE_TYPE_PATCH, "subcloud-apply-type": consts.SUBCLOUD_APPLY_TYPE_PARALLEL, "max-parallel-subclouds": "2", "stop-on-failure": "true", "force": "false", - "state": consts.SW_UPDATE_STATE_INITIAL + "state": consts.SW_UPDATE_STATE_INITIAL, } FAKE_SW_PRESTAGE_DATA = { @@ -62,8 +64,7 @@ FAKE_SW_PATCH_DATA = { "state": consts.SW_UPDATE_STATE_INITIAL, } -health_report_no_mgmt_alarm = \ - "System Health:\n \ +health_report_no_mgmt_alarm = "System Health:\n \ All hosts are provisioned: [Fail]\n \ 1 Unprovisioned hosts\n \ All hosts are unlocked/enabled: [OK]\n \ @@ -78,7 +79,7 @@ class Subcloud(object): def __init__(self, id, name, group_id, is_managed, is_online): self.id = id self.name = name - self.software_version = '12.04' + self.software_version = "12.04" self.group_id = group_id if is_managed: self.management_state = dccommon_consts.MANAGEMENT_MANAGED @@ 
-98,7 +99,6 @@ class FakeOrchThread(object): class FakeDCManagerAuditAPI(object): - def __init__(self): self.trigger_patch_audit = mock.MagicMock() @@ -110,7 +110,7 @@ class TestSwUpdateManager(base.DCManagerTestCase): "name": name, "description": "subcloud1 description", "location": "subcloud1 location", - 'software_version': "18.03", + "software_version": "18.03", "management_subnet": "192.168.101.0/24", "management_gateway_ip": "192.168.101.1", "management_start_ip": "192.168.101.3", @@ -126,17 +126,18 @@ class TestSwUpdateManager(base.DCManagerTestCase): subcloud = db_api.subcloud_create(ctxt, **values) if is_managed: state = dccommon_consts.MANAGEMENT_MANAGED - subcloud = db_api.subcloud_update(ctxt, subcloud.id, - management_state=state) + subcloud = db_api.subcloud_update( + ctxt, subcloud.id, management_state=state + ) if is_online: status = dccommon_consts.AVAILABILITY_ONLINE - subcloud = db_api.subcloud_update(ctxt, subcloud.id, - availability_status=status) + subcloud = db_api.subcloud_update( + ctxt, subcloud.id, availability_status=status + ) return subcloud @staticmethod - def create_subcloud_group(ctxt, name, update_apply_type, - max_parallel_subclouds): + def create_subcloud_group(ctxt, name, update_apply_type, max_parallel_subclouds): values = { "name": name, "description": "subcloud1 description", @@ -146,8 +147,7 @@ class TestSwUpdateManager(base.DCManagerTestCase): return db_api.subcloud_group_create(ctxt, **values) @staticmethod - def update_subcloud_status(ctxt, subcloud_id, - endpoint=None, status=None): + def update_subcloud_status(ctxt, subcloud_id, endpoint=None, status=None): if endpoint: endpoint_type = endpoint else: @@ -157,10 +157,9 @@ class TestSwUpdateManager(base.DCManagerTestCase): else: sync_status = dccommon_consts.SYNC_STATUS_OUT_OF_SYNC - subcloud_status = db_api.subcloud_status_update(ctxt, - subcloud_id, - endpoint_type, - sync_status) + subcloud_status = db_api.subcloud_status_update( + ctxt, subcloud_id, endpoint_type, 
sync_status + ) return subcloud_status @staticmethod @@ -188,158 +187,174 @@ class TestSwUpdateManager(base.DCManagerTestCase): super(TestSwUpdateManager, self).setUp() # Mock the context self.ctxt = utils.dummy_context() - p = mock.patch.object(context, 'get_admin_context') + p = mock.patch.object(context, "get_admin_context") self.mock_get_admin_context = p.start() self.mock_get_admin_context.return_value = self.ctx self.addCleanup(p.stop) # Note: mock where an item is used, not where it comes from self.fake_sw_upgrade_orch_thread = FakeOrchThread() - p = mock.patch.object(sw_update_manager, 'SwUpgradeOrchThread') + p = mock.patch.object(sw_update_manager, "SwUpgradeOrchThread") self.mock_sw_upgrade_orch_thread = p.start() - self.mock_sw_upgrade_orch_thread.return_value = \ + self.mock_sw_upgrade_orch_thread.return_value = ( self.fake_sw_upgrade_orch_thread + ) self.addCleanup(p.stop) self.fake_fw_update_orch_thread = FakeOrchThread() - p = mock.patch.object(sw_update_manager, 'FwUpdateOrchThread') + p = mock.patch.object(sw_update_manager, "FwUpdateOrchThread") self.mock_fw_update_orch_thread = p.start() - self.mock_fw_update_orch_thread.return_value = \ + self.mock_fw_update_orch_thread.return_value = ( self.fake_fw_update_orch_thread + ) self.addCleanup(p.stop) self.fake_kube_upgrade_orch_thread = FakeOrchThread() - p = mock.patch.object(sw_update_manager, 'KubeUpgradeOrchThread') + p = mock.patch.object(sw_update_manager, "KubeUpgradeOrchThread") self.mock_kube_upgrade_orch_thread = p.start() - self.mock_kube_upgrade_orch_thread.return_value = \ + self.mock_kube_upgrade_orch_thread.return_value = ( self.fake_kube_upgrade_orch_thread + ) self.addCleanup(p.stop) self.fake_kube_rootca_update_orch_thread = FakeOrchThread() - p = mock.patch.object(sw_update_manager, 'KubeRootcaUpdateOrchThread') + p = mock.patch.object(sw_update_manager, "KubeRootcaUpdateOrchThread") self.mock_kube_rootca_update_orch_thread = p.start() - 
self.mock_kube_rootca_update_orch_thread.return_value = \ + self.mock_kube_rootca_update_orch_thread.return_value = ( self.fake_kube_rootca_update_orch_thread + ) self.addCleanup(p.stop) self.fake_prestage_orch_thread = FakeOrchThread() - p = mock.patch.object(sw_update_manager, 'PrestageOrchThread') + p = mock.patch.object(sw_update_manager, "PrestageOrchThread") self.mock_prestage_orch_thread = p.start() - self.mock_prestage_orch_thread.return_value = \ - self.fake_prestage_orch_thread + self.mock_prestage_orch_thread.return_value = self.fake_prestage_orch_thread self.addCleanup(p.stop) # Mock the dcmanager audit API self.fake_dcmanager_audit_api = FakeDCManagerAuditAPI() - p = mock.patch('dcmanager.audit.rpcapi.ManagerAuditClient') + p = mock.patch("dcmanager.audit.rpcapi.ManagerAuditClient") self.mock_dcmanager_audit_api = p.start() - self.mock_dcmanager_audit_api.return_value = \ - self.fake_dcmanager_audit_api + self.mock_dcmanager_audit_api.return_value = self.fake_dcmanager_audit_api self.addCleanup(p.stop) # Fake subcloud groups # Group 1 exists by default in database with max_parallel 2 and # apply_type parallel - self.fake_group2 = self.create_subcloud_group(self.ctxt, - "Group2", - consts.SUBCLOUD_APPLY_TYPE_SERIAL, - 2) - self.fake_group3 = self.create_subcloud_group(self.ctxt, - "Group3", - consts.SUBCLOUD_APPLY_TYPE_PARALLEL, - 2) - self.fake_group4 = self.create_subcloud_group(self.ctxt, - "Group4", - consts.SUBCLOUD_APPLY_TYPE_SERIAL, - 2) - self.fake_group5 = self.create_subcloud_group(self.ctxt, - "Group5", - consts.SUBCLOUD_APPLY_TYPE_PARALLEL, - 2) + self.fake_group2 = self.create_subcloud_group( + self.ctxt, "Group2", consts.SUBCLOUD_APPLY_TYPE_SERIAL, 2 + ) + self.fake_group3 = self.create_subcloud_group( + self.ctxt, "Group3", consts.SUBCLOUD_APPLY_TYPE_PARALLEL, 2 + ) + self.fake_group4 = self.create_subcloud_group( + self.ctxt, "Group4", consts.SUBCLOUD_APPLY_TYPE_SERIAL, 2 + ) + self.fake_group5 = self.create_subcloud_group( + self.ctxt, 
"Group5", consts.SUBCLOUD_APPLY_TYPE_PARALLEL, 2 + ) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_init(self, mock_patch_orch_thread): um = sw_update_manager.SwUpdateManager() self.assertIsNotNone(um) - self.assertEqual('sw_update_manager', um.service_name) - self.assertEqual('localhost', um.host) + self.assertEqual("sw_update_manager", um.service_name) + self.assertEqual("localhost", um.host) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_create_sw_update_strategy_no_subclouds( - self, mock_patch_orch_thread): + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_create_sw_update_strategy_no_subclouds(self, mock_patch_orch_thread): um = sw_update_manager.SwUpdateManager() # No strategy will be created, so it should raise: # 'Bad strategy request: Strategy has no steps to apply' - self.assertRaises(exceptions.BadRequest, - um.create_sw_update_strategy, - self.ctxt, payload=FAKE_SW_UPDATE_DATA) + self.assertRaises( + exceptions.BadRequest, + um.create_sw_update_strategy, + self.ctxt, + payload=FAKE_SW_UPDATE_DATA, + ) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_for_a_single_group( - self, mock_patch_orch_thread): + self, mock_patch_orch_thread + ): # Create fake subclouds and respective status # Subcloud1 will be patched - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', - self.fake_group2.id, - is_managed=True, is_online=True) + fake_subcloud1 = self.create_subcloud( + self.ctxt, + "subcloud1", + self.fake_group2.id, + is_managed=True, + is_online=True, + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) # Subcloud2 will not be patched because not managed - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', - self.fake_group2.id, - is_managed=False, is_online=True) + fake_subcloud2 = self.create_subcloud( + 
self.ctxt, + "subcloud2", + self.fake_group2.id, + is_managed=False, + is_online=True, + ) self.update_subcloud_status(self.ctxt, fake_subcloud2.id) data = copy.copy(FAKE_SW_UPDATE_DATA) - data['subcloud_group'] = str(self.fake_group2.id) + data["subcloud_group"] = str(self.fake_group2.id) um = sw_update_manager.SwUpdateManager() - response = um.create_sw_update_strategy( - self.ctxt, payload=data) + response = um.create_sw_update_strategy(self.ctxt, payload=data) # Verify strategy was created as expected using group values - self.assertEqual(response['max-parallel-subclouds'], 1) - self.assertEqual(response['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_SERIAL) - self.assertEqual(response['type'], - FAKE_SW_UPDATE_DATA['type']) + self.assertEqual(response["max-parallel-subclouds"], 1) + self.assertEqual( + response["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_SERIAL + ) + self.assertEqual(response["type"], FAKE_SW_UPDATE_DATA["type"]) # Verify strategy step was created as expected strategy_steps = db_api.strategy_step_get_all(self.ctx) - self.assertEqual(strategy_steps[0]['state'], - consts.STRATEGY_STATE_INITIAL) - self.assertEqual(strategy_steps[0]['details'], - '') - self.assertEqual(strategy_steps[0]['subcloud_id'], - 1) + self.assertEqual(strategy_steps[0]["state"], consts.STRATEGY_STATE_INITIAL) + self.assertEqual(strategy_steps[0]["details"], "") + self.assertEqual(strategy_steps[0]["subcloud_id"], 1) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_parallel_for_a_single_group( - self, mock_patch_orch_thread): + self, mock_patch_orch_thread + ): # Create fake subclouds and respective status - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', - self.fake_group3.id, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, fake_subcloud1.id, - endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD) + fake_subcloud1 = 
self.create_subcloud( + self.ctxt, + "subcloud1", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud1.id, endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD + ) - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', - self.fake_group3.id, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, fake_subcloud2.id, - endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD) + fake_subcloud2 = self.create_subcloud( + self.ctxt, + "subcloud2", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud2.id, endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD + ) data = copy.copy(FAKE_SW_UPDATE_DATA) data["type"] = consts.SW_UPDATE_TYPE_UPGRADE - data['subcloud_group'] = str(self.fake_group3.id) + data["subcloud_group"] = str(self.fake_group3.id) um = sw_update_manager.SwUpdateManager() - response = um.create_sw_update_strategy( - self.ctxt, payload=data) + response = um.create_sw_update_strategy(self.ctxt, payload=data) # Verify strategy was created as expected using group values - self.assertEqual(response['max-parallel-subclouds'], 2) - self.assertEqual(response['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) - self.assertEqual(response['type'], consts.SW_UPDATE_TYPE_UPGRADE) + self.assertEqual(response["max-parallel-subclouds"], 2) + self.assertEqual( + response["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) + self.assertEqual(response["type"], consts.SW_UPDATE_TYPE_UPGRADE) # Verify the strategy step list subcloud_ids = [1, 2] @@ -347,45 +362,57 @@ class TestSwUpdateManager(base.DCManagerTestCase): for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(prestage, 'initial_subcloud_validate') - @mock.patch.object(prestage, 'global_prestage_validate') - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + 
@mock.patch.object(prestage, "initial_subcloud_validate") + @mock.patch.object(prestage, "global_prestage_validate") + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_prestage_strategy_parallel_for_a_single_group( - self, - mock_patch_orch_thread, - mock_global_prestage_validate, - mock_initial_subcloud_validate): - + self, + mock_patch_orch_thread, + mock_global_prestage_validate, + mock_initial_subcloud_validate, + ): # Create fake subclouds and respective status - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', - self.fake_group3.id, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, fake_subcloud1.id, - endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD) + fake_subcloud1 = self.create_subcloud( + self.ctxt, + "subcloud1", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud1.id, endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD + ) - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', - self.fake_group3.id, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, fake_subcloud2.id, - endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD) + fake_subcloud2 = self.create_subcloud( + self.ctxt, + "subcloud2", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud2.id, endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD + ) mock_global_prestage_validate.return_value = None mock_initial_subcloud_validate.return_value = None data = copy.copy(FAKE_SW_PRESTAGE_DATA) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data['sysadmin_password'] = fake_password + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data["sysadmin_password"] = fake_password - data['subcloud_group'] = str(self.fake_group3.id) + data["subcloud_group"] = str(self.fake_group3.id) um = sw_update_manager.SwUpdateManager() - response = 
um.create_sw_update_strategy( - self.ctxt, payload=data) + response = um.create_sw_update_strategy(self.ctxt, payload=data) # Verify strategy was created as expected using group values - self.assertEqual(response['max-parallel-subclouds'], 2) - self.assertEqual(response['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) - self.assertEqual(response['type'], consts.SW_UPDATE_TYPE_PRESTAGE) + self.assertEqual(response["max-parallel-subclouds"], 2) + self.assertEqual( + response["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) + self.assertEqual(response["type"], consts.SW_UPDATE_TYPE_PRESTAGE) # Verify the strategy step list subcloud_ids = [1, 2] @@ -393,64 +420,75 @@ class TestSwUpdateManager(base.DCManagerTestCase): for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(prestage, 'initial_subcloud_validate') - @mock.patch.object(prestage, 'global_prestage_validate') - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_create_sw_prestage_strategy_load_in_sync_out_of_sync_unknown_and_no_load( - self, - mock_patch_orch_thread, - mock_global_prestage_validate, - mock_initial_subcloud_validate): - + @mock.patch.object(prestage, "initial_subcloud_validate") + @mock.patch.object(prestage, "global_prestage_validate") + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_create_sw_prestage_strategy_load_insync_out_of_sync_unknown_and_no_load( + self, + mock_patch_orch_thread, + mock_global_prestage_validate, + mock_initial_subcloud_validate, + ): # Create fake subclouds and respective status # Subcloud1 will be prestaged load in sync - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud1.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_IN_SYNC) + fake_subcloud1 = self.create_subcloud( + self.ctxt, 
"subcloud1", 1, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud1.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_IN_SYNC, + ) # Subcloud2 will be prestaged load is None - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud2.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - None) + fake_subcloud2 = self.create_subcloud( + self.ctxt, "subcloud2", 1, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud2.id, dccommon_consts.ENDPOINT_TYPE_LOAD, None + ) # Subcloud3 will be prestaged load out of sync - fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', 1, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud3.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) + fake_subcloud3 = self.create_subcloud( + self.ctxt, "subcloud3", 1, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud3.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, + ) # Subcloud4 will be prestaged sync unknown - fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 1, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud4.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_UNKNOWN) + fake_subcloud4 = self.create_subcloud( + self.ctxt, "subcloud4", 1, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud4.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_UNKNOWN, + ) mock_global_prestage_validate.return_value = None mock_initial_subcloud_validate.return_value = None data = copy.copy(FAKE_SW_PRESTAGE_DATA) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data['sysadmin_password'] 
= fake_password + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data["sysadmin_password"] = fake_password um = sw_update_manager.SwUpdateManager() - response = um.create_sw_update_strategy( - self.ctxt, payload=data) + response = um.create_sw_update_strategy(self.ctxt, payload=data) # Verify strategy was created as expected using group values - self.assertEqual(response['max-parallel-subclouds'], 2) - self.assertEqual(response['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) - self.assertEqual(response['type'], consts.SW_UPDATE_TYPE_PRESTAGE) + self.assertEqual(response["max-parallel-subclouds"], 2) + self.assertEqual( + response["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) + self.assertEqual(response["type"], consts.SW_UPDATE_TYPE_PRESTAGE) # Verify the strategy step list subcloud_ids = [1, 2, 3, 4] @@ -458,134 +496,173 @@ class TestSwUpdateManager(base.DCManagerTestCase): for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(prestage, 'initial_subcloud_validate') - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_create_sw_prestage_strategy_no_password(self, - mock_patch_orch_thread, - mock_controller_upgrade, - mock_initial_subcloud_validate): - + @mock.patch.object(prestage, "initial_subcloud_validate") + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_create_sw_prestage_strategy_no_password( + self, + mock_patch_orch_thread, + mock_controller_upgrade, + mock_initial_subcloud_validate, + ): # Create fake subclouds and respective status - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', - self.fake_group3.id, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, fake_subcloud1.id, - 
endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD) + fake_subcloud1 = self.create_subcloud( + self.ctxt, + "subcloud1", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud1.id, endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD + ) - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', - self.fake_group3.id, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, fake_subcloud2.id, - endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD) + fake_subcloud2 = self.create_subcloud( + self.ctxt, + "subcloud2", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud2.id, endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD + ) mock_initial_subcloud_validate.return_value = None mock_controller_upgrade.return_value = list() data = copy.copy(FAKE_SW_PRESTAGE_DATA) - data['sysadmin_password'] = '' - data['subcloud_group'] = str(self.fake_group3.id) + data["sysadmin_password"] = "" + data["subcloud_group"] = str(self.fake_group3.id) um = sw_update_manager.SwUpdateManager() - self.assertRaises(exceptions.BadRequest, - um.create_sw_update_strategy, - self.ctxt, payload=data) + self.assertRaises( + exceptions.BadRequest, + um.create_sw_update_strategy, + self.ctxt, + payload=data, + ) - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_create_sw_prestage_strategy_backup_in_progress(self, - mock_patch_orch_thread, - mock_controller_upgrade): + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_create_sw_prestage_strategy_backup_in_progress( + self, mock_patch_orch_thread, mock_controller_upgrade + ): mock_controller_upgrade.return_value = list() # Create fake subcloud and respective status (managed & online) - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', - 
self.fake_group3.id, - is_managed=True, is_online=True) + fake_subcloud1 = self.create_subcloud( + self.ctxt, + "subcloud1", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) - db_api.subcloud_update(self.ctx, - fake_subcloud1.id, - backup_status=consts.BACKUP_STATE_IN_PROGRESS) + db_api.subcloud_update( + self.ctx, + fake_subcloud1.id, + backup_status=consts.BACKUP_STATE_IN_PROGRESS, + ) data = copy.copy(FAKE_SW_PRESTAGE_DATA) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode( - 'ascii') - data['sysadmin_password'] = fake_password - data['cloud_name'] = 'subcloud1' + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data["sysadmin_password"] = fake_password + data["cloud_name"] = "subcloud1" um = sw_update_manager.SwUpdateManager() - self.assertRaises(exceptions.BadRequest, - um.create_sw_update_strategy, - self.ctxt, payload=data) + self.assertRaises( + exceptions.BadRequest, + um.create_sw_update_strategy, + self.ctxt, + payload=data, + ) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_create_sw_update_strategy_cloud_name_not_exists(self, - mock_patch_orch_thread): + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_create_sw_update_strategy_cloud_name_not_exists( + self, mock_patch_orch_thread + ): # Create fake subclouds and respective status - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', - self.fake_group3.id, - is_managed=True, is_online=True) + fake_subcloud1 = self.create_subcloud( + self.ctxt, + "subcloud1", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) data = copy.copy(FAKE_SW_UPDATE_DATA) # Create a strategy with a cloud_name that doesn't exist - data['cloud_name'] = 'subcloud2' + data["cloud_name"] = "subcloud2" um = sw_update_manager.SwUpdateManager() - 
self.assertRaises(exceptions.BadRequest, - um.create_sw_update_strategy, - self.ctxt, payload=data) - - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_create_sw_update_strategy_parallel( - self, mock_patch_orch_thread): + self.assertRaises( + exceptions.BadRequest, + um.create_sw_update_strategy, + self.ctxt, + payload=data, + ) + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_create_sw_update_strategy_parallel(self, mock_patch_orch_thread): # Create fake subclouds and respective status # Subcloud1 will be patched - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=True) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) # Subcloud2 will not be patched because not managed - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1, - is_managed=False, is_online=True) + fake_subcloud2 = self.create_subcloud( + self.ctxt, "subcloud2", 1, is_managed=False, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud2.id) # Subcloud3 will be patched - fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', 1, - is_managed=True, is_online=True) + fake_subcloud3 = self.create_subcloud( + self.ctxt, "subcloud3", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud3.id) # Subcloud4 will not be patched because patching is in sync - fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 2, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud4.id, - None, - dccommon_consts.SYNC_STATUS_IN_SYNC) + fake_subcloud4 = self.create_subcloud( + self.ctxt, "subcloud4", 2, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud4.id, None, dccommon_consts.SYNC_STATUS_IN_SYNC + ) # Subcloud5 will be patched - fake_subcloud5 = 
self.create_subcloud(self.ctxt, 'subcloud5', 2, - is_managed=True, is_online=True) + fake_subcloud5 = self.create_subcloud( + self.ctxt, "subcloud5", 2, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud5.id) # Subcloud6 will be patched - fake_subcloud6 = self.create_subcloud(self.ctxt, 'subcloud6', 3, - is_managed=True, is_online=True) + fake_subcloud6 = self.create_subcloud( + self.ctxt, "subcloud6", 3, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud6.id) # Subcloud7 will be patched - fake_subcloud7 = self.create_subcloud(self.ctxt, 'subcloud7', 3, - is_managed=True, is_online=True) + fake_subcloud7 = self.create_subcloud( + self.ctxt, "subcloud7", 3, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud7.id) um = sw_update_manager.SwUpdateManager() - strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=FAKE_SW_UPDATE_DATA) + strategy_dict = um.create_sw_update_strategy( + self.ctxt, payload=FAKE_SW_UPDATE_DATA + ) # Assert that values passed through CLI are used instead of group values - self.assertEqual(strategy_dict['max-parallel-subclouds'], 2) - self.assertEqual(strategy_dict['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) + self.assertEqual(strategy_dict["max-parallel-subclouds"], 2) + self.assertEqual( + strategy_dict["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) # Verify the strategy step list subcloud_ids = [1, 3, 5, 6, 7] @@ -593,111 +670,148 @@ class TestSwUpdateManager(base.DCManagerTestCase): for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_patching_subcloud_in_sync_out_of_sync( - self, mock_patch_orch_thread): - + self, mock_patch_orch_thread + ): # Subcloud 1 will be patched - 
fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', - self.fake_group3.id, - is_managed=True, is_online=True) + fake_subcloud1 = self.create_subcloud( + self.ctxt, + "subcloud1", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) - self.update_subcloud_status(self.ctxt, - fake_subcloud1.id, - dccommon_consts.ENDPOINT_TYPE_PATCHING, - dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) + self.update_subcloud_status( + self.ctxt, + fake_subcloud1.id, + dccommon_consts.ENDPOINT_TYPE_PATCHING, + dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, + ) # Subcloud 2 will not be patched because it is offline - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', - self.fake_group3.id, - is_managed=True, is_online=False) + fake_subcloud2 = self.create_subcloud( + self.ctxt, + "subcloud2", + self.fake_group3.id, + is_managed=True, + is_online=False, + ) - self.update_subcloud_status(self.ctxt, fake_subcloud2.id, - dccommon_consts.ENDPOINT_TYPE_PATCHING, - dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) + self.update_subcloud_status( + self.ctxt, + fake_subcloud2.id, + dccommon_consts.ENDPOINT_TYPE_PATCHING, + dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, + ) # Subcloud 3 will be patched - fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', - self.fake_group3.id, - is_managed=True, is_online=True) + fake_subcloud3 = self.create_subcloud( + self.ctxt, + "subcloud3", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) - self.update_subcloud_status(self.ctxt, fake_subcloud3.id, - dccommon_consts.ENDPOINT_TYPE_PATCHING, - dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) + self.update_subcloud_status( + self.ctxt, + fake_subcloud3.id, + dccommon_consts.ENDPOINT_TYPE_PATCHING, + dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, + ) # Subcloud 4 will not be patched because it is in sync - fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', - self.fake_group3.id, - is_managed=True, is_online=True) + fake_subcloud4 = self.create_subcloud( + self.ctxt, + "subcloud4", + 
self.fake_group3.id, + is_managed=True, + is_online=True, + ) - self.update_subcloud_status(self.ctxt, fake_subcloud4.id, - dccommon_consts.ENDPOINT_TYPE_PATCHING, - dccommon_consts.SYNC_STATUS_IN_SYNC) + self.update_subcloud_status( + self.ctxt, + fake_subcloud4.id, + dccommon_consts.ENDPOINT_TYPE_PATCHING, + dccommon_consts.SYNC_STATUS_IN_SYNC, + ) data = copy.copy(FAKE_SW_PATCH_DATA) data["type"] = consts.SW_UPDATE_TYPE_PATCH - data['subcloud_group'] = str(self.fake_group3.id) + data["subcloud_group"] = str(self.fake_group3.id) um = sw_update_manager.SwUpdateManager() - response = um.create_sw_update_strategy( - self.ctxt, payload=data) + response = um.create_sw_update_strategy(self.ctxt, payload=data) # Verify strategy was created as expected using group values - self.assertEqual(response['max-parallel-subclouds'], 2) - self.assertEqual(response['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) - self.assertEqual(response['type'], consts.SW_UPDATE_TYPE_PATCH) + self.assertEqual(response["max-parallel-subclouds"], 2) + self.assertEqual( + response["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) + self.assertEqual(response["type"], consts.SW_UPDATE_TYPE_PATCH) # Verify the strategy step list subcloud_ids = [1, 3] strategy_step_list = db_api.strategy_step_get_all(self.ctxt) subcloud_id_processed = [] for strategy_step in strategy_step_list: - subcloud_id_processed.append(strategy_step.subcloud_id) + subcloud_id_processed.append(strategy_step.subcloud_id) self.assertEqual(subcloud_ids, subcloud_id_processed) - @mock.patch.object(cutils, 'get_systemcontroller_installed_loads') - @mock.patch.object(prestage, 'initial_subcloud_validate') - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_create_sw_prestage_strategy_parallel(self, - mock_patch_orch_thread, - mock_controller_upgrade, - mock_initial_subcloud_validate, - mock_installed_loads): - + 
@mock.patch.object(cutils, "get_systemcontroller_installed_loads") + @mock.patch.object(prestage, "initial_subcloud_validate") + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_create_sw_prestage_strategy_parallel( + self, + mock_patch_orch_thread, + mock_controller_upgrade, + mock_initial_subcloud_validate, + mock_installed_loads, + ): # Create fake subclouds and respective status # Subcloud1 will be prestaged - self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=True) + self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=True + ) # Subcloud2 will not be prestaged because not managed - self.create_subcloud(self.ctxt, 'subcloud2', 1, - is_managed=False, is_online=True) + self.create_subcloud( + self.ctxt, "subcloud2", 1, is_managed=False, is_online=True + ) # Subcloud3 will be prestaged - self.create_subcloud(self.ctxt, 'subcloud3', 1, - is_managed=True, is_online=True) + self.create_subcloud( + self.ctxt, "subcloud3", 1, is_managed=True, is_online=True + ) # Subcloud4 will not be prestaged because offline - self.create_subcloud(self.ctxt, 'subcloud4', 2, - is_managed=True, is_online=False) + self.create_subcloud( + self.ctxt, "subcloud4", 2, is_managed=True, is_online=False + ) # Subcloud5 will be prestaged - self.create_subcloud(self.ctxt, 'subcloud5', 2, - is_managed=True, is_online=True) + self.create_subcloud( + self.ctxt, "subcloud5", 2, is_managed=True, is_online=True + ) # Subcloud6 will be prestaged - self.create_subcloud(self.ctxt, 'subcloud6', 3, - is_managed=True, is_online=True) + self.create_subcloud( + self.ctxt, "subcloud6", 3, is_managed=True, is_online=True + ) # Subcloud7 will be prestaged - self.create_subcloud(self.ctxt, 'subcloud7', 3, - is_managed=True, is_online=True) + self.create_subcloud( + self.ctxt, "subcloud7", 3, is_managed=True, is_online=True + ) data = copy.copy(FAKE_SW_PRESTAGE_DATA) - 
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data['sysadmin_password'] = fake_password - fake_release = '21.12' + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data["sysadmin_password"] = fake_password + fake_release = "21.12" data[consts.PRESTAGE_REQUEST_RELEASE] = fake_release mock_installed_loads.return_value = [fake_release] @@ -708,12 +822,14 @@ class TestSwUpdateManager(base.DCManagerTestCase): mock_controller_upgrade.return_value = list() # Assert that values passed through CLI are used instead of group values - self.assertEqual(strategy_dict['max-parallel-subclouds'], 2) - self.assertEqual(strategy_dict['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) - self.assertEqual(fake_release, - strategy_dict['extra-args'].get( - consts.PRESTAGE_SOFTWARE_VERSION)) + self.assertEqual(strategy_dict["max-parallel-subclouds"], 2) + self.assertEqual( + strategy_dict["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) + self.assertEqual( + fake_release, + strategy_dict["extra-args"].get(consts.PRESTAGE_SOFTWARE_VERSION), + ) # Verify the strategy step list subcloud_ids = [1, 3, 5, 6, 7] @@ -723,46 +839,50 @@ class TestSwUpdateManager(base.DCManagerTestCase): subcloud_id_processed.append(strategy_step.subcloud_id) self.assertEqual(subcloud_ids, subcloud_id_processed) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_create_sw_update_strategy_serial( - self, mock_patch_orch_thread): - + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_create_sw_update_strategy_serial(self, mock_patch_orch_thread): # Create fake subclouds and respective status # Subcloud1 will be patched - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=True) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) # 
Subcloud2 will not be patched because not managed - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1, - is_managed=False, is_online=True) + fake_subcloud2 = self.create_subcloud( + self.ctxt, "subcloud2", 1, is_managed=False, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud2.id) # Subcloud3 will be patched - fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', 1, - is_managed=True, is_online=True) + fake_subcloud3 = self.create_subcloud( + self.ctxt, "subcloud3", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud3.id) # Subcloud4 will not be patched because patching is in sync - fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 2, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud4.id, - None, - dccommon_consts.SYNC_STATUS_IN_SYNC) + fake_subcloud4 = self.create_subcloud( + self.ctxt, "subcloud4", 2, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud4.id, None, dccommon_consts.SYNC_STATUS_IN_SYNC + ) # Subcloud5 will be patched - fake_subcloud5 = self.create_subcloud(self.ctxt, 'subcloud5', 2, - is_managed=True, is_online=True) + fake_subcloud5 = self.create_subcloud( + self.ctxt, "subcloud5", 2, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud5.id) # Subcloud6 will be patched - fake_subcloud6 = self.create_subcloud(self.ctxt, 'subcloud6', 3, - is_managed=True, is_online=True) + fake_subcloud6 = self.create_subcloud( + self.ctxt, "subcloud6", 3, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud6.id) # Subcloud7 will be patched - fake_subcloud7 = self.create_subcloud(self.ctxt, 'subcloud7', 3, - is_managed=True, is_online=True) + fake_subcloud7 = self.create_subcloud( + self.ctxt, "subcloud7", 3, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud7.id) um = 
sw_update_manager.SwUpdateManager() @@ -771,9 +891,10 @@ class TestSwUpdateManager(base.DCManagerTestCase): strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=data) # Assert that values passed through CLI are used instead of group values - self.assertEqual(strategy_dict['max-parallel-subclouds'], 1) - self.assertEqual(strategy_dict['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_SERIAL) + self.assertEqual(strategy_dict["max-parallel-subclouds"], 1) + self.assertEqual( + strategy_dict["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_SERIAL + ) # Verify the strategy step list subcloud_ids = [1, 3, 5, 6, 7] @@ -781,90 +902,102 @@ class TestSwUpdateManager(base.DCManagerTestCase): for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_using_group_apply_type( - self, mock_patch_orch_thread): - + self, mock_patch_orch_thread + ): # Create fake subclouds and respective status # Subcloud1 will be patched - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=True) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) # Subcloud2 will not be patched because not managed - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1, - is_managed=False, is_online=True) + fake_subcloud2 = self.create_subcloud( + self.ctxt, "subcloud2", 1, is_managed=False, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud2.id) # Subcloud3 will be patched - fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', 1, - is_managed=True, is_online=True) + fake_subcloud3 = self.create_subcloud( + self.ctxt, "subcloud3", 1, is_managed=True, is_online=True + ) 
self.update_subcloud_status(self.ctxt, fake_subcloud3.id) # Subcloud4 will not be patched because patching is in sync - fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 2, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud4.id, - None, - dccommon_consts.SYNC_STATUS_IN_SYNC) + fake_subcloud4 = self.create_subcloud( + self.ctxt, "subcloud4", 2, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud4.id, None, dccommon_consts.SYNC_STATUS_IN_SYNC + ) # Subcloud5 will be patched - fake_subcloud5 = self.create_subcloud(self.ctxt, 'subcloud5', 2, - is_managed=True, is_online=True) + fake_subcloud5 = self.create_subcloud( + self.ctxt, "subcloud5", 2, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud5.id) # Subcloud6 will be patched - fake_subcloud6 = self.create_subcloud(self.ctxt, 'subcloud6', 3, - is_managed=True, is_online=True) + fake_subcloud6 = self.create_subcloud( + self.ctxt, "subcloud6", 3, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud6.id) # Subcloud7 will be patched - fake_subcloud7 = self.create_subcloud(self.ctxt, 'subcloud7', 3, - is_managed=True, is_online=True) + fake_subcloud7 = self.create_subcloud( + self.ctxt, "subcloud7", 3, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud7.id) # Subcloud8 will be patched - fake_subcloud8 = self.create_subcloud(self.ctxt, 'subcloud8', 4, - is_managed=True, is_online=True) + fake_subcloud8 = self.create_subcloud( + self.ctxt, "subcloud8", 4, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud8.id) # Subcloud9 will be patched - fake_subcloud9 = self.create_subcloud(self.ctxt, 'subcloud9', 4, - is_managed=True, is_online=True) + fake_subcloud9 = self.create_subcloud( + self.ctxt, "subcloud9", 4, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, 
fake_subcloud9.id) # Subcloud10 will be patched - fake_subcloud10 = self.create_subcloud(self.ctxt, 'subcloud10', 4, - is_managed=True, is_online=True) + fake_subcloud10 = self.create_subcloud( + self.ctxt, "subcloud10", 4, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud10.id) # Subcloud11 will be patched - fake_subcloud11 = self.create_subcloud(self.ctxt, 'subcloud11', 5, - is_managed=True, is_online=True) + fake_subcloud11 = self.create_subcloud( + self.ctxt, "subcloud11", 5, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud11.id) # Subcloud12 will be patched - fake_subcloud12 = self.create_subcloud(self.ctxt, 'subcloud12', 5, - is_managed=True, is_online=True) + fake_subcloud12 = self.create_subcloud( + self.ctxt, "subcloud12", 5, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud12.id) # Subcloud13 will be patched - fake_subcloud13 = self.create_subcloud(self.ctxt, 'subcloud13', 5, - is_managed=True, is_online=True) + fake_subcloud13 = self.create_subcloud( + self.ctxt, "subcloud13", 5, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud13.id) data = copy.copy(FAKE_SW_UPDATE_DATA) - del data['subcloud-apply-type'] + del data["subcloud-apply-type"] um = sw_update_manager.SwUpdateManager() strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=data) # Assert that group values are being used for subcloud_apply_type - self.assertEqual(strategy_dict['subcloud-apply-type'], None) + self.assertEqual(strategy_dict["subcloud-apply-type"], None) # Assert that values passed through CLI are used instead of # group values for max_parallel_subclouds - self.assertEqual(strategy_dict['max-parallel-subclouds'], 2) + self.assertEqual(strategy_dict["max-parallel-subclouds"], 2) # Verify the strategy step list subcloud_ids = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13] @@ -872,92 +1005,107 @@ class 
TestSwUpdateManager(base.DCManagerTestCase): for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_using_group_max_parallel( - self, mock_patch_orch_thread): - + self, mock_patch_orch_thread + ): # Create fake subclouds and respective status # Subcloud1 will be patched - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=True) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) # Subcloud2 will not be patched because not managed - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1, - is_managed=False, is_online=True) + fake_subcloud2 = self.create_subcloud( + self.ctxt, "subcloud2", 1, is_managed=False, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud2.id) # Subcloud3 will be patched - fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', 1, - is_managed=True, is_online=True) + fake_subcloud3 = self.create_subcloud( + self.ctxt, "subcloud3", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud3.id) # Subcloud4 will not be patched because patching is in sync - fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 2, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud4.id, - None, - dccommon_consts.SYNC_STATUS_IN_SYNC) + fake_subcloud4 = self.create_subcloud( + self.ctxt, "subcloud4", 2, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud4.id, None, dccommon_consts.SYNC_STATUS_IN_SYNC + ) # Subcloud5 will be patched - fake_subcloud5 = self.create_subcloud(self.ctxt, 'subcloud5', 2, - is_managed=True, is_online=True) + 
fake_subcloud5 = self.create_subcloud( + self.ctxt, "subcloud5", 2, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud5.id) # Subcloud6 will be patched - fake_subcloud6 = self.create_subcloud(self.ctxt, 'subcloud6', 3, - is_managed=True, is_online=True) + fake_subcloud6 = self.create_subcloud( + self.ctxt, "subcloud6", 3, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud6.id) # Subcloud7 will be patched - fake_subcloud7 = self.create_subcloud(self.ctxt, 'subcloud7', 3, - is_managed=True, is_online=True) + fake_subcloud7 = self.create_subcloud( + self.ctxt, "subcloud7", 3, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud7.id) # Subcloud8 will be patched - fake_subcloud8 = self.create_subcloud(self.ctxt, 'subcloud8', 4, - is_managed=True, is_online=True) + fake_subcloud8 = self.create_subcloud( + self.ctxt, "subcloud8", 4, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud8.id) # Subcloud9 will be patched - fake_subcloud9 = self.create_subcloud(self.ctxt, 'subcloud9', 4, - is_managed=True, is_online=True) + fake_subcloud9 = self.create_subcloud( + self.ctxt, "subcloud9", 4, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud9.id) # Subcloud10 will be patched - fake_subcloud10 = self.create_subcloud(self.ctxt, 'subcloud10', 4, - is_managed=True, is_online=True) + fake_subcloud10 = self.create_subcloud( + self.ctxt, "subcloud10", 4, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud10.id) # Subcloud11 will be patched - fake_subcloud11 = self.create_subcloud(self.ctxt, 'subcloud11', 5, - is_managed=True, is_online=True) + fake_subcloud11 = self.create_subcloud( + self.ctxt, "subcloud11", 5, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud11.id) # Subcloud12 will be patched - fake_subcloud12 = 
self.create_subcloud(self.ctxt, 'subcloud12', 5, - is_managed=True, is_online=True) + fake_subcloud12 = self.create_subcloud( + self.ctxt, "subcloud12", 5, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud12.id) # Subcloud13 will be patched - fake_subcloud13 = self.create_subcloud(self.ctxt, 'subcloud13', 5, - is_managed=True, is_online=True) + fake_subcloud13 = self.create_subcloud( + self.ctxt, "subcloud13", 5, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud13.id) data = copy.copy(FAKE_SW_UPDATE_DATA) - del data['max-parallel-subclouds'] + del data["max-parallel-subclouds"] um = sw_update_manager.SwUpdateManager() strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=data) # Assert that values passed through CLI are used instead of # group values for max_parallel_subclouds - self.assertEqual(strategy_dict['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) + self.assertEqual( + strategy_dict["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) # Assert that group values are being used for subcloud_apply_type - self.assertEqual(strategy_dict['max-parallel-subclouds'], - consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS) + self.assertEqual( + strategy_dict["max-parallel-subclouds"], + consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS, + ) # Verify the strategy step list subcloud_ids = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13] @@ -965,89 +1113,103 @@ class TestSwUpdateManager(base.DCManagerTestCase): for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_using_all_group_values( - self, mock_patch_orch_thread): - + self, mock_patch_orch_thread + ): # Create fake subclouds and respective status # Subcloud1 will be patched - fake_subcloud1 
= self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=True) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) # Subcloud2 will not be patched because not managed - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1, - is_managed=False, is_online=True) + fake_subcloud2 = self.create_subcloud( + self.ctxt, "subcloud2", 1, is_managed=False, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud2.id) # Subcloud3 will be patched - fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', 1, - is_managed=True, is_online=True) + fake_subcloud3 = self.create_subcloud( + self.ctxt, "subcloud3", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud3.id) # Subcloud4 will not be patched because patching is in sync - fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 2, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud4.id, - None, - dccommon_consts.SYNC_STATUS_IN_SYNC) + fake_subcloud4 = self.create_subcloud( + self.ctxt, "subcloud4", 2, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud4.id, None, dccommon_consts.SYNC_STATUS_IN_SYNC + ) # Subcloud5 will be patched - fake_subcloud5 = self.create_subcloud(self.ctxt, 'subcloud5', 2, - is_managed=True, is_online=True) + fake_subcloud5 = self.create_subcloud( + self.ctxt, "subcloud5", 2, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud5.id) # Subcloud6 will be patched - fake_subcloud6 = self.create_subcloud(self.ctxt, 'subcloud6', 3, - is_managed=True, is_online=True) + fake_subcloud6 = self.create_subcloud( + self.ctxt, "subcloud6", 3, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud6.id) # Subcloud7 will be patched - fake_subcloud7 = 
self.create_subcloud(self.ctxt, 'subcloud7', 3, - is_managed=True, is_online=True) + fake_subcloud7 = self.create_subcloud( + self.ctxt, "subcloud7", 3, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud7.id) # Subcloud8 will be patched - fake_subcloud8 = self.create_subcloud(self.ctxt, 'subcloud8', 4, - is_managed=True, is_online=True) + fake_subcloud8 = self.create_subcloud( + self.ctxt, "subcloud8", 4, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud8.id) # Subcloud9 will be patched - fake_subcloud9 = self.create_subcloud(self.ctxt, 'subcloud9', 4, - is_managed=True, is_online=True) + fake_subcloud9 = self.create_subcloud( + self.ctxt, "subcloud9", 4, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud9.id) # Subcloud10 will be patched - fake_subcloud10 = self.create_subcloud(self.ctxt, 'subcloud10', 4, - is_managed=True, is_online=True) + fake_subcloud10 = self.create_subcloud( + self.ctxt, "subcloud10", 4, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud10.id) # Subcloud11 will be patched - fake_subcloud11 = self.create_subcloud(self.ctxt, 'subcloud11', 5, - is_managed=True, is_online=True) + fake_subcloud11 = self.create_subcloud( + self.ctxt, "subcloud11", 5, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud11.id) # Subcloud12 will be patched - fake_subcloud12 = self.create_subcloud(self.ctxt, 'subcloud12', 5, - is_managed=True, is_online=True) + fake_subcloud12 = self.create_subcloud( + self.ctxt, "subcloud12", 5, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud12.id) # Subcloud13 will be patched - fake_subcloud13 = self.create_subcloud(self.ctxt, 'subcloud13', 5, - is_managed=True, is_online=True) + fake_subcloud13 = self.create_subcloud( + self.ctxt, "subcloud13", 5, is_managed=True, is_online=True + ) 
self.update_subcloud_status(self.ctxt, fake_subcloud13.id) data = copy.copy(FAKE_SW_UPDATE_DATA) - del data['subcloud-apply-type'] - del data['max-parallel-subclouds'] + del data["subcloud-apply-type"] + del data["max-parallel-subclouds"] um = sw_update_manager.SwUpdateManager() strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=data) # Assert that group values are being used - self.assertEqual(strategy_dict['max-parallel-subclouds'], - consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS) - self.assertEqual(strategy_dict['subcloud-apply-type'], None) + self.assertEqual( + strategy_dict["max-parallel-subclouds"], + consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS, + ) + self.assertEqual(strategy_dict["subcloud-apply-type"], None) # Verify the strategy step list subcloud_ids = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13] @@ -1055,46 +1217,54 @@ class TestSwUpdateManager(base.DCManagerTestCase): for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_unknown_sync_status( - self, mock_patch_orch_thread): + self, mock_patch_orch_thread + ): # Create fake subclouds and respective status # Subcloud1 will be patched - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=True) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) # Subcloud2 will not be patched because not managed - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1, - is_managed=False, is_online=True) + fake_subcloud2 = self.create_subcloud( + self.ctxt, "subcloud2", 1, is_managed=False, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud2.id) # Subcloud3 will be patched - fake_subcloud3 = 
self.create_subcloud(self.ctxt, 'subcloud3', 1, - is_managed=True, is_online=True) + fake_subcloud3 = self.create_subcloud( + self.ctxt, "subcloud3", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud3.id) # Subcloud4 will not be patched because patching is not in sync - fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 2, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud4.id, - None, - dccommon_consts.SYNC_STATUS_UNKNOWN) + fake_subcloud4 = self.create_subcloud( + self.ctxt, "subcloud4", 2, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, fake_subcloud4.id, None, dccommon_consts.SYNC_STATUS_UNKNOWN + ) um = sw_update_manager.SwUpdateManager() - self.assertRaises(exceptions.BadRequest, - um.create_sw_update_strategy, - self.ctxt, payload=FAKE_SW_UPDATE_DATA) - - @mock.patch.object(prestage, '_get_prestage_subcloud_info') - @mock.patch.object(prestage, '_get_system_controller_upgrades') - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_create_sw_prestage_strategy_duplex(self, - mock_patch_orch_thread, - mock_controller_upgrade, - mock_prestage_subcloud_info): + self.assertRaises( + exceptions.BadRequest, + um.create_sw_update_strategy, + self.ctxt, + payload=FAKE_SW_UPDATE_DATA, + ) + @mock.patch.object(prestage, "_get_prestage_subcloud_info") + @mock.patch.object(prestage, "_get_system_controller_upgrades") + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_create_sw_prestage_strategy_duplex( + self, + mock_patch_orch_thread, + mock_controller_upgrade, + mock_prestage_subcloud_info, + ): # Create fake subclouds and respective status # A note on subcloud system mode = duplex checking: For this test case @@ -1107,46 +1277,55 @@ class TestSwUpdateManager(base.DCManagerTestCase): # # Therefore, subcloud1 will be included in the strategy but not be # prestaged because during the apply we find out it is a 
duplex - self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=True) + self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=True + ) data = copy.copy(FAKE_SW_PRESTAGE_DATA) - fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii') - data['sysadmin_password'] = fake_password + fake_password = (base64.b64encode("testpass".encode("utf-8"))).decode( + "ascii" + ) + data["sysadmin_password"] = fake_password mock_controller_upgrade.return_value = list() - mock_prestage_subcloud_info.return_value = consts.SYSTEM_MODE_DUPLEX, \ - health_report_no_mgmt_alarm, \ - OAM_FLOATING_IP + mock_prestage_subcloud_info.return_value = ( + consts.SYSTEM_MODE_DUPLEX, + health_report_no_mgmt_alarm, + OAM_FLOATING_IP, + ) um = sw_update_manager.SwUpdateManager() um.create_sw_update_strategy(self.ctxt, payload=data) strategy_step_list = db_api.strategy_step_get_all(self.ctxt) self.assertEqual(1, len(strategy_step_list)) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_offline_subcloud_no_force( - self, mock_patch_orch_thread): - + self, mock_patch_orch_thread + ): # Create fake subclouds and respective status # Subcloud1 will not be included in the strategy as it's offline - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=False) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=False + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) # Subcloud2 will be included in the strategy as it's online - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1, - is_managed=True, is_online=True) + fake_subcloud2 = self.create_subcloud( + self.ctxt, "subcloud2", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud2.id) # Subcloud3 will be included in the strategy as it's online - 
fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', 1, - is_managed=True, is_online=True) + fake_subcloud3 = self.create_subcloud( + self.ctxt, "subcloud3", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud3.id) # Subcloud4 will be included in the strategy as it's online - fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 1, - is_managed=True, is_online=True) + fake_subcloud4 = self.create_subcloud( + self.ctxt, "subcloud4", 1, is_managed=True, is_online=True + ) self.update_subcloud_status(self.ctxt, fake_subcloud4.id) um = sw_update_manager.SwUpdateManager() @@ -1155,10 +1334,11 @@ class TestSwUpdateManager(base.DCManagerTestCase): strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=data) # Assert that values passed through CLI are used instead of group values - self.assertEqual(strategy_dict['max-parallel-subclouds'], 10) - self.assertEqual(strategy_dict['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) - self.assertEqual(strategy_dict['type'], consts.SW_UPDATE_TYPE_PATCH) + self.assertEqual(strategy_dict["max-parallel-subclouds"], 10) + self.assertEqual( + strategy_dict["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) + self.assertEqual(strategy_dict["type"], consts.SW_UPDATE_TYPE_PATCH) # Verify the strategy step list subcloud_ids = [2, 3, 4] @@ -1166,123 +1346,171 @@ class TestSwUpdateManager(base.DCManagerTestCase): for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_with_force_option( - self, mock_patch_orch_thread): - + self, mock_patch_orch_thread + ): # Subcloud 1 will be upgraded because force is true - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', self.fake_group3.id, - is_managed=True, is_online=False) - 
self.update_subcloud_status(self.ctxt, - fake_subcloud1.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) + fake_subcloud1 = self.create_subcloud( + self.ctxt, + "subcloud1", + self.fake_group3.id, + is_managed=True, + is_online=False, + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud1.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, + ) # Subcloud 2 will be upgraded - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', self.fake_group3.id, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud2.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) + fake_subcloud2 = self.create_subcloud( + self.ctxt, + "subcloud2", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud2.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, + ) # Subcloud 3 will not be upgraded because it is already load in-sync - fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', self.fake_group3.id, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud3.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_IN_SYNC) + fake_subcloud3 = self.create_subcloud( + self.ctxt, + "subcloud3", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud3.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_IN_SYNC, + ) data = copy.copy(FAKE_SW_UPDATE_DATA) data["type"] = consts.SW_UPDATE_TYPE_UPGRADE data["force"] = "true" - data['subcloud_group'] = str(self.fake_group3.id) + data["subcloud_group"] = str(self.fake_group3.id) um = sw_update_manager.SwUpdateManager() strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=data) # Assert that values passed through CLI are used instead of group 
values - self.assertEqual(strategy_dict['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) - self.assertEqual(strategy_dict['type'], consts.SW_UPDATE_TYPE_UPGRADE) + self.assertEqual( + strategy_dict["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) + self.assertEqual(strategy_dict["type"], consts.SW_UPDATE_TYPE_UPGRADE) subcloud_ids = [1, 2] strategy_step_list = db_api.strategy_step_get_all(self.ctxt) for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_without_force_option( - self, mock_patch_orch_thread): - + self, mock_patch_orch_thread + ): # Subcloud 1 will not be upgraded - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', self.fake_group3.id, - is_managed=True, is_online=False) - self.update_subcloud_status(self.ctxt, - fake_subcloud1.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) + fake_subcloud1 = self.create_subcloud( + self.ctxt, + "subcloud1", + self.fake_group3.id, + is_managed=True, + is_online=False, + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud1.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, + ) # Subcloud 2 will be upgraded - fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', self.fake_group3.id, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud2.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) + fake_subcloud2 = self.create_subcloud( + self.ctxt, + "subcloud2", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud2.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, + ) # Subcloud 3 will not be upgraded 
because it is already load in-sync - fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', self.fake_group3.id, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud3.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_IN_SYNC) + fake_subcloud3 = self.create_subcloud( + self.ctxt, + "subcloud3", + self.fake_group3.id, + is_managed=True, + is_online=True, + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud3.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_IN_SYNC, + ) data = copy.copy(FAKE_SW_UPDATE_DATA) data["type"] = consts.SW_UPDATE_TYPE_UPGRADE data["force"] = "false" - data['subcloud_group'] = str(self.fake_group3.id) + data["subcloud_group"] = str(self.fake_group3.id) um = sw_update_manager.SwUpdateManager() strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=data) # Assert that values passed through CLI are used instead of group values - self.assertEqual(strategy_dict['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) - self.assertEqual(strategy_dict['type'], consts.SW_UPDATE_TYPE_UPGRADE) + self.assertEqual( + strategy_dict["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) + self.assertEqual(strategy_dict["type"], consts.SW_UPDATE_TYPE_UPGRADE) subcloud_ids = [2] strategy_step_list = db_api.strategy_step_get_all(self.ctxt) for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_create_sw_update_strategy_not_in_sync_offline_subcloud_with_force_upgrade( - self, mock_patch_orch_thread): - + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_create_sw_update_strategy_not_insync_offline_sc_with_force_upgrade( + self, mock_patch_orch_thread + ): # This test verifies the offline subcloud is added to the strategy # because force option is specified in the upgrade request. 
- fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=False) - self.update_subcloud_status(self.ctxt, - fake_subcloud1.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_UNKNOWN) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=False + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud1.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_UNKNOWN, + ) um = sw_update_manager.SwUpdateManager() data = copy.copy(FAKE_SW_UPDATE_DATA) data["type"] = consts.SW_UPDATE_TYPE_UPGRADE data["force"] = "true" - data["cloud_name"] = 'subcloud1' + data["cloud_name"] = "subcloud1" strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=data) # Assert that values passed through CLI are used instead of group values - self.assertEqual(strategy_dict['subcloud-apply-type'], - consts.SUBCLOUD_APPLY_TYPE_PARALLEL) - self.assertEqual(strategy_dict['type'], consts.SW_UPDATE_TYPE_UPGRADE) + self.assertEqual( + strategy_dict["subcloud-apply-type"], consts.SUBCLOUD_APPLY_TYPE_PARALLEL + ) + self.assertEqual(strategy_dict["type"], consts.SW_UPDATE_TYPE_UPGRADE) # Verify the strategy step list subcloud_ids = [1] @@ -1290,167 +1518,180 @@ class TestSwUpdateManager(base.DCManagerTestCase): for index, strategy_step in enumerate(strategy_step_list): self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_in_sync_offline_subcloud_with_force_upgrade( - self, mock_patch_orch_thread): - + self, mock_patch_orch_thread + ): # This test verifies that a bad request exception is raised even # though force option is specified in the request because the load sync # status of the offline subcloud is in-sync. 
- fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=False) - self.update_subcloud_status(self.ctxt, - fake_subcloud1.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_IN_SYNC) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=False + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud1.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_IN_SYNC, + ) um = sw_update_manager.SwUpdateManager() data = copy.copy(FAKE_SW_UPDATE_DATA) data["type"] = consts.SW_UPDATE_TYPE_UPGRADE data["force"] = True - data["cloud_name"] = 'subcloud1' + data["cloud_name"] = "subcloud1" - self.assertRaises(exceptions.BadRequest, - um.create_sw_update_strategy, - self.ctxt, payload=data) + self.assertRaises( + exceptions.BadRequest, + um.create_sw_update_strategy, + self.ctxt, + payload=data, + ) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_online_subcloud_with_force_upgrade( - self, mock_patch_orch_thread): - + self, mock_patch_orch_thread + ): # This test verifies that the force option has no effect in # upgrade creation strategy if the subcloud is online. A bad request # exception will be raised if the subcloud load sync status is # unknown. 
- fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=True) - self.update_subcloud_status(self.ctxt, - fake_subcloud1.id, - dccommon_consts.ENDPOINT_TYPE_LOAD, - dccommon_consts.SYNC_STATUS_UNKNOWN) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=True + ) + self.update_subcloud_status( + self.ctxt, + fake_subcloud1.id, + dccommon_consts.ENDPOINT_TYPE_LOAD, + dccommon_consts.SYNC_STATUS_UNKNOWN, + ) um = sw_update_manager.SwUpdateManager() data = copy.copy(FAKE_SW_UPDATE_DATA) data["type"] = consts.SW_UPDATE_TYPE_UPGRADE data["force"] = True - data["cloud_name"] = 'subcloud1' + data["cloud_name"] = "subcloud1" - self.assertRaises(exceptions.BadRequest, - um.create_sw_update_strategy, - self.ctxt, payload=data) + self.assertRaises( + exceptions.BadRequest, + um.create_sw_update_strategy, + self.ctxt, + payload=data, + ) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_create_sw_update_strategy_offline_subcloud_with_force_patching( - self, mock_patch_orch_thread): - + self, mock_patch_orch_thread + ): # This test verifies that the force option has no effect in # patching creation strategy even though the subcloud is offline - fake_subcloud1 = self.create_subcloud(self.ctxt, 'subcloud1', 1, - is_managed=True, is_online=False) + fake_subcloud1 = self.create_subcloud( + self.ctxt, "subcloud1", 1, is_managed=True, is_online=False + ) self.update_subcloud_status(self.ctxt, fake_subcloud1.id) um = sw_update_manager.SwUpdateManager() data = copy.copy(FAKE_SW_UPDATE_DATA) data["force"] = True - data["cloud_name"] = 'subcloud1' + data["cloud_name"] = "subcloud1" # No strategy step is created when all subclouds are offline, # should raise 'Bad strategy request: Strategy has no steps to apply' - self.assertRaises(exceptions.BadRequest, - um.create_sw_update_strategy, - self.ctxt, payload=data) + 
self.assertRaises( + exceptions.BadRequest, + um.create_sw_update_strategy, + self.ctxt, + payload=data, + ) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_delete_sw_update_strategy(self, mock_patch_orch_thread): - self.create_strategy(self.ctxt, - consts.SW_UPDATE_TYPE_PATCH, - consts.SW_UPDATE_STATE_INITIAL) + self.create_strategy( + self.ctxt, consts.SW_UPDATE_TYPE_PATCH, consts.SW_UPDATE_STATE_INITIAL + ) um = sw_update_manager.SwUpdateManager() deleted_strategy = um.delete_sw_update_strategy(self.ctxt) - self.assertEqual(deleted_strategy['state'], - consts.SW_UPDATE_STATE_DELETING) + self.assertEqual(deleted_strategy["state"], consts.SW_UPDATE_STATE_DELETING) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_delete_sw_update_strategy_scoped(self, mock_patch_orch_thread): - self.create_strategy(self.ctxt, - consts.SW_UPDATE_TYPE_PATCH, - consts.SW_UPDATE_STATE_INITIAL) + self.create_strategy( + self.ctxt, consts.SW_UPDATE_TYPE_PATCH, consts.SW_UPDATE_STATE_INITIAL + ) um = sw_update_manager.SwUpdateManager() deleted_strategy = um.delete_sw_update_strategy( - self.ctxt, - update_type=consts.SW_UPDATE_TYPE_PATCH) - self.assertEqual(deleted_strategy['state'], - consts.SW_UPDATE_STATE_DELETING) + self.ctxt, update_type=consts.SW_UPDATE_TYPE_PATCH + ) + self.assertEqual(deleted_strategy["state"], consts.SW_UPDATE_STATE_DELETING) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') + @mock.patch.object(sw_update_manager, "PatchOrchThread") def test_delete_sw_update_strategy_bad_scope(self, mock_patch_orch_thread): - self.create_strategy(self.ctxt, - consts.SW_UPDATE_TYPE_PATCH, - consts.SW_UPDATE_STATE_INITIAL) + self.create_strategy( + self.ctxt, consts.SW_UPDATE_TYPE_PATCH, consts.SW_UPDATE_STATE_INITIAL + ) um = sw_update_manager.SwUpdateManager() # the strategy is PATCH. 
The delete for UPGRADE should fail - self.assertRaises(exceptions.NotFound, - um.delete_sw_update_strategy, - self.ctx, - update_type=consts.SW_UPDATE_TYPE_UPGRADE) + self.assertRaises( + exceptions.NotFound, + um.delete_sw_update_strategy, + self.ctx, + update_type=consts.SW_UPDATE_TYPE_UPGRADE, + ) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_delete_sw_update_strategy_invalid_state( - self, mock_patch_orch_thread): + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_delete_sw_update_strategy_invalid_state(self, mock_patch_orch_thread): # Create fake strategy - self.create_strategy(self.ctxt, - consts.SW_UPDATE_TYPE_PATCH, - consts.SW_UPDATE_STATE_APPLYING) + self.create_strategy( + self.ctxt, consts.SW_UPDATE_TYPE_PATCH, consts.SW_UPDATE_STATE_APPLYING + ) um = sw_update_manager.SwUpdateManager() - self.assertRaises(exceptions.BadRequest, - um.delete_sw_update_strategy, - self.ctxt) + self.assertRaises( + exceptions.BadRequest, um.delete_sw_update_strategy, self.ctxt + ) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_apply_sw_update_strategy(self, - mock_patch_orch_thread): + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_apply_sw_update_strategy(self, mock_patch_orch_thread): # Create fake strategy - self.create_strategy(self.ctxt, - consts.SW_UPDATE_TYPE_PATCH, - consts.SW_UPDATE_STATE_INITIAL) + self.create_strategy( + self.ctxt, consts.SW_UPDATE_TYPE_PATCH, consts.SW_UPDATE_STATE_INITIAL + ) um = sw_update_manager.SwUpdateManager() updated_strategy = um.apply_sw_update_strategy(self.ctxt) - self.assertEqual(updated_strategy['state'], consts.SW_UPDATE_STATE_APPLYING) + self.assertEqual(updated_strategy["state"], consts.SW_UPDATE_STATE_APPLYING) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_apply_sw_update_strategy_invalid_state( - self, mock_patch_orch_thread): + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def 
test_apply_sw_update_strategy_invalid_state(self, mock_patch_orch_thread): # Create fake strategy - self.create_strategy(self.ctxt, - consts.SW_UPDATE_TYPE_PATCH, - consts.SW_UPDATE_STATE_APPLYING) + self.create_strategy( + self.ctxt, consts.SW_UPDATE_TYPE_PATCH, consts.SW_UPDATE_STATE_APPLYING + ) um = sw_update_manager.SwUpdateManager() - self.assertRaises(exceptions.BadRequest, - um.apply_sw_update_strategy, - self.ctxt) + self.assertRaises( + exceptions.BadRequest, um.apply_sw_update_strategy, self.ctxt + ) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_abort_sw_update_strategy( - self, mock_patch_orch_thread): + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_abort_sw_update_strategy(self, mock_patch_orch_thread): # Create fake strategy - self.create_strategy(self.ctxt, - consts.SW_UPDATE_TYPE_PATCH, - consts.SW_UPDATE_STATE_APPLYING) + self.create_strategy( + self.ctxt, consts.SW_UPDATE_TYPE_PATCH, consts.SW_UPDATE_STATE_APPLYING + ) um = sw_update_manager.SwUpdateManager() aborted_strategy = um.abort_sw_update_strategy(self.ctxt) - self.assertEqual(aborted_strategy['state'], consts.SW_UPDATE_STATE_ABORT_REQUESTED) + self.assertEqual( + aborted_strategy["state"], consts.SW_UPDATE_STATE_ABORT_REQUESTED + ) - @mock.patch.object(sw_update_manager, 'PatchOrchThread') - def test_abort_sw_update_strategy_invalid_state( - self, mock_patch_orch_thread): + @mock.patch.object(sw_update_manager, "PatchOrchThread") + def test_abort_sw_update_strategy_invalid_state(self, mock_patch_orch_thread): # Create fake strategy - self.create_strategy(self.ctxt, - consts.SW_UPDATE_TYPE_PATCH, - consts.SW_UPDATE_STATE_COMPLETE) + self.create_strategy( + self.ctxt, consts.SW_UPDATE_TYPE_PATCH, consts.SW_UPDATE_STATE_COMPLETE + ) um = sw_update_manager.SwUpdateManager() - self.assertRaises(exceptions.BadRequest, - um.apply_sw_update_strategy, - self.ctxt) + self.assertRaises( + exceptions.BadRequest, um.apply_sw_update_strategy, self.ctxt + ) 
diff --git a/distributedcloud/dcmanager/tests/utils.py b/distributedcloud/dcmanager/tests/utils.py index 4bc68da68..024f92fc1 100644 --- a/distributedcloud/dcmanager/tests/utils.py +++ b/distributedcloud/dcmanager/tests/utils.py @@ -1,5 +1,5 @@ # Copyright (c) 2015 Ericsson AB -# Copyright (c) 2017-2022 Wind River Systems, Inc. +# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,15 +15,15 @@ # under the License. # -import eventlet import random import string import uuid +import eventlet + from dcmanager.common import context from dcmanager.db import api as db_api - get_engine = db_api.get_engine @@ -41,7 +41,7 @@ class UUIDStub(object): UUIDs = (UUID1, UUID2, UUID3, UUID4, UUID5) = sorted([str(uuid.uuid4()) - for x in range(5)]) + for x in range(5)]) def random_name(): diff --git a/distributedcloud/tox.ini b/distributedcloud/tox.ini index 67b95322a..b7b524b33 100644 --- a/distributedcloud/tox.ini +++ b/distributedcloud/tox.ini @@ -72,19 +72,13 @@ commands = oslo_debug_helper {posargs} [flake8] show-source = True +max-line-length = 85 # Suppressed flake8 codes # W503 line break before binary operator # W504 line break after binary operator # W605 invalid escape sequence -# E117 over-indented -# E123 closing bracket does not match indentation of opening bracket's line -# E125 continuation line with same indent as next logical line -# E305 expected 2 blank lines after class or function definition -# E402 module level import not at top of file -# E501 line too long # E731 do not assign a lambda expression, use a def -# H216 flag use of third party mock -ignore = W503,W504,W605,H216,E117,E123,E125,E305,E402,E501,E731 +ignore = W503,W504,W605,E731 builtins = _ [testenv:genconfig] @@ -99,4 +93,4 @@ setenv = {[testenv]setenv} PYTHONPATH = {toxinidir} commands = - pylint {posargs} dccommon dcmanager dcorch dcdbsync --rcfile=./.pylintrc + pylint {posargs} dccommon 
dcdbsync dcmanager dcorch --rcfile=./.pylintrc