Update tox pylint/pep8 for dcmanager

This commit enables the check of new pylint/pep8
violations.

PYLINT - All convention related checks, except:
- missing-class-docstring
- missing-function-docstring
- missing-module-docstring
- consider-using-f-string
- invalid-name
- import-outside-toplevel
- too-many-lines
- consider-iterating-dictionary
- unnecessary-lambda-assignment

PEP8:
- E117: over-indented
- E123: closing bracket does not match indentation
  of opening line's bracket
- E125: continuation line with the same indent as the next
  logical line
- E305: expected 2 blank lines after class or function
  definition
- E402: module level import not at top of file
- E501: line too long
- H216: flag use of third party mock

Test Plan:
1. Run the `tox` command
- Verify it passes in py39, pylint, and pep8

Closes-bug: 2033294

Change-Id: I635df8e809905cff582bd9d5eb57b91133560cf9
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
This commit is contained in:
Hugo Brito 2023-08-28 19:35:23 -03:00 committed by Hugo Nicodemos
parent 766f052295
commit 4438b8fd55
147 changed files with 6979 additions and 5321 deletions

View File

@ -22,127 +22,50 @@ load-plugins=
[MESSAGES CONTROL]
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time.
#
# Python3 checker:
#
# E1601: print-statement
# E1602: parameter-unpacking
# E1603: unpacking-in-except
# E1604: old-raise-syntax
# E1605: backtick
# E1609: import-star-module-level
# W1601: apply-builtin
# W1602: basestring-builtin
# W1603: buffer-builtin
# W1604: cmp-builtin
# W1605: coerce-builtin
# W1606: execfile-builtin
# W1607: file-builtin
# W1608: long-builtin
# W1609: raw_input-builtin
# W1610: reduce-builtin
# W1611: standarderror-builtin
# W1612: unicode-builtin
# W1613: xrange-builtin
# W1614: coerce-method
# W1615: delslice-method
# W1616: getslice-method
# W1617: setslice-method
# W1618: no-absolute-import
# W1619: old-division
# W1620: dict-iter-method
# W1621: dict-view-method
# W1622: next-method-called
# W1623: metaclass-assignment
# W1624: indexing-exception
# W1625: raising-string
# W1626: reload-builtin
# W1627: oct-method
# W1628: hex-method
# W1629: nonzero-method
# W1630: cmp-method
# W1632: input-builtin
# W1633: round-builtin
# W1634: intern-builtin
# W1635: unichr-builtin
# W1636: map-builtin-not-iterating
# W1637: zip-builtin-not-iterating
# W1638: range-builtin-not-iterating
# W1639: filter-builtin-not-iterating
# W1640: using-cmp-argument
# W1642: div-method
# W1643: idiv-method
# W1644: rdiv-method
# W1645: exception-message-attribute
# W1646: invalid-str-codec
# W1647: sys-max-int
# W1648: bad-python3-import
# W1649: deprecated-string-function
# W1650: deprecated-str-translate-call
# W1651: deprecated-itertools-function
# W1652: deprecated-types-field
# W1653: next-method-defined
# W1654: dict-items-not-iterating
# W1655: dict-keys-not-iterating
# W1656: dict-values-not-iterating
# W1657: deprecated-operator-function
# W1658: deprecated-urllib-function
# W1659: xreadlines-attribute
# W1660: deprecated-sys-function
# W1661: exception-escape
# W1662: comprehension-escape
enable=E1603,E1609,E1602,E1605,E1604,E1601,W1652,W1651,W1649,W1657,W1660,W1658,
W1659,W1623,W1622,W1620,W1621,W1645,W1624,W1648,W1625,W1611,W1662,W1661,
W1650,W1640,W1630,W1614,W1615,W1642,W1616,W1628,W1643,W1629,W1627,W1644,
W1617,W1601,W1602,W1603,W1604,W1605,W1654,W1655,W1656,W1619,W1606,W1607,
W1639,W1618,W1632,W1634,W1608,W1636,W1653,W1646,W1638,W1609,W1610,W1626,
W1633,W1647,W1635,W1612,W1613,W1637
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once).
# https://pylint.readthedocs.io/en/latest/user_guide/output.html#source-code-analysis-section
# R detect Refactor for a "good practice" metric violation
# C detect Convention for coding standard violation
# W detect Warning for stylistic problems, or minor programming issues
# W0102: dangerous-default-value
# W0105: pointless-string-statement
# W0107: unnecessary-pass
# W0123: eval-used
# W0201: attribute-defined-outside-init
# W0211: bad-staticmethod-argument
# W0212: protected-access
# W0221: arguments-differ
# W0223: abstract-method
# W0231: super-init-not-called
# W0235: useless-super-delegation
# W0311: bad-indentation
# W0402: deprecated-module
# W0603: global-statement
# W0612: unused-variable
# W0613: unused-argument
# W0621: redefined-outer-name
# W0622: redefined-builtin
# W0631: undefined-loop-variable
# W0703: broad-except
# W0706: try-except-raise
# W0707: raise-missing-from
# W1113: keyword-arg-before-vararg
# W1201: logging-not-lazy
# W1401: anomalous-backslash-in-string
# W1406: redundant-u-string-prefix
# W1514: unspecified-encoding
# W1618: no-absolute-import
disable=C,R,fixme,
W0102,W0105,W0107,W0123,W0201,W0211,W0212,W0221,
W0223,W0231,W0235,W0311,W0402,W0603,W0612,W0613,
W0621,W0622,W0631,W0703,W0706,W0707,W1113,W1201,
W1401,W1406,W1514,W1618
disable=R,fixme,
dangerous-default-value,
pointless-string-statement,
unnecessary-pass,
eval-used,
attribute-defined-outside-init,
bad-staticmethod-argument,
protected-access,
arguments-differ,
abstract-method,
super-init-not-called,
useless-super-delegation,
deprecated-module,
global-statement,
unused-variable,
unused-argument,
redefined-outer-name,
redefined-builtin,
undefined-loop-variable,
broad-except,
try-except-raise,
raise-missing-from,
keyword-arg-before-vararg,
logging-not-lazy,
anomalous-backslash-in-string,
redundant-u-string-prefix,
unspecified-encoding,
no-absolute-import,
missing-class-docstring,
missing-function-docstring,
missing-module-docstring,
consider-using-f-string,
invalid-name,
import-outside-toplevel,
too-many-lines,
consider-iterating-dictionary,
unnecessary-lambda-assignment
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs

View File

@ -1,5 +1,5 @@
# Copyright (c) 2015 Huawei, Tech. Co,. Ltd.
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -42,9 +42,9 @@ def setup_app(*args, **kwargs):
'errors': {
400: '/error',
'__force_dict__': True
}
}
}
}
pecan_config = pecan.configuration.conf_from_dict(config)

View File

@ -1,5 +1,5 @@
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -42,13 +42,13 @@ class RootController(object):
{
"rel": "self",
"href": pecan.request.application_url + "/v1.0/"
}
],
}
],
"id": "v1.0",
"updated": "2017-10-2"
}
]
}
}
]
}
@index.when(method='POST')
@index.when(method='PUT')

View File

@ -1,5 +1,5 @@
# Copyright (c) 2017 Ericsson AB.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -15,15 +15,15 @@
# under the License.
#
from oslo_log import log as logging
from pecan import expose
from dcmanager.api.controllers import restcomm
from dcmanager.api.policies import alarm_manager as alarm_manager_policy
from dcmanager.api import policy
from dcmanager.common import consts
from dcmanager.db import api as db_api
from oslo_log import log as logging
from pecan import expose
LOG = logging.getLogger(__name__)

View File

@ -1,21 +1,22 @@
# Copyright (c) 2021 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Copyright (c) 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import http.client as httpclient
from oslo_config import cfg
from oslo_log import log as logging
import http.client as httpclient
import pecan
from pecan import expose
from pecan import request
@ -23,7 +24,6 @@ from pecan import request
from dcmanager.api.controllers import restcomm
from dcmanager.audit import rpcapi as audit_rpc_client
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -150,7 +150,7 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
return False
# Less than min or greater than max priority is not supported.
if val < MIN_PEER_GROUP_ASSOCIATION_PRIORITY or \
val > MAX_PEER_GROUP_ASSOCIATION_PRIORITY:
val > MAX_PEER_GROUP_ASSOCIATION_PRIORITY:
LOG.debug("Invalid Peer Group Priority out of support range: %s"
% peer_group_priority)
return False
@ -187,7 +187,7 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
peer_group = db_api.subcloud_peer_group_get(context, peer_group_id)
if peer_group_priority is not None and not \
self._validate_peer_group_priority(peer_group_priority):
self._validate_peer_group_priority(peer_group_priority):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer_group_priority'))

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -278,9 +278,12 @@ class PhasedSubcloudDeployController(object):
utils.get_management_gateway_address(payload)
subcloud_dict['management-start-ip'] = \
utils.get_management_start_address(payload)
subcloud_dict['management-end-ip'] = utils.get_management_end_address(payload)
subcloud_dict['management-end-ip'] = \
utils.get_management_end_address(payload)
subcloud_dict['systemcontroller-gateway-ip'] = payload.get(
"systemcontroller_gateway_address", subcloud.systemcontroller_gateway_ip)
"systemcontroller_gateway_address",
subcloud.systemcontroller_gateway_ip
)
return subcloud_dict
except RemoteError as e:
@ -425,10 +428,13 @@ class PhasedSubcloudDeployController(object):
# Consider the incoming release parameter only if install is one
# of the pending deploy states
if INSTALL in deploy_states_to_run:
unvalidated_sw_version = payload.get('release', subcloud.software_version)
unvalidated_sw_version = \
payload.get('release', subcloud.software_version)
else:
LOG.debug('Disregarding release parameter for %s as installation is complete.'
% subcloud.name)
LOG.debug(
'Disregarding release parameter for %s as installation is complete.'
% subcloud.name
)
unvalidated_sw_version = subcloud.software_version
# get_sw_version will simply return back the passed
@ -474,9 +480,12 @@ class PhasedSubcloudDeployController(object):
utils.get_management_gateway_address(payload)
subcloud_dict['management-start-ip'] = \
utils.get_management_start_address(payload)
subcloud_dict['management-end-ip'] = utils.get_management_end_address(payload)
subcloud_dict['management-end-ip'] = \
utils.get_management_end_address(payload)
subcloud_dict['systemcontroller-gateway-ip'] = payload.get(
"systemcontroller_gateway_address", subcloud.systemcontroller_gateway_ip)
"systemcontroller_gateway_address",
subcloud.systemcontroller_gateway_ip
)
return subcloud_dict
except RemoteError as e:
pecan.abort(422, e.value)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2022-2023 Wind River Systems, Inc.
# Copyright (c) 2022-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -98,7 +98,8 @@ class SubcloudBackupController(object):
if param in request.POST:
file_item = request.POST[param]
file_item.file.seek(0, os.SEEK_SET)
data = utils.yaml_safe_load(file_item.file.read().decode('utf8'), param)
data = \
utils.yaml_safe_load(file_item.file.read().decode('utf8'), param)
payload.update({param: data})
del request.POST[param]
@ -176,7 +177,8 @@ class SubcloudBackupController(object):
operation (string): Subcloud backup operation
"""
subclouds = request_entity.subclouds
error_msg = _('Subcloud(s) must be in a valid state for backup %s.' % operation)
error_msg = _(
'Subcloud(s) must be in a valid state for backup %s.' % operation)
has_valid_subclouds = False
valid_subclouds = list()
for subcloud in subclouds:
@ -361,8 +363,10 @@ class SubcloudBackupController(object):
payload.get('restore_values', {}).get('bootstrap_address', {})
if not isinstance(bootstrap_address_dict, dict):
pecan.abort(400, _('The bootstrap_address provided in restore_values '
'is in invalid format.'))
pecan.abort(
400, _('The bootstrap_address provided in restore_values '
'is in invalid format.')
)
restore_subclouds = self._validate_subclouds(
request_entity, verb, bootstrap_address_dict)
@ -376,13 +380,16 @@ class SubcloudBackupController(object):
]
if subclouds_without_install_values:
subclouds_str = ', '.join(subclouds_without_install_values)
pecan.abort(400, _('The restore operation was requested with_install, '
'but the following subcloud(s) does not contain '
'install values: %s' % subclouds_str))
pecan.abort(
400, _('The restore operation was requested with_install, '
'but the following subcloud(s) does not contain '
'install values: %s' % subclouds_str)
)
# Confirm the requested or active load is still in dc-vault
payload['software_version'] = utils.get_sw_version(
payload.get('release'))
matching_iso, err_msg = utils.get_matching_iso(payload['software_version'])
matching_iso, err_msg = \
utils.get_matching_iso(payload['software_version'])
if err_msg:
LOG.exception(err_msg)
pecan.abort(400, _(err_msg))
@ -391,8 +398,10 @@ class SubcloudBackupController(object):
try:
# local update to deploy_status - this is just for CLI response
# pylint: disable-next=consider-using-enumerate
for i in range(len(restore_subclouds)):
restore_subclouds[i].deploy_status = consts.DEPLOY_STATE_PRE_RESTORE
restore_subclouds[i].deploy_status = (
consts.DEPLOY_STATE_PRE_RESTORE)
message = self.dcmanager_rpc_client.restore_subcloud_backups(
context, payload)
return utils.subcloud_db_list_to_dict(restore_subclouds)

View File

@ -12,17 +12,16 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import http.client as httpclient
from oslo_config import cfg
from oslo_log import log as logging
import http.client as httpclient
import pecan
from pecan import expose
from pecan import request
@ -35,7 +34,6 @@ from dcmanager.common import consts
from dcmanager.common.i18n import _
from dcmanager.common import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -96,16 +94,19 @@ class SubcloudDeployController(object):
if len(missing_options) > 0:
if ((consts.DEPLOY_PRESTAGE in missing_options and size != 1) or
(consts.DEPLOY_PRESTAGE not in missing_options and size != 3)):
missing_str = str()
for missing in missing_options:
if missing is not consts.DEPLOY_PRESTAGE:
missing_str += '--%s ' % missing
error_msg = "error: argument %s is required" % missing_str.rstrip()
pecan.abort(httpclient.BAD_REQUEST, error_msg)
missing_str = str()
for missing in missing_options:
if missing is not consts.DEPLOY_PRESTAGE:
missing_str += '--%s ' % missing
error_msg = "error: argument %s is required" % missing_str.rstrip()
pecan.abort(httpclient.BAD_REQUEST, error_msg)
deploy_dicts['software_version'] = utils.get_sw_version(request.POST.get('release'))
deploy_dicts['software_version'] = \
utils.get_sw_version(request.POST.get('release'))
dir_path = os.path.join(dccommon_consts.DEPLOY_DIR, deploy_dicts['software_version'])
dir_path = os.path.join(
dccommon_consts.DEPLOY_DIR, deploy_dicts['software_version']
)
for f in consts.DEPLOY_COMMON_FILE_OPTIONS:
if f not in request.POST:
continue
@ -139,7 +140,9 @@ class SubcloudDeployController(object):
restcomm.extract_credentials_for_policy())
deploy_dicts = dict()
deploy_dicts['software_version'] = utils.get_sw_version(release)
dir_path = os.path.join(dccommon_consts.DEPLOY_DIR, deploy_dicts['software_version'])
dir_path = os.path.join(
dccommon_consts.DEPLOY_DIR, deploy_dicts['software_version']
)
for f in consts.DEPLOY_COMMON_FILE_OPTIONS:
filename = None
if os.path.isdir(dir_path):
@ -159,10 +162,13 @@ class SubcloudDeployController(object):
policy.authorize(subcloud_deploy_policy.POLICY_ROOT % "delete", {},
restcomm.extract_credentials_for_policy())
is_prestage_images = request.params.get('prestage_images', '').lower() == 'true'
is_deployment_files = request.params.get('deployment_files', '').lower() == 'true'
is_prestage_images = \
request.params.get('prestage_images', '').lower() == 'true'
is_deployment_files = \
request.params.get('deployment_files', '').lower() == 'true'
dir_path = os.path.join(dccommon_consts.DEPLOY_DIR, utils.get_sw_version(release))
dir_path = \
os.path.join(dccommon_consts.DEPLOY_DIR, utils.get_sw_version(release))
if not os.path.isdir(dir_path):
pecan.abort(httpclient.NOT_FOUND,
_("Directory not found: %s" % dir_path))

View File

@ -1,5 +1,5 @@
# Copyright (c) 2017 Ericsson AB.
# Copyright (c) 2020-2022 Wind River Systems, Inc.
# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -15,12 +15,11 @@
# under the License.
#
import http.client as httpclient
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_messaging import RemoteError
import http.client as httpclient
import pecan
from pecan import expose
from pecan import request

View File

@ -1,19 +1,21 @@
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import json
import uuid
import http.client as httpclient
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_messaging import RemoteError
import http.client as httpclient
import json
import pecan
from pecan import expose
from pecan import request
import uuid
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
@ -140,7 +142,8 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
if group is None:
pecan.abort(httpclient.NOT_FOUND, _("Subcloud Peer Group not found"))
if verb is None:
subcloud_peer_group_dict = db_api.subcloud_peer_group_db_model_to_dict(group)
subcloud_peer_group_dict = \
db_api.subcloud_peer_group_db_model_to_dict(group)
return subcloud_peer_group_dict
elif verb == 'subclouds':
# Return only the subclouds for this subcloud peer group
@ -285,29 +288,31 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
if (peer_group_name is not None and
not utils.validate_name(peer_group_name,
prohibited_name_list=['none'])):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer-group-name'))
pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer-group-name'))
if (group_priority is not None and
not self._validate_group_priority(group_priority)):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid group-priority'))
pecan.abort(httpclient.BAD_REQUEST, _('Invalid group-priority'))
if group_state and not self._validate_group_state(group_state):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid group-state'))
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid group-state'))
if (max_subcloud_rehoming is not None and
not self._validate_max_subcloud_rehoming(max_subcloud_rehoming)):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid max-subcloud-rehoming'))
if (system_leader_id and
not self._validate_system_leader_id(system_leader_id)):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid system-leader-id'))
not self._validate_system_leader_id(system_leader_id)):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid system-leader-id'))
if (system_leader_name is not None and
not utils.validate_name(system_leader_name)):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid system-leader-name'))
not utils.validate_name(system_leader_name)):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid system-leader-name'))
if (migration_status and
migration_status.lower() not in [consts.PEER_GROUP_MIGRATING,
consts.PEER_GROUP_MIGRATION_COMPLETE,
consts.PEER_GROUP_MIGRATION_NONE]):
migration_status.lower() not in [
consts.PEER_GROUP_MIGRATING,
consts.PEER_GROUP_MIGRATION_COMPLETE,
consts.PEER_GROUP_MIGRATION_NONE
]):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid migration_status'))
@ -322,7 +327,9 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
system_leader_id=system_leader_id,
system_leader_name=system_leader_name,
migration_status=migration_status)
return db_api.subcloud_peer_group_db_model_to_dict(updated_peer_group)
return db_api.subcloud_peer_group_db_model_to_dict(
updated_peer_group
)
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
@ -427,31 +434,31 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
payload = json.loads(request.body)
if 'peer_uuid' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing peer_uuid' %
'%s, missing peer_uuid' %
group.peer_group_name))
if 'peer_group_name' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing peer_group_name' %
'%s, missing peer_group_name' %
group.peer_group_name))
if 'group_priority' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing group_priority' %
'%s, missing group_priority' %
group.peer_group_name))
if 'group_state' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing group_state' %
'%s, missing group_state' %
group.peer_group_name))
if 'system_leader_id' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing system_leader_id' %
'%s, missing system_leader_id' %
group.peer_group_name))
if 'system_leader_name' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing system_leader_name' %
'%s, missing system_leader_name' %
group.peer_group_name))
if 'migration_status' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing migration_status' %
'%s, missing migration_status' %
group.peer_group_name))
try:
msg = self.rpc_client.peer_group_audit_notify(

View File

@ -17,29 +17,32 @@
# SPDX-License-Identifier: Apache-2.0
#
from requests_toolbelt.multipart import decoder
import base64
import json
import keyring
import os
import re
import keyring
from oslo_config import cfg
from oslo_log import log as logging
from oslo_messaging import RemoteError
import re
from requests_toolbelt.multipart import decoder
import pecan
from pecan import expose
from pecan import request
from fm_api.constants import FM_ALARM_ID_UNSYNCHRONIZED_RESOURCE
from keystoneauth1 import exceptions as keystone_exceptions
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.fm import FmClient
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon import exceptions as dccommon_exceptions
from keystoneauth1 import exceptions as keystone_exceptions
from dcmanager.api.controllers import restcomm
from dcmanager.api.policies import subclouds as subclouds_policy
from dcmanager.api import policy
@ -52,8 +55,6 @@ from dcmanager.common import utils
from dcmanager.db import api as db_api
from dcmanager.rpc import client as rpc_client
from fm_api.constants import FM_ALARM_ID_UNSYNCHRONIZED_RESOURCE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -263,7 +264,9 @@ class SubcloudsController(object):
LOG.error(message)
return None
def _get_deploy_config_sync_status(self, context, subcloud_name, keystone_client):
def _get_deploy_config_sync_status(
self, context, subcloud_name, keystone_client
):
"""Get the deploy configuration insync status of the subcloud """
detected_alarms = None
try:
@ -371,7 +374,7 @@ class SubcloudsController(object):
if subcloud_status:
subcloud_status_list.append(
db_api.subcloud_endpoint_status_db_model_to_dict( # noqa
db_api.subcloud_endpoint_status_db_model_to_dict(
subcloud_status))
subcloud_list[-1][
consts.ENDPOINT_SYNC_STATUS] = subcloud_status_list
@ -380,7 +383,7 @@ class SubcloudsController(object):
subcloud_status_list = []
if subcloud_status:
subcloud_status_list.append(
db_api.subcloud_endpoint_status_db_model_to_dict( # noqa
db_api.subcloud_endpoint_status_db_model_to_dict(
subcloud_status))
subcloud_list.append(subcloud_dict)
@ -457,13 +460,15 @@ class SubcloudsController(object):
if detail is not None:
oam_floating_ip = "unavailable"
deploy_config_sync_status = "unknown"
if subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE:
if (subcloud.availability_status ==
dccommon_consts.AVAILABILITY_ONLINE):
# Get the keystone client that will be used
# for _get_deploy_config_sync_status and _get_oam_addresses
sc_ks_client = psd_common.get_ks_client(subcloud_region)
oam_addresses = self._get_oam_addresses(context,
subcloud_region, sc_ks_client)
oam_addresses = self._get_oam_addresses(
context, subcloud_region, sc_ks_client
)
if oam_addresses is not None:
oam_floating_ip = oam_addresses.oam_floating_ip
@ -472,9 +477,11 @@ class SubcloudsController(object):
if deploy_config_state is not None:
deploy_config_sync_status = deploy_config_state
extra_details = {"oam_floating_ip": oam_floating_ip,
"deploy_config_sync_status": deploy_config_sync_status,
"region_name": subcloud_region}
extra_details = {
"oam_floating_ip": oam_floating_ip,
"deploy_config_sync_status": deploy_config_sync_status,
"region_name": subcloud_region
}
subcloud_dict.update(extra_details)
return subcloud_dict
@ -594,26 +601,39 @@ class SubcloudsController(object):
# Rename the subcloud
new_subcloud_name = payload.get('name')
if new_subcloud_name is not None:
# To be renamed the subcloud must be in unmanaged and valid deploy state
if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED \
or subcloud.deploy_status not in consts.STATES_FOR_SUBCLOUD_RENAME:
msg = ('Subcloud %s must be unmanaged and in a valid deploy state '
'for the subcloud rename operation.' % subcloud.name)
# To be renamed the subcloud must be in unmanaged and valid deploy
# state
if (subcloud.management_state !=
dccommon_consts.MANAGEMENT_UNMANAGED or
subcloud.deploy_status not in
consts.STATES_FOR_SUBCLOUD_RENAME):
msg = (
'Subcloud %s must be unmanaged and in a valid deploy state '
'for the subcloud rename operation.' %
subcloud.name
)
pecan.abort(400, msg)
# Validates new name
if not utils.is_subcloud_name_format_valid(new_subcloud_name):
pecan.abort(400, _("new name must contain alphabetic characters"))
pecan.abort(
400, _("new name must contain alphabetic characters")
)
# Checks if new subcloud name is the same as the current subcloud
if new_subcloud_name == subcloud.name:
pecan.abort(400, _('Provided subcloud name %s is the same as the '
'current subcloud %s. A different name is '
'required to rename the subcloud' %
(new_subcloud_name, subcloud.name)))
pecan.abort(
400, _('Provided subcloud name %s is the same as the '
'current subcloud %s. A different name is '
'required to rename the subcloud' %
(new_subcloud_name, subcloud.name))
)
error_msg = (
'Unable to rename subcloud %s with their region %s to %s' %
(subcloud.name, subcloud.region_name, new_subcloud_name)
)
error_msg = ('Unable to rename subcloud %s with their region %s to %s' %
(subcloud.name, subcloud.region_name, new_subcloud_name))
try:
LOG.info("Renaming subcloud %s to: %s\n" % (subcloud.name,
new_subcloud_name))
@ -720,12 +740,14 @@ class SubcloudsController(object):
if pgrp.group_priority > 0:
pecan.abort(400, _("Cannot set the subcloud to a peer"
" group with non-zero priority."))
elif not (subcloud.deploy_status in [consts.DEPLOY_STATE_DONE,
consts.PRESTAGE_STATE_COMPLETE]
and subcloud.management_state ==
dccommon_consts.MANAGEMENT_MANAGED
and subcloud.availability_status ==
dccommon_consts.AVAILABILITY_ONLINE):
elif not (
subcloud.deploy_status in [
consts.DEPLOY_STATE_DONE,
consts.PRESTAGE_STATE_COMPLETE
] and subcloud.management_state ==
dccommon_consts.MANAGEMENT_MANAGED
and subcloud.availability_status ==
dccommon_consts.AVAILABILITY_ONLINE):
pecan.abort(400, _("Only subclouds that are "
"managed and online can be "
"added to a peer group."))
@ -770,13 +792,15 @@ class SubcloudsController(object):
payload = psd_common.get_request_data(
request, subcloud, SUBCLOUD_REDEPLOY_GET_FILE_CONTENTS)
if (subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE or
subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED):
if (subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE
or subcloud.management_state ==
dccommon_consts.MANAGEMENT_MANAGED):
msg = _('Cannot re-deploy an online and/or managed subcloud')
LOG.warning(msg)
pecan.abort(400, msg)
payload['software_version'] = utils.get_sw_version(payload.get('release'))
payload['software_version'] = \
utils.get_sw_version(payload.get('release'))
# Don't load previously stored bootstrap_values if they are present in
# the request, as this would override the already loaded values from it.
@ -831,8 +855,10 @@ class SubcloudsController(object):
'Please use /v1.0/subcloud-backup/restore'))
elif verb == "reconfigure":
pecan.abort(410, _('This API is deprecated. '
'Please use /v1.0/phased-subcloud-deploy/{subcloud}/configure'))
pecan.abort(
410, _('This API is deprecated. Please use '
'/v1.0/phased-subcloud-deploy/{subcloud}/configure')
)
elif verb == "reinstall":
pecan.abort(410, _('This API is deprecated. '

View File

@ -1,4 +1,4 @@
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -132,7 +132,7 @@ class SystemPeersController(restcomm.GenericPathController):
def _validate_manager_endpoint(self, endpoint):
if not endpoint or len(endpoint) >= MAX_SYSTEM_PEER_MANAGER_ENDPOINT_LEN or \
not endpoint.startswith(("http", "https")):
not endpoint.startswith(("http", "https")):
LOG.debug("Invalid manager_endpoint: %s" % endpoint)
return False
return True
@ -176,7 +176,7 @@ class SystemPeersController(restcomm.GenericPathController):
# We do not support less than min or greater than max
if val < MIN_SYSTEM_PEER_HEARTBEAT_INTERVAL or \
val > MAX_SYSTEM_PEER_HEARTBEAT_INTERVAL:
val > MAX_SYSTEM_PEER_HEARTBEAT_INTERVAL:
LOG.debug("Invalid heartbeat_interval: %s" % heartbeat_interval)
return False
return True
@ -193,15 +193,14 @@ class SystemPeersController(restcomm.GenericPathController):
# We do not support less than min or greater than max
if val < MIN_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD or \
val > MAX_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD:
val > MAX_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD:
LOG.debug("Invalid heartbeat_failure_threshold: %s" %
heartbeat_failure_threshold)
return False
return True
def _validate_heartbeat_failure_policy(self, heartbeat_failure_policy):
if heartbeat_failure_policy not in \
SYSTEM_PEER_HEARTBEAT_FAILURE_POLICY_LIST:
if heartbeat_failure_policy not in SYSTEM_PEER_HEARTBEAT_FAILURE_POLICY_LIST:
LOG.debug("Invalid heartbeat_failure_policy: %s" %
heartbeat_failure_policy)
return False
@ -219,7 +218,7 @@ class SystemPeersController(restcomm.GenericPathController):
# We do not support less than min or greater than max
if val < MIN_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT or \
val > MAX_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT:
val > MAX_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT:
LOG.debug("Invalid heartbeat_maintenance_timeout: %s" %
heartbeat_maintenance_timeout)
return False
@ -287,7 +286,7 @@ class SystemPeersController(restcomm.GenericPathController):
payload.get('heartbeat_failure_threshold')
if heartbeat_failure_threshold is not None:
if not self._validate_heartbeat_failure_threshold(
heartbeat_failure_threshold):
heartbeat_failure_threshold):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_failure_threshold'))
kwargs['heartbeat_failure_threshold'] = heartbeat_failure_threshold
@ -295,7 +294,7 @@ class SystemPeersController(restcomm.GenericPathController):
heartbeat_failure_policy = payload.get('heartbeat_failure_policy')
if heartbeat_failure_policy:
if not self._validate_heartbeat_failure_policy(
heartbeat_failure_policy):
heartbeat_failure_policy):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_failure_policy'))
kwargs['heartbeat_failure_policy'] = heartbeat_failure_policy
@ -304,7 +303,7 @@ class SystemPeersController(restcomm.GenericPathController):
payload.get('heartbeat_maintenance_timeout')
if heartbeat_maintenance_timeout is not None:
if not self._validate_heartbeat_maintenance_timeout(
heartbeat_maintenance_timeout):
heartbeat_maintenance_timeout):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_maintenance_timeout'))
kwargs['heartbeat_maintenance_timeout'] = \
@ -419,19 +418,19 @@ class SystemPeersController(restcomm.GenericPathController):
if heartbeat_failure_threshold:
if not self._validate_heartbeat_failure_threshold(
heartbeat_failure_threshold):
heartbeat_failure_threshold):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_failure_threshold'))
if heartbeat_failure_policy:
if not self._validate_heartbeat_failure_policy(
heartbeat_failure_policy):
heartbeat_failure_policy):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_failure_policy'))
if heartbeat_maintenance_timeout:
if not self._validate_heartbeat_maintenance_timeout(
heartbeat_maintenance_timeout):
heartbeat_maintenance_timeout):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_maintenance_timeout'))

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
# Copyright (c) 2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:alarm_manager:%s'

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:peer_group_associations:%s'

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:phased_subcloud_deploy:%s'

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
# Copyright (c) 2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:subcloud_backup:%s'

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2022-2023 Wind River Systems, Inc.
# Copyright (c) 2022-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:subcloud_deploy:%s'

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
# Copyright (c) 2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:subcloud_groups:%s'

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:subcloud_peer_groups:%s'

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2022-2024 Wind River Systems, Inc.
# Copyright (c) 2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:subclouds:%s'

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
# Copyright (c) 2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:sw_update_options:%s'

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
# Copyright (c) 2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:sw_update_strategy:%s'

View File

@ -1,11 +1,12 @@
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.api.policies import base
from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:system_peers:%s'

View File

@ -13,17 +13,18 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2022 Wind River Systems, Inc.
# Copyright (c) 2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""Policy Engine For DC."""
from dcmanager.api import policies as controller_policies
from oslo_config import cfg
from oslo_policy import policy
from webob import exc
from dcmanager.api import policies as controller_policies
CONF = cfg.CONF
_ENFORCER = None

View File

@ -1,22 +1,24 @@
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from dcmanager.common import consts
from dcmanager.db import api as db_api
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
@ -40,10 +42,10 @@ class AlarmAggregation(object):
LOG.error('Failed to update alarms for %s error: %s' % (name, e))
def _set_cloud_status(self, alarm_dict):
if (alarm_dict.get('critical_alarms') > 0):
if alarm_dict.get('critical_alarms') > 0:
status = consts.ALARM_CRITICAL_STATUS
elif (alarm_dict.get('major_alarms') > 0) or\
(alarm_dict.get('minor_alarms') > 0):
elif (alarm_dict.get('major_alarms') > 0) or \
(alarm_dict.get('minor_alarms') > 0):
status = consts.ALARM_DEGRADED_STATUS
else:
status = consts.ALARM_OK_STATUS

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2021-2023 Wind River Systems, Inc.
# Copyright (c) 2021-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import abc
import six
@ -31,7 +32,9 @@ class Auditor(object):
def set_subcloud_endpoint_in_sync(self, sc_name, sc_region):
"""Set the endpoint sync status of this subcloud to be in sync"""
self._set_subcloud_sync_status(sc_name, sc_region, dccommon_consts.SYNC_STATUS_IN_SYNC)
self._set_subcloud_sync_status(
sc_name, sc_region, dccommon_consts.SYNC_STATUS_IN_SYNC
)
def set_subcloud_endpoint_out_of_sync(self, sc_name, sc_region):
"""Set the endpoint sync status of this subcloud to be out of sync"""

View File

@ -1,5 +1,5 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -112,16 +112,12 @@ class FirmwareAudit(object):
# Filter images which have been applied on RegionOne
for image in local_device_images:
if image.applied:
filtered_images.append(FirmwareAuditData(image.bitstream_type,
image.bitstream_id,
image.bmc,
image.retimer_included,
image.key_signature,
image.revoke_key_id,
image.applied,
image.pci_vendor,
image.pci_device,
image.applied_labels))
filtered_images.append(FirmwareAuditData(
image.bitstream_type, image.bitstream_id, image.bmc,
image.retimer_included, image.key_signature,
image.revoke_key_id, image.applied, image.pci_vendor,
image.pci_device, image.applied_labels
))
LOG.debug("RegionOne applied_images: %s" % filtered_images)
except Exception:
LOG.exception('Cannot retrieve device images for RegionOne, '
@ -133,24 +129,33 @@ class FirmwareAudit(object):
label_key, label_value):
for device_label in subcloud_host_device_label_list:
if device_label.pcidevice_uuid and \
device_uuid == device_label.pcidevice_uuid and \
label_key == device_label.label_key and \
label_value == device_label.label_value:
device_uuid == device_label.pcidevice_uuid and \
label_key == device_label.label_key and \
label_value == device_label.label_value:
return True
return False
def _check_image_match(self,
subcloud_image,
system_controller_image):
if ((system_controller_image.bitstream_type == consts.BITSTREAM_TYPE_ROOT_KEY and
system_controller_image.key_signature == subcloud_image.key_signature) or
(system_controller_image.bitstream_type == consts.BITSTREAM_TYPE_FUNCTIONAL and
system_controller_image.bitstream_id == subcloud_image.bitstream_id and
system_controller_image.bmc == subcloud_image.bmc and
system_controller_image.retimer_included == subcloud_image.retimer_included) or
(system_controller_image.bitstream_type == consts.BITSTREAM_TYPE_KEY_REVOCATION and
system_controller_image.revoke_key_id == subcloud_image.revoke_key_id)):
return True
def _check_image_match(self, subcloud_image, system_controller_image):
if (
(
system_controller_image.bitstream_type ==
consts.BITSTREAM_TYPE_ROOT_KEY and
system_controller_image.key_signature == subcloud_image.key_signature
) or (
system_controller_image.bitstream_type ==
consts.BITSTREAM_TYPE_FUNCTIONAL and
system_controller_image.bitstream_id ==
subcloud_image.bitstream_id and
system_controller_image.bmc == subcloud_image.bmc and
system_controller_image.retimer_included ==
subcloud_image.retimer_included
) or (
system_controller_image.bitstream_type ==
consts.BITSTREAM_TYPE_KEY_REVOCATION and
system_controller_image.revoke_key_id == subcloud_image.revoke_key_id
)
):
return True
return False
def _check_subcloud_device_has_image(self,
@ -197,7 +202,7 @@ class FirmwareAudit(object):
continue
if image.pci_vendor == device.pvendor_id and \
image.pci_device == device.pdevice_id:
image.pci_device == device.pdevice_id:
device_image_state = None
subcloud_image = None
for device_image_state_obj in subcloud_device_image_states:
@ -220,7 +225,7 @@ class FirmwareAudit(object):
return False
if device_image_state and \
device_image_state.status != "completed":
device_image_state.status != "completed":
# If device image state is not completed it means
# that the image has not been written to the device yet
return False
@ -303,12 +308,10 @@ class FirmwareAudit(object):
for image in audit_data:
# audit_data will be a dict from passing through RPC, so objectify
image = FirmwareAuditData.from_dict(image)
proceed = self._check_subcloud_device_has_image(subcloud_name,
sysinv_client,
image,
enabled_host_device_list,
subcloud_device_image_states,
subcloud_device_label_list)
proceed = self._check_subcloud_device_has_image(
subcloud_name, sysinv_client, image, enabled_host_device_list,
subcloud_device_image_states, subcloud_device_label_list
)
if not proceed:
out_of_sync = True
break

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2021-2023 Wind River Systems, Inc.
# Copyright (c) 2021-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -114,7 +114,7 @@ class KubeRootcaUpdateAudit(Auditor):
# the distributed cloud and the subcloud running on old software
# version that cannot search for the k8s root CA cert id.
if dccommon_utils.is_centos(subcloud.software_version) or \
not subcloud.rehomed:
not subcloud.rehomed:
self.subcloud_audit_alarm_based(subcloud_name, subcloud_region,
session)
return

View File

@ -1,18 +1,17 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from keystoneauth1 import exceptions as keystone_exceptions
@ -25,7 +24,6 @@ from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack import software_v1
from dccommon.drivers.openstack.software_v1 import SoftwareClient
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcmanager.common import utils
LOG = logging.getLogger(__name__)
@ -155,7 +153,8 @@ class PatchAudit(object):
m_os_ks_client = OpenStackDriver(
region_name=dccommon_consts.DEFAULT_REGION_NAME,
region_clients=None).keystone_client
patching_endpoint = m_os_ks_client.endpoint_cache.get_endpoint('patching')
patching_endpoint = m_os_ks_client.endpoint_cache.get_endpoint(
'patching')
sysinv_endpoint = m_os_ks_client.endpoint_cache.get_endpoint('sysinv')
patching_client = PatchingClient(
dccommon_consts.DEFAULT_REGION_NAME, m_os_ks_client.session,
@ -195,10 +194,14 @@ class PatchAudit(object):
return PatchAuditData(regionone_patches, applied_patch_ids,
committed_patch_ids, regionone_software_version)
def subcloud_audit(self, subcloud_name, subcloud_region, audit_data, software_audit_data,
do_load_audit):
def subcloud_audit(
self, subcloud_name, subcloud_region, audit_data, software_audit_data,
do_load_audit
):
if software_audit_data:
self.subcloud_software_audit(subcloud_name, subcloud_region, software_audit_data)
self.subcloud_software_audit(
subcloud_name, subcloud_region, software_audit_data
)
else:
self.subcloud_patch_audit(subcloud_name, subcloud_region, audit_data,
do_load_audit)

View File

@ -1,32 +1,30 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import eventlet
import os
import time
from tsconfig.tsconfig import CONFIG_PATH
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from tsconfig.tsconfig import CONFIG_PATH
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import sysinv_v1
from dcmanager.audit import firmware_audit
from dcmanager.audit import kube_rootca_update_audit
from dcmanager.audit import kubernetes_audit
@ -81,7 +79,8 @@ class SubcloudAuditManager(manager.Manager):
super(SubcloudAuditManager, self).__init__(
service_name="subcloud_audit_manager")
self.context = context.get_admin_context()
self.audit_worker_rpc_client = dcmanager_audit_rpc_client.ManagerAuditWorkerClient()
self.audit_worker_rpc_client = (
dcmanager_audit_rpc_client.ManagerAuditWorkerClient())
# Number of audits since last subcloud state update
self.audit_count = SUBCLOUD_STATE_UPDATE_ITERATIONS - 2
# Number of patch audits
@ -100,11 +99,13 @@ class SubcloudAuditManager(manager.Manager):
def _add_missing_endpoints(self):
# Update this flag file based on the most recent new endpoint
file_path_list = []
file_path_list.append(os.path.join(CONFIG_PATH,
'.kube_rootca_update_endpoint_added'))
file_path_list.append(os.path.join(
CONFIG_PATH, '.kube_rootca_update_endpoint_added')
)
if cfg.CONF.use_usm:
file_path_list.append(os.path.join(CONFIG_PATH,
'.usm_endpoint_added'))
file_path_list.append(os.path.join(
CONFIG_PATH, '.usm_endpoint_added')
)
for file_path in file_path_list:
# If file exists on the controller, all the endpoints have been
# added to DB since last time an endpoint was added
@ -118,7 +119,9 @@ class SubcloudAuditManager(manager.Manager):
subcloud.id)
# Use set difference to find missing endpoints
if cfg.CONF.use_usm:
endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST_USM)
endpoint_type_set = set(
dccommon_consts.ENDPOINT_TYPES_LIST_USM
)
else:
endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST)
subcloud_set = set()
@ -265,17 +268,18 @@ class SubcloudAuditManager(manager.Manager):
SubcloudAuditManager.force_patch_audit):
LOG.info("Trigger load audit")
audit_load = True
if (self.patch_audit_count % 4 == 1):
if self.patch_audit_count % 4 == 1:
LOG.info("Trigger firmware audit")
audit_firmware = True
# Reset force_firmware_audit only when firmware audit has been fired
SubcloudAuditManager.reset_force_firmware_audit()
if (self.patch_audit_count % KUBERNETES_AUDIT_RATE == 1):
if self.patch_audit_count % KUBERNETES_AUDIT_RATE == 1:
LOG.info("Trigger kubernetes audit")
audit_kubernetes = True
# Reset force_kubernetes_audit only when kubernetes audit has been fired
# Reset force_kubernetes_audit only when kubernetes audit has been
# fired
SubcloudAuditManager.reset_force_kubernetes_audit()
if (self.patch_audit_count % KUBE_ROOTCA_UPDATE_AUDIT_RATE == 1):
if self.patch_audit_count % KUBE_ROOTCA_UPDATE_AUDIT_RATE == 1:
LOG.info("Trigger kube rootca update audit")
audit_kube_rootca_updates = True
# Reset force_kube_rootca_update_audit only if audit is fired
@ -319,7 +323,8 @@ class SubcloudAuditManager(manager.Manager):
if audit_patch:
if cfg.CONF.use_usm:
# Query RegionOne releases
software_audit_data = self.patch_audit.get_software_regionone_audit_data()
software_audit_data = \
self.patch_audit.get_software_regionone_audit_data()
else:
# Query RegionOne patches and software version
patch_audit_data = self.patch_audit.get_regionone_audit_data()
@ -396,7 +401,8 @@ class SubcloudAuditManager(manager.Manager):
self.context, last_audit_fixup_threshold)
end = datetime.datetime.utcnow()
if num_fixed > 0:
LOG.info('Fixed up subcloud audit timestamp for %s subclouds.' % num_fixed)
LOG.info(
'Fixed up subcloud audit timestamp for %s subclouds.' % num_fixed)
LOG.info('Fixup took %s seconds' % (end - start))
subcloud_ids = []
@ -452,7 +458,8 @@ class SubcloudAuditManager(manager.Manager):
kube_rootca_update_audit_data))
# We want a chunksize of at least 1 so add the number of workers.
chunksize = (len(subcloud_audits) + CONF.audit_worker_workers) // CONF.audit_worker_workers
chunksize = (len(subcloud_audits) + CONF.audit_worker_workers) // (
CONF.audit_worker_workers)
for audit in subcloud_audits:
subcloud_ids.append(audit.subcloud_id)
if len(subcloud_ids) == chunksize:
@ -466,7 +473,10 @@ class SubcloudAuditManager(manager.Manager):
do_openstack_audit,
kube_rootca_update_audit_data,
software_audit_data)
LOG.debug('Sent subcloud audit request message for subclouds: %s' % subcloud_ids)
LOG.debug(
'Sent subcloud audit request message for subclouds: %s' %
subcloud_ids
)
subcloud_ids = []
if len(subcloud_ids) > 0:
# We've got a partial batch...send it off for processing.
@ -479,6 +489,9 @@ class SubcloudAuditManager(manager.Manager):
do_openstack_audit,
kube_rootca_update_audit_data,
software_audit_data)
LOG.debug('Sent final subcloud audit request message for subclouds: %s' % subcloud_ids)
LOG.debug(
'Sent final subcloud audit request message for subclouds: %s' %
subcloud_ids
)
else:
LOG.debug('Done sending audit request messages.')

View File

@ -1,18 +1,17 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
@ -23,7 +22,6 @@ from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dcmanager.audit import alarm_aggregation
from dcmanager.audit import firmware_audit
from dcmanager.audit import kube_rootca_update_audit
@ -131,8 +129,9 @@ class SubcloudAuditWorkerManager(manager.Manager):
subcloud.deploy_status)) or (
(subcloud.deploy_status in [
consts.DEPLOY_STATE_INSTALLING,
consts.DEPLOY_STATE_REHOME_PENDING]) and
subcloud.availability_status == dccommon_consts.AVAILABILITY_OFFLINE):
consts.DEPLOY_STATE_REHOME_PENDING])
and subcloud.availability_status ==
dccommon_consts.AVAILABILITY_OFFLINE):
LOG.debug("Skip subcloud %s audit, deploy_status: %s" %
(subcloud.name, subcloud.deploy_status))
# This DB API call will set the "audit_finished_at" timestamp
@ -399,7 +398,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
except keystone_exceptions.NotFound:
if subcloud.first_identity_sync_complete \
and avail_status_current == dccommon_consts.AVAILABILITY_ONLINE:
and avail_status_current == dccommon_consts.AVAILABILITY_ONLINE:
# The first identity sync is already complete
# Therefore this is an error
LOG.error("Identity or Platform endpoint for online "

View File

@ -1,15 +1,16 @@
# Copyright (c) 2021 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Copyright (c) 2021, 2024 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# The right to copy, distribute, modify, or otherwise make use
@ -26,17 +27,17 @@ def request_subcloud_audits(context,
audit_firmware=False,
audit_kubernetes=False,
audit_kube_rootca=False):
values = {}
if update_subcloud_state:
values['state_update_requested'] = True
if audit_patch:
values['patch_audit_requested'] = True
if audit_load:
values['load_audit_requested'] = True
if audit_firmware:
values['firmware_audit_requested'] = True
if audit_kubernetes:
values['kubernetes_audit_requested'] = True
if audit_kube_rootca:
values['kube_rootca_update_audit_requested'] = True
db_api.subcloud_audits_update_all(context, values)
values = {}
if update_subcloud_state:
values['state_update_requested'] = True
if audit_patch:
values['patch_audit_requested'] = True
if audit_load:
values['load_audit_requested'] = True
if audit_firmware:
values['firmware_audit_requested'] = True
if audit_kubernetes:
values['kubernetes_audit_requested'] = True
if audit_kube_rootca:
values['kube_rootca_update_audit_requested'] = True
db_api.subcloud_audits_update_all(context, values)

View File

@ -1,5 +1,5 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -19,24 +19,25 @@
# see http://git.openstack.org/cgit/openstack/ironic/tree/ironic/cmd/api.py
import logging as std_logging
import sys
import eventlet
eventlet.monkey_patch(os=False)
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import systemd
from oslo_service import wsgi
# pylint: disable=wrong-import-position
from oslo_config import cfg # noqa: E402
from oslo_log import log as logging # noqa: E402
from oslo_service import systemd # noqa: E402
from oslo_service import wsgi # noqa: E402
import logging as std_logging
from dcmanager.api import api_config # noqa: E402
from dcmanager.api import app # noqa: E402
from dcmanager.common import config # noqa: E402
from dcmanager.common import messaging # noqa: E402
from dcorch.common import messaging as dcorch_messaging # noqa: E402
# pylint: enable=wrong-import-position
from dcmanager.api import api_config
from dcmanager.api import app
from dcmanager.common import config
from dcmanager.common import messaging
from dcorch.common import messaging as dcorch_messaging
CONF = cfg.CONF
config.register_options()
LOG = logging.getLogger('dcmanager.api')

View File

@ -1,4 +1,6 @@
# Copyright (c) 2021 Wind River Systems, Inc.
# Copyright (c) 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -19,14 +21,15 @@ DC Manager Audit Service.
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_i18n import _lazy
from oslo_log import log as logging
from oslo_service import service
from dcmanager.common import config
from dcmanager.common import messaging
# pylint: disable=wrong-import-position
from oslo_config import cfg # noqa: E402
from oslo_i18n import _lazy # noqa: E402
from oslo_log import log as logging # noqa: E402
from oslo_service import service # noqa: E402
from dcmanager.common import config # noqa: E402
from dcmanager.common import messaging # noqa: E402
# pylint: enable=wrong-import-position
_lazy.enable_lazy()
config.register_options()
@ -55,5 +58,6 @@ def main():
launcher.wait()
if __name__ == '__main__':
main()

View File

@ -1,4 +1,6 @@
# Copyright (c) 2021 Wind River Systems, Inc.
# Copyright (c) 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -19,14 +21,15 @@ DC Manager Audit Worker Service.
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_i18n import _lazy
from oslo_log import log as logging
from oslo_service import service
from dcmanager.common import config
from dcmanager.common import messaging
# pylint: disable=wrong-import-position
from oslo_config import cfg # noqa: E402
from oslo_i18n import _lazy # noqa: E402
from oslo_log import log as logging # noqa: E402
from oslo_service import service # noqa: E402
from dcmanager.common import config # noqa: E402
from dcmanager.common import messaging # noqa: E402
# pylint: enable=wrong-import-position
_lazy.enable_lazy()
config.register_options()
@ -55,5 +58,6 @@ def main():
launcher.wait()
if __name__ == '__main__':
main()

View File

@ -1,15 +1,17 @@
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
@ -51,6 +53,7 @@ def add_command_parsers(subparsers):
parser.add_argument('version', nargs='?')
parser.add_argument('current_version', nargs='?')
command_opt = cfg.SubCommandOpt('command',
title='Commands',
help='Show available commands.',
@ -76,5 +79,6 @@ def main():
except Exception as e:
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
main()

View File

@ -1,5 +1,7 @@
#!/usr/bin/env python
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -18,18 +20,20 @@ DC Manager Engine Server.
"""
import eventlet
# pylint: disable=wrong-import-position
from oslo_config import cfg # noqa: E402
from oslo_i18n import _lazy # noqa: E402
from oslo_log import log as logging # noqa: E402
from oslo_service import service # noqa: E402
from dcmanager.common import config # noqa: E402
from dcmanager.common import consts # noqa: E402
from dcmanager.common import messaging # noqa: E402
from dcorch.common import messaging as dcorch_messaging # noqa: E402
# pylint: enable=wrong-import-position
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_i18n import _lazy
from oslo_log import log as logging
from oslo_service import service
from dcmanager.common import config
from dcmanager.common import consts
from dcmanager.common import messaging
from dcorch.common import messaging as dcorch_messaging
_lazy.enable_lazy()
config.register_options()
config.register_keystone_options()
@ -59,5 +63,6 @@ def main():
# srv.create_periodic_tasks()
launcher.wait()
if __name__ == '__main__':
main()

View File

@ -1,4 +1,6 @@
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -19,14 +21,15 @@ DC Manager Orchestrator Service.
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_i18n import _lazy
from oslo_log import log as logging
from oslo_service import service
from dcmanager.common import config
from dcmanager.common import messaging
# pylint: disable=wrong-import-position
from oslo_config import cfg # noqa: E402
from oslo_i18n import _lazy # noqa: E402
from oslo_log import log as logging # noqa: E402
from oslo_service import service # noqa: E402
from dcmanager.common import config # noqa: E402
from dcmanager.common import messaging # noqa: E402
# pylint: enable=wrong-import-position
CONF = cfg.CONF
LOG = logging.getLogger('dcmanager.orchestrator')
@ -54,5 +57,6 @@ def main():
launcher.wait()
if __name__ == '__main__':
main()

View File

@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2022 Wind River Systems, Inc.
# Copyright (c) 2022, 2024 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
@ -26,14 +26,16 @@ DC Manager State Engine Server.
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_i18n import _lazy
from oslo_log import log as logging
from oslo_service import service
# pylint: disable=wrong-import-position
from oslo_config import cfg # noqa: E402
from oslo_i18n import _lazy # noqa: E402
from oslo_log import log as logging # noqa: E402
from oslo_service import service # noqa: E402
from dcmanager.common import config
from dcmanager.common import messaging
from dcorch.common import messaging as dcorch_messaging
from dcmanager.common import config # noqa: E402
from dcmanager.common import messaging # noqa: E402
from dcorch.common import messaging as dcorch_messaging # noqa: E402
# pylint: enable=wrong-import-position
_lazy.enable_lazy()
config.register_options()
@ -65,5 +67,6 @@ def main():
launcher = service.launch(cfg.CONF, srv, workers=cfg.CONF.state_workers)
launcher.wait()
if __name__ == '__main__':
main()

View File

@ -1,16 +1,18 @@
# Copyright (c) 2016 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
RPC_API_VERSION = "1.0"
@ -397,9 +399,10 @@ BITSTREAM_TYPE_KEY_REVOCATION = 'key-revocation'
# Platform Backup size default in MB
DEFAULT_PERSISTENT_SIZE = 30000
# Retry values to be used when platform requests fail due to temporary unavailability, which
# may occur during some orchestration steps. The sleep duration and number of retries are shorter,
# since these should only occur if a service is being restarted
# Retry values to be used when platform requests fail due to temporary
# unavailability, which may occur during some orchestration steps. The sleep
# duration and number of retries are shorter, since these should only occur if a
# service is being restarted
PLATFORM_RETRY_MAX_ATTEMPTS = 5
PLATFORM_RETRY_SLEEP_MILLIS = 5000

View File

@ -1,27 +1,29 @@
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import pecan
from pecan import hooks
import re
from six.moves.urllib.parse import urlparse
import time
from oslo_context import context as base_context
from oslo_log import log
from oslo_utils import encodeutils
from oslo_utils import uuidutils
import pecan
from pecan import hooks
from six.moves.urllib.parse import urlparse
from dcmanager.api.policies import base as base_policy
from dcmanager.api import policy
@ -52,7 +54,6 @@ class RequestContext(base_context.RequestContext):
user_domain_name=None, project_domain_name=None,
auth_token_info=None, region_name=None, roles=None,
password=None, **kwargs):
"""Initializer of request context."""
# We still have 'tenant' param because oslo_context still use it.
# pylint: disable=E1123
@ -244,8 +245,8 @@ class AuditLoggingHook(hooks.PecanHook):
# [req-088ed3b6-a2c9-483e-b2ad-f1b2d03e06e6
# 3d76d3c1376744e8ad9916a6c3be3e5f
# ca53e70c76d847fd860693f8eb301546]
# When the ctx is defined, the formatter (defined in common/log.py) requires that keys
# request_id, user, tenant be defined within the ctx
# When the ctx is defined, the formatter (defined in common/log.py) requires
# that keys request_id, user, tenant be defined within the ctx
ctx = {'request_id': request_id,
'user': user_id,
'tenant': tenant_id}
@ -261,4 +262,5 @@ class AuditLoggingHook(hooks.PecanHook):
auditLOG.exception("Exception in AuditLoggingHook on event 'after'")
def on_error(self, state, e):
auditLOG.exception("Exception in AuditLoggingHook passed to event 'on_error': " + str(e))
auditLOG.exception("Exception in AuditLoggingHook passed to event "
"'on_error': " + str(e))

View File

@ -1,6 +1,6 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright 2015 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -39,14 +39,14 @@ class DCManagerException(Exception):
def __init__(self, **kwargs):
try:
super(DCManagerException, self).__init__(self.message % kwargs) # pylint: disable=W1645
super(DCManagerException, self).__init__(self.message % kwargs)
self.msg = self.message % kwargs # pylint: disable=W1645
except Exception:
with excutils.save_and_reraise_exception() as ctxt:
if not self.use_fatal_exceptions():
ctxt.reraise = False
# at least get the core message out if something happened
super(DCManagerException, self).__init__(self.message) # pylint: disable=W1645
super(DCManagerException, self).__init__(self.message)
if six.PY2:
def __unicode__(self):
@ -259,6 +259,7 @@ class PrestagePreCheckFailedException(DCManagerException):
the subcloud can be skipped during orchestrated prestage
operations.
"""
def __init__(self, subcloud, details, orch_skip=False):
self.orch_skip = orch_skip
# Subcloud can be none if we are failing

View File

@ -1,6 +1,6 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -72,7 +72,6 @@ class Manager(PeriodicTasks):
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
def init_host(self):
"""init_host
Hook to do additional manager initialization when one requests
@ -84,7 +83,6 @@ class Manager(PeriodicTasks):
pass
def cleanup_host(self):
"""cleanup_host
Hook to do cleanup work when the service shuts down.
@ -94,7 +92,6 @@ class Manager(PeriodicTasks):
pass
def pre_start_hook(self):
"""pre_start_hook
Hook to provide the manager the ability to do additional
@ -107,7 +104,6 @@ class Manager(PeriodicTasks):
pass
def post_start_hook(self):
"""post_start_hook
Hook to provide the manager the ability to do additional

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -241,13 +241,13 @@ def validate_subcloud_config(context, payload, operation=None,
LOG.exception(e)
pecan.abort(400, _("management_end_address invalid: %s") % e)
if not management_start_ip < management_end_ip:
if management_start_ip > management_end_ip:
pecan.abort(
400,
_("management_start_address not less than "
_("management_start_address greater than "
"management_end_address"))
if not len(netaddr.IPRange(management_start_ip, management_end_ip)) >= \
if len(netaddr.IPRange(management_start_ip, management_end_ip)) < \
MIN_MANAGEMENT_ADDRESSES:
pecan.abort(
400,
@ -379,13 +379,13 @@ def validate_admin_network_config(admin_subnet_str,
LOG.exception(e)
pecan.abort(400, _("admin_end_address invalid: %s") % e)
if not admin_start_ip < admin_end_ip:
if admin_start_ip > admin_end_ip:
pecan.abort(
400,
_("admin_start_address not less than "
_("admin_start_address greater than "
"admin_end_address"))
if not len(netaddr.IPRange(admin_start_ip, admin_end_ip)) >= \
if len(netaddr.IPRange(admin_start_ip, admin_end_ip)) < \
MIN_ADMIN_ADDRESSES:
pecan.abort(
400,
@ -975,7 +975,9 @@ def populate_payload_with_pre_existing_data(payload: dict,
msg = _("Required %s file was not provided and it was not "
"previously available.") % value
pecan.abort(400, msg)
payload.update(dict(list(existing_values.items()) + list(payload.items())))
payload.update(
dict(list(existing_values.items()) + list(payload.items()))
)
elif value == consts.DEPLOY_CONFIG:
if not payload.get(consts.DEPLOY_CONFIG):
fn = get_config_file_path(subcloud.name, value)

View File

@ -1,18 +1,18 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright (c) 2017, 2019, 2021, 2022 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2022, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import oslo_messaging
@ -27,6 +27,7 @@ class Mapping(object):
for key, value in mapping.items():
self.reverse_mapping[value] = key
_SINGLETON_MAPPING = Mapping({
ATTR_NOT_SPECIFIED: "@@**ATTR_NOT_SPECIFIED**@@",
})

View File

@ -1,43 +1,46 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import grp
import itertools
import json
import netaddr
import os
import pecan
import pwd
import re
import resource as sys_resource
import six.moves
import string
import subprocess
import tsconfig.tsconfig as tsc
import uuid
import resource as sys_resource
import xml.etree.ElementTree as ElementTree
import yaml
import pecan
from keystoneauth1 import exceptions as keystone_exceptions
import netaddr
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import base64
import six.moves
import tsconfig.tsconfig as tsc
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
@ -249,43 +252,41 @@ def get_sw_update_strategy_extra_args(context, update_type=None):
return {}
def get_sw_update_opts(context,
for_sw_update=False, subcloud_id=None):
"""Get sw update options for a subcloud
def get_sw_update_opts(context, for_sw_update=False, subcloud_id=None):
"""Get sw update options for a subcloud
:param context: request context object.
:param for_sw_update: return the default options if subcloud options
are empty. Useful for retrieving sw update
options on application of patch strategy.
:param subcloud_id: id of subcloud.
:param context: request context object.
:param for_sw_update: return the default options if subcloud options
are empty. Useful for retrieving sw update
options on application of patch strategy.
:param subcloud_id: id of subcloud.
"""
"""
if subcloud_id is None:
# Requesting defaults. Return constants if no entry in db.
if subcloud_id is None:
# Requesting defaults. Return constants if no entry in db.
sw_update_opts_ref = db_api.sw_update_opts_default_get(context)
if not sw_update_opts_ref:
sw_update_opts_dict = vim.SW_UPDATE_OPTS_CONST_DEFAULT
return sw_update_opts_dict
else:
# requesting subcloud options
sw_update_opts_ref = db_api.sw_update_opts_get(context, subcloud_id)
if sw_update_opts_ref:
subcloud_name = db_api.subcloud_get(context, subcloud_id).name
return db_api.sw_update_opts_w_name_db_model_to_dict(
sw_update_opts_ref, subcloud_name)
elif for_sw_update:
sw_update_opts_ref = db_api.sw_update_opts_default_get(context)
if not sw_update_opts_ref:
sw_update_opts_dict = vim.SW_UPDATE_OPTS_CONST_DEFAULT
return sw_update_opts_dict
else:
# requesting subcloud options
sw_update_opts_ref = db_api.sw_update_opts_get(context,
subcloud_id)
if sw_update_opts_ref:
subcloud_name = db_api.subcloud_get(context, subcloud_id).name
return db_api.sw_update_opts_w_name_db_model_to_dict(
sw_update_opts_ref, subcloud_name)
elif for_sw_update:
sw_update_opts_ref = db_api.sw_update_opts_default_get(context)
if not sw_update_opts_ref:
sw_update_opts_dict = vim.SW_UPDATE_OPTS_CONST_DEFAULT
return sw_update_opts_dict
else:
raise exceptions.SubcloudPatchOptsNotFound(
subcloud_id=subcloud_id)
raise exceptions.SubcloudPatchOptsNotFound(
subcloud_id=subcloud_id)
return db_api.sw_update_opts_w_name_db_model_to_dict(
sw_update_opts_ref, dccommon_consts.SW_UPDATE_DEFAULT_TITLE)
return db_api.sw_update_opts_w_name_db_model_to_dict(
sw_update_opts_ref, dccommon_consts.SW_UPDATE_DEFAULT_TITLE)
def ensure_lock_path():
@ -618,8 +619,8 @@ def subcloud_peer_group_get_by_ref(context, group_ref):
def subcloud_db_list_to_dict(subclouds):
return {'subclouds': [db_api.subcloud_db_model_to_dict(subcloud)
for subcloud in subclouds]}
return {'subclouds':
[db_api.subcloud_db_model_to_dict(subcloud) for subcloud in subclouds]}
def get_oam_addresses(subcloud, sc_ks_client):
@ -811,7 +812,7 @@ def find_ansible_error_msg(subcloud_name, log_file, stage=None):
else:
files_for_search.append(log_file)
if (len(files_for_search) < 2):
if len(files_for_search) < 2:
cmd_list = ([cmd_1, cmd_2, files_for_search[0]])
else:
cmd_list = ([cmd_1, cmd_2, files_for_search[0], files_for_search[1]])
@ -858,7 +859,6 @@ def get_failed_task(files):
Returns a string with the task and date
"""
cmd_1 = 'awk'
# awk command to get the information about last failed task.
# Match expression starting with 'TASK [' and ending with
@ -873,7 +873,7 @@ def get_failed_task(files):
''')
# necessary check since is possible to have
# the error in rotated ansible log
if (len(files) < 2):
if len(files) < 2:
awk_cmd = ([cmd_1, cmd_2, files[0]])
else:
awk_cmd = ([cmd_1, cmd_2, files[0], files[1]])
@ -903,9 +903,7 @@ def summarize_message(error_msg):
Returns a brief message.
"""
list_of_strings_to_search_for = [
'msg:', 'fail', 'error', 'cmd', 'stderr'
]
list_of_strings_to_search_for = ['msg:', 'fail', 'error', 'cmd', 'stderr']
brief_message = []
for line in error_msg:
for s in list_of_strings_to_search_for:
@ -933,10 +931,9 @@ def is_valid_for_backup_operation(operation, subcloud, bootstrap_address_dict=No
def _is_valid_for_backup_create(subcloud):
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE \
or subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED \
or subcloud.deploy_status not in consts.VALID_DEPLOY_STATES_FOR_BACKUP:
or subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED \
or subcloud.deploy_status not in consts.VALID_DEPLOY_STATES_FOR_BACKUP:
msg = ('Subcloud %s must be online, managed and have valid '
'deploy-status for the subcloud-backup '
'create operation.' % subcloud.name)
@ -946,9 +943,8 @@ def _is_valid_for_backup_create(subcloud):
def _is_valid_for_backup_delete(subcloud):
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE \
or subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED:
or subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED:
msg = ('Subcloud %s must be online and managed for the subcloud-backup'
' delete operation with --local-only option.' % subcloud.name)
raise exceptions.ValidateFail(msg)
@ -967,7 +963,7 @@ def _is_valid_for_backup_restore(subcloud, bootstrap_address_dict=None):
has_inventory_file = os.path.exists(ansible_subcloud_inventory_file)
if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED \
or subcloud.deploy_status in consts.INVALID_DEPLOY_STATES_FOR_RESTORE:
or subcloud.deploy_status in consts.INVALID_DEPLOY_STATES_FOR_RESTORE:
msg = ('Subcloud %s must be unmanaged and in a valid deploy state '
'for the subcloud-backup restore operation.' % subcloud.name)
elif not (has_bootstrap_address or has_install_values or has_inventory_file):
@ -1401,7 +1397,9 @@ def get_sw_version(release=None):
def validate_release_version_supported(release_version_to_check):
"""Given a release version, check whether it's supported by the current active version.
"""Given a release version, check whether it's supported by the current active
version.
:param release_version_to_check: version string to validate

View File

@ -1,5 +1,5 @@
# Copyright (c) 2015 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Interface for database access.
@ -136,13 +137,12 @@ def subcloud_create(context, name, description, location, software_version,
systemcontroller_gateway_ip, deploy_status, error_description,
region_name, openstack_installed, group_id, data_install=None):
"""Create a subcloud."""
return IMPL.subcloud_create(context, name, description, location,
software_version,
management_subnet, management_gateway_ip,
management_start_ip, management_end_ip,
systemcontroller_gateway_ip, deploy_status,
error_description, region_name, openstack_installed, group_id,
data_install)
return IMPL.subcloud_create(
context, name, description, location, software_version, management_subnet,
management_gateway_ip, management_start_ip, management_end_ip,
systemcontroller_gateway_ip, deploy_status, error_description, region_name,
openstack_installed, group_id, data_install
)
def subcloud_get(context, subcloud_id):
@ -185,29 +185,26 @@ def subcloud_get_all_with_status(context):
return IMPL.subcloud_get_all_with_status(context)
def subcloud_update(context, subcloud_id, management_state=None,
availability_status=None, software_version=None, name=None,
description=None, management_subnet=None, management_gateway_ip=None,
management_start_ip=None, management_end_ip=None,
location=None, audit_fail_count=None,
deploy_status=None, backup_status=None,
backup_datetime=None, error_description=None,
openstack_installed=None, group_id=None,
data_install=None, data_upgrade=None,
first_identity_sync_complete=None,
systemcontroller_gateway_ip=None,
peer_group_id=None, rehome_data=None, rehomed=None):
def subcloud_update(
context, subcloud_id, management_state=None, availability_status=None,
software_version=None, name=None, description=None, management_subnet=None,
management_gateway_ip=None, management_start_ip=None, management_end_ip=None,
location=None, audit_fail_count=None, deploy_status=None, backup_status=None,
backup_datetime=None, error_description=None, openstack_installed=None,
group_id=None, data_install=None, data_upgrade=None,
first_identity_sync_complete=None, systemcontroller_gateway_ip=None,
peer_group_id=None, rehome_data=None, rehomed=None
):
"""Update a subcloud or raise if it does not exist."""
return IMPL.subcloud_update(context, subcloud_id, management_state,
availability_status, software_version, name,
description, management_subnet, management_gateway_ip,
management_start_ip, management_end_ip, location,
audit_fail_count, deploy_status, backup_status,
backup_datetime, error_description, openstack_installed,
group_id, data_install, data_upgrade,
first_identity_sync_complete,
systemcontroller_gateway_ip, peer_group_id,
rehome_data, rehomed)
return IMPL.subcloud_update(
context, subcloud_id, management_state, availability_status,
software_version, name, description, management_subnet,
management_gateway_ip, management_start_ip, management_end_ip, location,
audit_fail_count, deploy_status, backup_status, backup_datetime,
error_description, openstack_installed, group_id, data_install, data_upgrade,
first_identity_sync_complete, systemcontroller_gateway_ip, peer_group_id,
rehome_data, rehomed
)
def subcloud_bulk_update_by_ids(context, subcloud_ids, update_form):
@ -220,8 +217,6 @@ def subcloud_destroy(context, subcloud_id):
return IMPL.subcloud_destroy(context, subcloud_id)
###################
def subcloud_status_create(context, subcloud_id, endpoint_type):
"""Create a subcloud status for an endpoint_type."""
return IMPL.subcloud_status_create(context, subcloud_id, endpoint_type)
@ -261,7 +256,6 @@ def subcloud_endpoint_status_db_model_to_dict(subcloud_status):
def subcloud_status_get(context, subcloud_id, endpoint_type):
"""Retrieve the subcloud status for an endpoint
Will raise if subcloud does not exist.
@ -766,7 +760,6 @@ def sw_update_opts_update(context, subcloud_id,
max_parallel_workers=None,
alarm_restriction_type=None,
default_instance_action=None):
"""Update sw update options or raise if it does not exist."""
return IMPL.sw_update_opts_update(context, subcloud_id,
storage_apply_type,
@ -806,7 +799,6 @@ def sw_update_opts_default_update(context,
max_parallel_workers=None,
alarm_restriction_type=None,
default_instance_action=None):
"""Update default sw update options."""
return IMPL.sw_update_opts_default_update(context,
storage_apply_type,

View File

@ -1,18 +1,18 @@
# Copyright (c) 2015 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
@ -20,7 +20,6 @@ Implementation of SQLAlchemy backend.
"""
import datetime
import sqlalchemy
import sys
import threading
@ -31,7 +30,7 @@ from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import uuidutils
import sqlalchemy
from sqlalchemy import desc
from sqlalchemy import or_
from sqlalchemy.orm.exc import MultipleResultsFound
@ -198,12 +197,14 @@ def subcloud_audits_get_all_need_audit(context, last_audit_threshold):
with read_session() as session:
result = session.query(models.SubcloudAudits).\
filter_by(deleted=0).\
filter(models.SubcloudAudits.audit_started_at <= models.SubcloudAudits.audit_finished_at).\
filter(models.SubcloudAudits.audit_started_at <=
models.SubcloudAudits.audit_finished_at).\
filter((models.SubcloudAudits.audit_finished_at < last_audit_threshold) |
(models.SubcloudAudits.patch_audit_requested == true()) |
(models.SubcloudAudits.firmware_audit_requested == true()) |
(models.SubcloudAudits.load_audit_requested == true()) |
(models.SubcloudAudits.kube_rootca_update_audit_requested == true()) |
(models.SubcloudAudits.kube_rootca_update_audit_requested ==
true()) |
(models.SubcloudAudits.kubernetes_audit_requested == true())).\
all()
return result
@ -334,10 +335,9 @@ def subcloud_get_by_region_name(context, region_name):
@require_context
def subcloud_get_by_name_or_region_name(context, name):
result = model_query(context, models.Subcloud). \
filter_by(deleted=0). \
filter(or_(models.Subcloud.name == name, models.Subcloud.region_name == name)). \
first()
result = model_query(context, models.Subcloud).filter_by(deleted=0).filter(
or_(models.Subcloud.name == name, models.Subcloud.region_name == name)
).first()
if not result:
raise exception.SubcloudNameOrRegionNameNotFound(name=name)

View File

@ -1,23 +1,24 @@
# Copyright (c) 2015 Ericsson AB.
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from dccommon.drivers.openstack import vim
import sqlalchemy
from dccommon.drivers.openstack import vim
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()

View File

@ -1,15 +1,17 @@
# Copyright (c) 2021 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Copyright (c) 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
@ -58,7 +60,8 @@ def upgrade(migrate_engine):
subcloud_list = list(subclouds.select().where(subclouds.c.deleted == 0)
.order_by(subclouds.c.id).execute())
for subcloud in subcloud_list:
subcloud_audits.insert().execute({'subcloud_id': subcloud['id']}) # pylint: disable=no-value-for-parameter
# pylint: disable-next=no-value-for-parameter
subcloud_audits.insert().execute({'subcloud_id': subcloud['id']})
def downgrade(migrate_engine):

View File

@ -1,5 +1,5 @@
# Copyright (c) 2015 Ericsson AB
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
"""
SQLAlchemy models for dcmanager data.
"""
@ -22,26 +23,20 @@ import datetime
import json
from oslo_db.sqlalchemy import models
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.orm import session as orm_session
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.orm import session as orm_session
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.types import TypeDecorator
from sqlalchemy.types import VARCHAR
# from dcmanager.common import consts
BASE = declarative_base()
@ -209,14 +204,17 @@ class SubcloudAudits(BASE, DCManagerBase):
subcloud_id = Column(Integer,
ForeignKey('subclouds.id', ondelete='CASCADE'),
unique=True)
audit_started_at = Column(DateTime(timezone=False), default=datetime.datetime.min)
audit_finished_at = Column(DateTime(timezone=False), default=datetime.datetime.min)
audit_started_at = Column(DateTime(timezone=False),
default=datetime.datetime.min)
audit_finished_at = Column(DateTime(timezone=False),
default=datetime.datetime.min)
state_update_requested = Column(Boolean, nullable=False, default=False)
patch_audit_requested = Column(Boolean, nullable=False, default=False)
load_audit_requested = Column(Boolean, nullable=False, default=False)
firmware_audit_requested = Column(Boolean, nullable=False, default=False)
kubernetes_audit_requested = Column(Boolean, nullable=False, default=False)
kube_rootca_update_audit_requested = Column(Boolean, nullable=False, default=False)
kube_rootca_update_audit_requested = Column(Boolean, nullable=False,
default=False)
spare_audit_requested = Column(Boolean, nullable=False, default=False)
spare2_audit_requested = Column(Boolean, nullable=False, default=False)
reserved = Column(Text)

View File

@ -29,6 +29,7 @@ LOG = logging.getLogger(__name__)
class PeerGroupAuditManager(manager.Manager):
"""Manages audit related tasks."""
def __init__(self, subcloud_manager, peer_group_id, *args, **kwargs):
LOG.debug(_('PeerGroupAuditManager initialization...'))
super().__init__(service_name="peer_group_audit_manager",
@ -117,7 +118,7 @@ class PeerGroupAuditManager(manager.Manager):
# deploy status to consts.DEPLOY_STATE_REHOME_PENDING to stop cert-mon
# audits.
if remote_peer_group.get("migration_status") == \
consts.PEER_GROUP_MIGRATING:
consts.PEER_GROUP_MIGRATING:
# Unmanaged all local subclouds of peer group
LOG.info("Unmanaged all local subclouds of peer group %s "
"since remote is in migrating state" %
@ -130,7 +131,7 @@ class PeerGroupAuditManager(manager.Manager):
# an already unmanaged subcloud, so the deploy status
# update must be done separately
if subcloud.management_state != \
dccommon_consts.MANAGEMENT_UNMANAGED:
dccommon_consts.MANAGEMENT_UNMANAGED:
# Unmanage and update the deploy-status
LOG.info("Unmanaging and setting the local subcloud "
f"{subcloud.name} deploy status to "
@ -160,7 +161,7 @@ class PeerGroupAuditManager(manager.Manager):
# get remote subclouds. For 'managed+online' subclouds,
# set 'unmanaged+secondary' to local on same subclouds
elif remote_peer_group.get("migration_status") == \
consts.PEER_GROUP_MIGRATION_COMPLETE:
consts.PEER_GROUP_MIGRATION_COMPLETE or self.require_audit_flag:
remote_subclouds = \
self._get_subclouds_by_peer_group_from_system_peer(
system_peer,
@ -182,7 +183,7 @@ class PeerGroupAuditManager(manager.Manager):
# There will be an exception when unmanage
# a subcloud in 'unamaged' state.
if subcloud.management_state != \
dccommon_consts.MANAGEMENT_UNMANAGED:
dccommon_consts.MANAGEMENT_UNMANAGED:
self.subcloud_manager.update_subcloud(
self.context,
subcloud.id,

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -131,17 +131,21 @@ class PeerMonitor(object):
self._raise_failure()
db_api.system_peer_update(
self.context, self.peer.id,
availability_state=consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE)
availability_state= # noqa: E251
consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE
)
failure_count = 0
self._set_require_audit_flag_to_associated_peer_groups()
else:
failure_count = 0
self._audit_local_peer_groups(remote_pg_list)
if self.peer.availability_state != \
consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE:
consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE:
db_api.system_peer_update(
self.context, self.peer.id,
availability_state=consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE)
availability_state= # noqa: E251
consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE
)
LOG.info("DC %s back online, clear alarm" %
self.peer.peer_name)
self._clear_failure()
@ -167,9 +171,8 @@ class PeerMonitor(object):
# Audit for require_audit_flag is True or
# Remote peer group is in 'complete' state.
if (pgam_obj.require_audit_flag
or remote_peer_group.get("migration_status")
== consts.PEER_GROUP_MIGRATION_COMPLETE
):
or remote_peer_group.get("migration_status") ==
consts.PEER_GROUP_MIGRATION_COMPLETE):
pgam_obj.audit_peer_group_from_system(
self.peer, remote_peer_group, peer_group)
else:
@ -191,7 +194,7 @@ class PeerMonitor(object):
return msg
def _clean_peer_group_audit_threads(self):
for peer_group_id in self.peer_group_audit_obj_map:
for peer_group_id, _ in self.peer_group_audit_obj_map.items():
pgam_obj = \
self.peer_group_audit_obj_map[peer_group_id]
pgam_obj.stop()
@ -235,6 +238,7 @@ class PeerMonitor(object):
class PeerMonitorManager(manager.Manager):
"""Manages tasks related to peer monitor."""
def __init__(self, subcloud_manager):
LOG.debug('PeerMonitorManager initialization...')

View File

@ -1,22 +1,25 @@
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
import os
import six
import threading
import functools
import six
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
@ -159,16 +162,18 @@ class DCManagerService(service.Service):
# get subcloud by region name
LOG.debug("Handling get_subcloud_name_by_region_name request for "
"region: %s" % subcloud_region)
subcloud = self.subcloud_manager.get_subcloud_name_by_region_name(context,
subcloud_region)
subcloud = self.subcloud_manager.get_subcloud_name_by_region_name(
context, subcloud_region
)
return subcloud
@request_context
def update_subcloud(self, context, subcloud_id, management_state=None,
description=None, location=None,
group_id=None, data_install=None, force=None,
deploy_status=None,
peer_group_id=None, bootstrap_values=None, bootstrap_address=None):
def update_subcloud(
self, context, subcloud_id, management_state=None, description=None,
location=None, group_id=None, data_install=None, force=None,
deploy_status=None, peer_group_id=None, bootstrap_values=None,
bootstrap_address=None
):
# Updates a subcloud
LOG.info("Handling update_subcloud request for: %s" % subcloud_id)
subcloud = self.subcloud_manager.update_subcloud(context, subcloud_id,
@ -188,9 +193,8 @@ class DCManagerService(service.Service):
def update_subcloud_with_network_reconfig(self, context, subcloud_id, payload):
LOG.info("Handling update_subcloud_with_network_reconfig request for: %s",
subcloud_id)
return self.subcloud_manager.update_subcloud_with_network_reconfig(context,
subcloud_id,
payload)
return self.subcloud_manager.update_subcloud_with_network_reconfig(
context, subcloud_id, payload)
@run_in_thread
@request_context

View File

@ -1,19 +1,20 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import division
import base64
@ -95,7 +96,8 @@ ANSIBLE_SUBCLOUD_UPDATE_PLAYBOOK = \
# the support of rehoming a subcloud with a software version below 22.12
ANSIBLE_VALIDATE_KEYSTONE_PASSWORD_SCRIPT = \
consts.ANSIBLE_CURRENT_VERSION_BASE_PATH + \
'/roles/rehome-subcloud/update-keystone-data/files/validate_keystone_passwords.sh'
'/roles/rehome-subcloud/update-keystone-data/files/' + \
'validate_keystone_passwords.sh'
USERS_TO_REPLICATE = [
'sysinv',
@ -344,14 +346,15 @@ class SubcloudManager(manager.Manager):
software_version if software_version else SW_VERSION]
return bootstrap_command
def compose_config_command(self, subcloud_name, ansible_subcloud_inventory_file, payload):
def compose_config_command(
self, subcloud_name, ansible_subcloud_inventory_file, payload):
config_command = [
"ansible-playbook", payload[consts.DEPLOY_PLAYBOOK],
"-e", "@%s" % dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" +
subcloud_name + '_deploy_values.yml',
"-i", ansible_subcloud_inventory_file,
"--limit", subcloud_name
]
]
return config_command
def compose_backup_command(self, subcloud_name, ansible_subcloud_inventory_file):
@ -359,34 +362,42 @@ class SubcloudManager(manager.Manager):
"ansible-playbook", ANSIBLE_SUBCLOUD_BACKUP_CREATE_PLAYBOOK,
"-i", ansible_subcloud_inventory_file,
"--limit", subcloud_name,
"-e", "subcloud_bnr_overrides=%s" % dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" +
subcloud_name + "_backup_create_values.yml"]
"-e",
"subcloud_bnr_overrides=%s" % (
dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + subcloud_name +
"_backup_create_values.yml"
)
]
return backup_command
def compose_backup_delete_command(self, subcloud_name,
ansible_subcloud_inventory_file=None):
backup_command = [
"ansible-playbook", ANSIBLE_SUBCLOUD_BACKUP_DELETE_PLAYBOOK,
"-e", "subcloud_bnr_overrides=%s" % dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" +
subcloud_name + "_backup_delete_values.yml"]
"-e", "subcloud_bnr_overrides=%s" %
dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" +
subcloud_name + "_backup_delete_values.yml"
]
if ansible_subcloud_inventory_file:
# Backup stored in subcloud storage
backup_command.extend(("-i", ansible_subcloud_inventory_file,
"--limit", subcloud_name))
"--limit", subcloud_name))
else:
# Backup stored in central storage
backup_command.extend(("-e", "inventory_hostname=%s" % subcloud_name))
return backup_command
def compose_backup_restore_command(self, subcloud_name, ansible_subcloud_inventory_file):
def compose_backup_restore_command(
self, subcloud_name, ansible_subcloud_inventory_file):
backup_command = [
"ansible-playbook", ANSIBLE_SUBCLOUD_BACKUP_RESTORE_PLAYBOOK,
"-i", ansible_subcloud_inventory_file,
"--limit", subcloud_name,
"-e", "subcloud_bnr_overrides=%s" % dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" +
subcloud_name + "_backup_restore_values.yml"]
"-e", "subcloud_bnr_overrides=%s" % (
dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + subcloud_name +
"_backup_restore_values.yml"
)
]
return backup_command
def compose_update_command(self, subcloud_name, ansible_subcloud_inventory_file):
@ -395,8 +406,11 @@ class SubcloudManager(manager.Manager):
"-i", ansible_subcloud_inventory_file,
"--limit", subcloud_name,
"--timeout", UPDATE_PLAYBOOK_TIMEOUT,
"-e", "subcloud_update_overrides=%s" % dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" +
subcloud_name + "_update_values.yml"]
"-e", "subcloud_update_overrides=%s" % (
dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + subcloud_name +
"_update_values.yml"
)
]
return subcloud_update_command
def compose_rehome_command(self, subcloud_name, subcloud_region,
@ -447,7 +461,7 @@ class SubcloudManager(manager.Manager):
while True:
offline_seconds = time.monotonic() - job_done_ts
if subcloud.availability_status == \
dccommon_consts.AVAILABILITY_OFFLINE:
dccommon_consts.AVAILABILITY_OFFLINE:
if offline_seconds >= consts.BATCH_REHOME_MGMT_STATES_TIMEOUT:
LOG.warning("Skip trying to manage subcloud: %s, "
"wait online timeout [%d]" %
@ -487,7 +501,7 @@ class SubcloudManager(manager.Manager):
self.context, association.system_peer_id)
# Get 'available' system peer
if system_peer.availability_state != \
consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE:
consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE:
LOG.warning("Peer system %s offline, skip checking" %
system_peer.peer_name)
continue
@ -607,16 +621,19 @@ class SubcloudManager(manager.Manager):
# subcloud_ref could be int type id.
subcloud = utils.subcloud_get_by_ref(context, str(subcloud_ref))
if not subcloud:
LOG.error("Failed to migrate, non-existent subcloud %s" % subcloud_ref)
LOG.error(
"Failed to migrate, non-existent subcloud %s" % subcloud_ref
)
return
if 'sysadmin_password' not in payload:
LOG.error("Failed to migrate subcloud: %s, must provide sysadmin_password" %
subcloud.name)
LOG.error("Failed to migrate subcloud: %s, must provide "
"sysadmin_password" % subcloud.name)
return
if subcloud.deploy_status not in [consts.DEPLOY_STATE_SECONDARY,
consts.DEPLOY_STATE_REHOME_FAILED,
consts.DEPLOY_STATE_REHOME_PREP_FAILED]:
if subcloud.deploy_status not in [
consts.DEPLOY_STATE_SECONDARY, consts.DEPLOY_STATE_REHOME_FAILED,
consts.DEPLOY_STATE_REHOME_PREP_FAILED
]:
LOG.error("Failed to migrate subcloud: %s, "
"must be in secondary or rehome failure state" %
subcloud.name)
@ -628,7 +645,8 @@ class SubcloudManager(manager.Manager):
rehome_data = json.loads(subcloud.rehome_data)
saved_payload = rehome_data['saved_payload']
# Update sysadmin_password
sysadmin_password = base64.b64decode(payload['sysadmin_password']).decode('utf-8')
sysadmin_password = \
base64.b64decode(payload['sysadmin_password']).decode('utf-8')
saved_payload['sysadmin_password'] = sysadmin_password
# Decode admin_password
if 'admin_password' in saved_payload:
@ -814,7 +832,8 @@ class SubcloudManager(manager.Manager):
:param subcloud_id: id of the subcloud
:param payload: subcloud configuration
"""
LOG.info(f"Adding subcloud {payload['name']} with region {payload['region_name']}.")
LOG.info(f"Adding subcloud {payload['name']} with region "
f"{payload['region_name']}.")
rehoming = payload.get('migrate', '').lower() == "true"
secondary = (payload.get('secondary', '').lower() == "true")
@ -1202,8 +1221,9 @@ class SubcloudManager(manager.Manager):
:param payload: subcloud resume payload
:param deploy_states_to_run: deploy phases pending execution
"""
LOG.info("Resuming deployment of subcloud %s. Deploy phases to be executed: %s"
% (subcloud_name, ', '.join(deploy_states_to_run)))
LOG.info(
"Resuming deployment of subcloud %s. Deploy phases to be executed: %s" %
(subcloud_name, ', '.join(deploy_states_to_run)))
self.run_deploy_phases(context, subcloud_id, payload,
deploy_states_to_run,
@ -1294,7 +1314,8 @@ class SubcloudManager(manager.Manager):
:param payload: subcloud configuration
:param rehoming: flag indicating if this is part of a rehoming operation
:param initial_deployment: initial_deployment flag from subcloud inventory
:param return_as_dict: converts the subcloud DB object to a dict before returning
:param return_as_dict: converts the subcloud DB object to a dict before
returning
:return: resulting subcloud DB object or dictionary
"""
LOG.info("Creating subcloud %s." % payload['name'])
@ -1454,7 +1475,8 @@ class SubcloudManager(manager.Manager):
if 'admin_password' in original_payload:
# Encode admin_password
original_payload['admin_password'] = base64.b64encode(
original_payload['admin_password'].encode("utf-8")).decode('utf-8')
original_payload['admin_password'].encode("utf-8")
).decode('utf-8')
bootstrap_info = utils.create_subcloud_rehome_data_template()
bootstrap_info['saved_payload'] = original_payload
rehome_data = json.dumps(bootstrap_info)
@ -1914,11 +1936,13 @@ class SubcloudManager(manager.Manager):
return subcloud, success
@staticmethod
def _build_subcloud_operation_notice(operation, failed_subclouds, invalid_subclouds):
def _build_subcloud_operation_notice(
operation, failed_subclouds, invalid_subclouds):
invalid_subcloud_names = [subcloud.name for subcloud in invalid_subclouds]
failed_subcloud_names = [subcloud.name for subcloud in failed_subclouds]
notice = "Subcloud backup %s operation completed with warnings:\n" % operation
notice = (
"Subcloud backup %s operation completed with warnings:\n" % operation)
if invalid_subclouds:
notice += ("The following subclouds were skipped for local backup "
"%s operation: %s."
@ -2417,15 +2441,21 @@ class SubcloudManager(manager.Manager):
# both controllers.
management_subnet = netaddr.IPNetwork(subcloud.management_subnet)
endpoint = keystone_client.endpoint_cache.get_endpoint('sysinv')
sysinv_client = SysinvClient(dccommon_consts.DEFAULT_REGION_NAME, keystone_client.session,
endpoint=endpoint)
cached_regionone_data = self._get_cached_regionone_data(keystone_client, sysinv_client)
sysinv_client = SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME,
keystone_client.session,
endpoint=endpoint
)
cached_regionone_data = self._get_cached_regionone_data(
keystone_client, sysinv_client)
for mgmt_if_uuid in cached_regionone_data['mgmt_interface_uuids']:
sysinv_client.delete_route(mgmt_if_uuid,
str(management_subnet.ip),
management_subnet.prefixlen,
str(netaddr.IPAddress(subcloud.systemcontroller_gateway_ip)),
1)
sysinv_client.delete_route(
mgmt_if_uuid,
str(management_subnet.ip),
management_subnet.prefixlen,
str(netaddr.IPAddress(subcloud.systemcontroller_gateway_ip)),
1
)
@staticmethod
def _delete_subcloud_cert(subcloud_region):
@ -2558,7 +2588,7 @@ class SubcloudManager(manager.Manager):
mkey = list(data.keys())[0]
if mkey in data and 'hosts' in data[mkey] and \
cur_sc_name in data[mkey]['hosts']:
cur_sc_name in data[mkey]['hosts']:
data[mkey]['hosts'][new_sc_name] = \
data[mkey]['hosts'].pop(cur_sc_name)
@ -2758,8 +2788,11 @@ class SubcloudManager(manager.Manager):
# it's necessary to save it first, then put it back after
# after bootstrap_values is updated.
if 'bootstrap-address' in rehome_data_dict['saved_payload']:
_bootstrap_address = rehome_data_dict['saved_payload']['bootstrap-address']
bootstrap_values_dict = yaml.load(bootstrap_values, Loader=yaml.SafeLoader)
_bootstrap_address = \
rehome_data_dict['saved_payload']['bootstrap-address']
bootstrap_values_dict = yaml.load(
bootstrap_values, Loader=yaml.SafeLoader
)
# remove sysadmin_password,ansible_ssh_pass,ansible_become_pass
# encode admin_password
@ -2771,11 +2804,13 @@ class SubcloudManager(manager.Manager):
del bootstrap_values_dict['ansible_become_pass']
if 'admin_password' in bootstrap_values_dict:
bootstrap_values_dict['admin_password'] = base64.b64encode(
bootstrap_values_dict['admin_password'].encode("utf-8")).decode('utf-8')
bootstrap_values_dict['admin_password'].encode("utf-8")
).decode('utf-8')
rehome_data_dict['saved_payload'] = bootstrap_values_dict
# put bootstrap_address back into rehome_data_dict
if _bootstrap_address:
rehome_data_dict['saved_payload']['bootstrap-address'] = _bootstrap_address
rehome_data_dict['saved_payload'][
'bootstrap-address'] = _bootstrap_address
# update bootstrap_address
if bootstrap_address:
@ -2784,7 +2819,8 @@ class SubcloudManager(manager.Manager):
resource='subcloud',
msg='Cannot update bootstrap_address into rehome data, '
'need to import bootstrap_values first')
rehome_data_dict['saved_payload']['bootstrap-address'] = bootstrap_address
rehome_data_dict['saved_payload'][
'bootstrap-address'] = bootstrap_address
rehome_data = None
if rehome_data_dict:
@ -3195,7 +3231,9 @@ class SubcloudManager(manager.Manager):
try:
subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)
except Exception:
LOG.exception("Failed to get subcloud by region name: %s" % subcloud_region)
LOG.exception(
"Failed to get subcloud by region name: %s" % subcloud_region
)
raise
try:
@ -3256,9 +3294,10 @@ class SubcloudManager(manager.Manager):
return prestage.prestage_subcloud(context, payload)
@utils.synchronized("regionone-data-cache", external=False)
def _get_cached_regionone_data(self, regionone_keystone_client, regionone_sysinv_client=None):
if (not SubcloudManager.regionone_data or
SubcloudManager.regionone_data['expiry'] <= datetime.datetime.utcnow()):
def _get_cached_regionone_data(
self, regionone_keystone_client, regionone_sysinv_client=None):
if (not SubcloudManager.regionone_data or SubcloudManager.regionone_data[
'expiry'] <= datetime.datetime.utcnow()):
user_list = regionone_keystone_client.get_enabled_users(id_only=False)
for user in user_list:
if user.name == dccommon_consts.ADMIN_USER_NAME:
@ -3268,15 +3307,18 @@ class SubcloudManager(manager.Manager):
elif user.name == dccommon_consts.DCMANAGER_USER_NAME:
SubcloudManager.regionone_data['dcmanager_user_id'] = user.id
project_list = regionone_keystone_client.get_enabled_projects(id_only=False)
project_list = regionone_keystone_client.get_enabled_projects(
id_only=False)
for project in project_list:
if project.name == dccommon_consts.ADMIN_PROJECT_NAME:
SubcloudManager.regionone_data['admin_project_id'] = project.id
elif project.name == dccommon_consts.SERVICES_USER_NAME:
SubcloudManager.regionone_data['services_project_id'] = project.id
SubcloudManager.regionone_data['services_project_id'] = \
project.id
if regionone_sysinv_client is None:
endpoint = regionone_keystone_client.endpoint_cache.get_endpoint('sysinv')
endpoint = regionone_keystone_client.endpoint_cache.get_endpoint(
'sysinv')
regionone_sysinv_client = SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME,
regionone_keystone_client.session,
@ -3289,8 +3331,8 @@ class SubcloudManager(manager.Manager):
controller.hostname)
if mgmt_interface is not None:
mgmt_interface_uuids.append(mgmt_interface.uuid)
SubcloudManager.regionone_data['mgmt_interface_uuids'] = mgmt_interface_uuids
SubcloudManager.regionone_data['mgmt_interface_uuids'] = \
mgmt_interface_uuids
SubcloudManager.regionone_data['mgmt_pool'] = \
regionone_sysinv_client.get_management_address_pool()
SubcloudManager.regionone_data['oam_addresses'] = \
@ -3298,7 +3340,8 @@ class SubcloudManager(manager.Manager):
SubcloudManager.regionone_data['expiry'] = \
datetime.datetime.utcnow() + datetime.timedelta(hours=1)
LOG.info("RegionOne cached data updated %s" % SubcloudManager.regionone_data)
LOG.info(
"RegionOne cached data updated %s" % SubcloudManager.regionone_data)
cached_regionone_data = SubcloudManager.regionone_data
return cached_regionone_data

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -116,7 +116,8 @@ class SystemPeerManager(manager.Manager):
"""
if SystemPeerManager.get_subcloud_deploy_status(subcloud) not in (
consts.DEPLOY_STATE_SECONDARY_FAILED,
consts.DEPLOY_STATE_SECONDARY):
consts.DEPLOY_STATE_SECONDARY
):
return False
return True
@ -305,7 +306,7 @@ class SystemPeerManager(manager.Manager):
validation = self._is_valid_for_subcloud_sync(subcloud)
if validation != VERIFY_SUBCLOUD_SYNC_IGNORE and \
validation != VERIFY_SUBCLOUD_SYNC_VALID:
validation != VERIFY_SUBCLOUD_SYNC_VALID:
LOG.error(validation)
error_msg[subcloud_name] = validation
continue
@ -675,7 +676,7 @@ class SystemPeerManager(manager.Manager):
LOG.info(f"Deleted Subcloud Peer Group {peer_group_name} "
f"on peer site.")
except dccommon_exceptions.\
SubcloudPeerGroupDeleteFailedAssociated:
SubcloudPeerGroupDeleteFailedAssociated:
LOG.error(f"Subcloud Peer Group {peer_group_name} "
"delete failed as it is associated with System "
"Peer on peer site.")

View File

@ -1,19 +1,20 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
import datetime
import threading
@ -196,7 +197,8 @@ class OrchThread(threading.Thread):
return state_operator(
region_name=OrchThread.get_region_name(strategy_step))
def strategy_step_update(self, subcloud_id, state=None, details=None, stage=None):
def strategy_step_update(
self, subcloud_id, state=None, details=None, stage=None):
"""Update the strategy step in the DB
Sets the start and finished timestamp if necessary, based on state.
@ -219,9 +221,10 @@ class OrchThread(threading.Thread):
finished_at=finished_at)
def _delete_subcloud_worker(self, region, subcloud_id):
db_api.strategy_step_update(self.context,
subcloud_id,
stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_PROCESSED)
db_api.strategy_step_update(
self.context,
subcloud_id,
stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_PROCESSED)
if region in self.subcloud_workers:
# The orchestration for this subcloud has either
# completed/failed/aborted, remove it from the
@ -387,8 +390,8 @@ class OrchThread(threading.Thread):
continue
elif strategy_step.state == \
consts.STRATEGY_STATE_INITIAL:
if sw_update_strategy.max_parallel_subclouds > len(self.subcloud_workers) \
and not stop:
if sw_update_strategy.max_parallel_subclouds > \
len(self.subcloud_workers) and not stop:
# Don't start upgrading this subcloud if it has been
# unmanaged by the user. If orchestration was already
# started, it will be allowed to complete.

View File

@ -1,20 +1,22 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.orch_thread import OrchThread
@ -28,7 +30,6 @@ from dcmanager.orchestrator.states.patch.job_data import PatchJobData
from dcmanager.orchestrator.states.patch.pre_check import PreCheckState
from dcmanager.orchestrator.states.patch.updating_patches import \
UpdatingPatchesState
from oslo_log import log as logging
LOG = logging.getLogger(__name__)

View File

@ -1,25 +1,26 @@
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# http://www.apache.org/licenses/LICENSE-2.0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import six
import functools
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
import six
from dccommon.subprocess_cleanup import SubprocessCleanup
from dcmanager.common import consts

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dccommon.drivers.openstack import vim
@ -120,8 +121,8 @@ class CreatingVIMStrategyState(BaseState):
raise_error_if_missing=True)
# Check for skip criteria where a failed 'build' might be expected
skip_state = self.skip_check(strategy_step, # pylint: disable=assignment-from-none
subcloud_strategy)
# pylint: disable-next=assignment-from-none
skip_state = self.skip_check(strategy_step, subcloud_strategy)
if skip_state is not None:
self.info_log(strategy_step,
"Skip forward to state:(%s)" % skip_state)

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2020, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dccommon.drivers.openstack import vim
@ -30,7 +31,8 @@ class ApplyingVIMStrategyState(BaseState):
def __init__(self, region_name):
super(ApplyingVIMStrategyState, self).__init__(
next_state=consts.STRATEGY_STATE_FINISHING_FW_UPDATE, region_name=region_name)
next_state=consts.STRATEGY_STATE_FINISHING_FW_UPDATE,
region_name=region_name)
self.max_failed_queries = DEFAULT_MAX_FAILED_QUERIES
self.wait_attempts = DEFAULT_MAX_WAIT_ATTEMPTS
self.wait_interval = WAIT_INTERVAL

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2020, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dccommon.drivers.openstack import vim
@ -21,7 +22,8 @@ class CreatingVIMStrategyState(BaseState):
def __init__(self, region_name):
super(CreatingVIMStrategyState, self).__init__(
next_state=consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY, region_name=region_name)
next_state=consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY,
region_name=region_name)
# max time to wait for the strategy to be built (in seconds)
# is: sleep_duration * max_queries
self.sleep_duration = DEFAULT_SLEEP_DURATION

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dccommon import consts as dccommon_consts
@ -107,7 +108,8 @@ class FinishingFwUpdateState(BaseState):
break
except Exception:
if fail_counter >= self.max_failed_queries:
raise Exception("Timeout waiting to query subcloud device image info")
raise Exception(
"Timeout waiting to query subcloud device image info")
fail_counter += 1
time.sleep(self.failed_sleep_duration)

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2022 Wind River Systems, Inc.
# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
from dccommon import consts as dccommon_consts
@ -20,7 +21,8 @@ class ImportingFirmwareState(BaseState):
def __init__(self, region_name):
super(ImportingFirmwareState, self).__init__(
next_state=consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY, region_name=region_name)
next_state=consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY,
region_name=region_name)
def _image_in_list(self, image, image_list):
# todo(abailey): FUTURE. There may be other ways that two images can
@ -139,8 +141,8 @@ class ImportingFirmwareState(BaseState):
# However, it may not have been applied to this device
device_image_state = None
for device_image_state_obj in subcloud_device_image_states:
if device_image_state_obj.pcidevice_uuid == device.uuid\
and device_image_state_obj.image_uuid == image.uuid:
if device_image_state_obj.pcidevice_uuid == device.uuid \
and device_image_state_obj.image_uuid == image.uuid:
device_image_state = device_image_state_obj
break
else:

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2020, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
# Device Image Status - duplicated from sysinv/common/device.py
@ -105,7 +106,7 @@ def determine_image_fields(image):
'bmc',
'retimer_included']
fields = dict((k, str(v)) for (k, v) in vars(image).items()
if k in field_list and not (v is None))
if k in field_list and v)
return fields
@ -116,10 +117,10 @@ def check_for_label_match(subcloud_host_device_label_list,
# todo(abailey): should this compare pci_device_uuid or vendor/device
for device_label in subcloud_host_device_label_list:
if device_label.pcidevice_uuid and \
device_uuid == device_label.pcidevice_uuid and \
label_key == device_label.label_key and \
label_value == device_label.label_value:
return True
device_uuid == device_label.pcidevice_uuid and \
label_key == device_label.label_key and \
label_value == device_label.label_value:
return True
return False

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon.consts import DEFAULT_REGION_NAME
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
@ -48,7 +49,9 @@ class CreatingVIMKubeUpgradeStrategyState(CreatingVIMStrategyState):
if to_version is None:
sys_kube_versions = \
self.get_sysinv_client(DEFAULT_REGION_NAME).get_kube_versions()
to_version = dcmanager_utils.get_active_kube_version(sys_kube_versions)
to_version = dcmanager_utils.get_active_kube_version(
sys_kube_versions
)
if to_version is None:
# No active target kube version on the system controller means
# the system controller is part-way through a kube upgrade
@ -59,7 +62,9 @@ class CreatingVIMKubeUpgradeStrategyState(CreatingVIMStrategyState):
kube_versions = \
self.get_sysinv_client(region).get_kube_versions()
target_kube_version = \
dcmanager_utils.select_available_kube_version(kube_versions, to_version)
dcmanager_utils.select_available_kube_version(
kube_versions, to_version
)
# Get the update options
opts_dict = dcmanager_utils.get_sw_update_opts(

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2021-2022 Wind River Systems, Inc.
# Copyright (c) 2021-2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import re
from dccommon.consts import DEFAULT_REGION_NAME
@ -14,8 +15,8 @@ from dcmanager.common import utils
from dcmanager.db import api as db_api
from dcmanager.orchestrator.states.base import BaseState
# These following alarms can occur during a vim orchestrated k8s upgrade on the subcloud.
# By ignoring the alarms, subcloud k8s upgrade can be
# These following alarms can occur during a vim orchestrated k8s upgrade on the
# subcloud. By ignoring the alarms, subcloud k8s upgrade can be
# retried after a failure using DC orchestrator.
ALARM_IGNORE_LIST = ['100.003', '200.001', '700.004', '750.006',
'900.007', '900.401']
@ -45,7 +46,8 @@ class KubeUpgradePreCheckState(BaseState):
rather than the 'available' version in the subcloud. This allows
a partially upgraded subcloud to be skipped.
"""
system_health = self.get_sysinv_client(self.region_name).get_kube_upgrade_health()
system_health = self.get_sysinv_client(
self.region_name).get_kube_upgrade_health()
fails = re.findall("\[Fail\]", system_health)
failed_alarm_check = re.findall("No alarms: \[Fail\]", system_health)
no_mgmt_alarms = re.findall("\[0\] of which are management affecting",
@ -57,17 +59,19 @@ class KubeUpgradePreCheckState(BaseState):
for alarm in alarms:
if alarm.alarm_id not in ALARM_IGNORE_LIST:
if alarm.mgmt_affecting == "True":
error_desc_msg = ("Kubernetes upgrade health check failed due to alarm %s. "
"Kubernetes upgrade health: \n %s" %
(alarm.alarm_id, system_health))
error_desc_msg = (
"Kubernetes upgrade health check failed due to alarm "
"%s. Kubernetes upgrade health: \n %s" % (
alarm.alarm_id, system_health))
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
error_description=error_desc_msg)
self.error_log(strategy_step, "\n" + system_health)
raise Exception(("Kubernetes upgrade health check failed due to alarm %s. "
"Please run 'system health-query-kube-upgrade' "
"command on the subcloud or %s on central for details." %
(alarm.alarm_id, ERROR_DESC_CMD)))
raise Exception((
"Kubernetes upgrade health check failed due to alarm "
"%s. Please run 'system health-query-kube-upgrade' "
"command on the subcloud or %s on central for details." %
(alarm.alarm_id, ERROR_DESC_CMD)))
else:
error_desc_msg = ("Kubernetes upgrade health check failed. \n %s" %
system_health)
@ -114,7 +118,9 @@ class KubeUpgradePreCheckState(BaseState):
subcloud_kube_versions = \
self.get_sysinv_client(self.region_name).get_kube_versions()
target_version = \
utils.select_available_kube_version(subcloud_kube_versions, to_version)
utils.select_available_kube_version(
subcloud_kube_versions, to_version
)
self.debug_log(strategy_step,
"Pre-Check. Available Kubernetes upgrade:(%s)"
% target_version)

View File

@ -1,14 +1,15 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import patching_v1
from dcmanager.common import utils
from dcmanager.orchestrator.orch_thread import OrchThread
from oslo_log import log as logging
LOG = logging.getLogger(__name__)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -22,7 +22,7 @@ class PreCheckState(BaseState):
alarms = self.get_fm_client(self.region_name).get_alarms()
for alarm in alarms:
if alarm.mgmt_affecting == "True" and \
alarm.alarm_id not in ignored_alarms:
alarm.alarm_id not in ignored_alarms:
return True
# No management affecting alarms
return False

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -42,7 +42,7 @@ class DeployStartState(BaseState):
# Find the max version deployed on the SystemController
max_version = None
for release_id in deployed_releases:
for release_id, _ in deployed_releases.items():
release_sw_version = deployed_releases[release_id]['sw_version']
if max_version is None or release_sw_version > max_version:
max_version = release_sw_version

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -30,8 +30,10 @@ class FinishStrategyState(BaseState):
state=software_v1.COMMITTED
)
self.debug_log(strategy_step,
"regionone_committed_releases: %s" % regionone_committed_releases)
self.debug_log(
strategy_step,
"regionone_committed_releases: %s" % regionone_committed_releases
)
try:
software_client = self.get_software_client(self.region_name)
@ -66,8 +68,9 @@ class FinishStrategyState(BaseState):
try:
software_client.delete(releases_to_delete)
except Exception:
message = ("Cannot delete releases from subcloud. Please see logs for"
" details.")
message = \
("Cannot delete releases from subcloud. Please see logs for"
" details.")
self.exception_log(strategy_step, message)
raise Exception(message)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -128,14 +128,16 @@ class UploadState(BaseState):
if iso_release in subcloud_releases:
if potential_missing_patches:
# Retrieve patches that are present in the system controller and
# not in the subcloud after uploading load to the subcloud.
# Retrieve patches that are present in the system
# controller and not in the subcloud after uploading
# load to the subcloud.
missing_patches = self. \
_find_missing_patches(subcloud_releases,
potential_missing_patches)
if missing_patches:
message = (f"Release files {missing_patches} are missing")
message = \
(f"Release files {missing_patches} are missing")
self.error_log(strategy_step, message)
raise Exception(message)
break
@ -148,14 +150,19 @@ class UploadState(BaseState):
else:
# No load was uploaded therefore the patches are really missing.
if potential_missing_patches:
message = (f"Release files {potential_missing_patches} are missing")
message = \
(f"Release files {potential_missing_patches} are missing")
self.error_log(strategy_step, message)
raise Exception(message)
if upload_only:
self.info_log(strategy_step,
(f"{consts.EXTRA_ARGS_UPLOAD_ONLY} option enabled, skipping"
f" forward to state:({consts.STRATEGY_STATE_COMPLETE})"))
self.info_log(
strategy_step,
(
f"{consts.EXTRA_ARGS_UPLOAD_ONLY} option enabled, skipping"
f" forward to state:({consts.STRATEGY_STATE_COMPLETE})"
)
)
return consts.STRATEGY_STATE_COMPLETE
return self.next_state

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2022 Wind River Systems, Inc.
# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dcmanager.common import consts
@ -49,8 +50,10 @@ class SwactHostState(BaseState):
active_host = self.get_sysinv_client(region).get_host(self.active)
standby_host = self.get_sysinv_client(region).get_host(self.standby)
# if the desired active host is already the Active Controller, no need for action
if active_host.capabilities.get('Personality') == consts.PERSONALITY_CONTROLLER_ACTIVE:
# if the desired active host is already the Active Controller, no need for
# action
if active_host.capabilities.get('Personality') == \
consts.PERSONALITY_CONTROLLER_ACTIVE:
msg = "Host: %s already the active controller." % (self.active)
self.info_log(strategy_step, msg)
return self.next_state
@ -71,7 +74,8 @@ class SwactHostState(BaseState):
try:
# query the administrative state to see if it is the new state.
host = self.get_sysinv_client(region).get_host(self.active)
if host.capabilities.get('Personality') == consts.PERSONALITY_CONTROLLER_ACTIVE:
if host.capabilities.get('Personality') == \
consts.PERSONALITY_CONTROLLER_ACTIVE:
msg = "Host: %s is now the active controller." % (self.active)
self.info_log(strategy_step, msg)
break

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
import retrying
@ -113,7 +114,8 @@ class UnlockHostState(BaseState):
try:
# query the administrative state to see if it is the new state.
host = self.get_sysinv_client(
strategy_step.subcloud.region_name).get_host(self.target_hostname)
strategy_step.subcloud.region_name
).get_host(self.target_hostname)
if self.check_host_ready(host):
# Success. Break out of the loop.
msg = "Host: %s is now: %s %s %s" % (self.target_hostname,

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dcmanager.common import consts
@ -29,7 +30,8 @@ class ActivatingUpgradeState(BaseState):
def __init__(self, region_name):
super(ActivatingUpgradeState, self).__init__(
next_state=consts.STRATEGY_STATE_COMPLETING_UPGRADE, region_name=region_name)
next_state=consts.STRATEGY_STATE_COMPLETING_UPGRADE,
region_name=region_name)
# max time to wait (in seconds) is: sleep_duration * max_queries
self.sleep_duration = DEFAULT_SLEEP_DURATION
self.max_queries = DEFAULT_MAX_QUERIES

View File

@ -1,17 +1,18 @@
#
# Copyright (c) 2020-2022 Wind River Systems, Inc.
# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import retrying
import time
import retrying
from dcmanager.common import consts
from dcmanager.common.exceptions import StrategyStoppedException
from dcmanager.db import api as db_api
from dcmanager.orchestrator.states.base import BaseState
# Max time: 10 minutes = 60 queries x 10 seconds between each query
DEFAULT_MAX_QUERIES = 60
DEFAULT_SLEEP_DURATION = 10

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dcmanager.common import consts
@ -53,7 +54,8 @@ class DeletingLoadState(BaseState):
# Get a sysinv client each time. It will automatically renew the
# token if it is about to expire.
sysinv_client = self.get_sysinv_client(strategy_step.subcloud.region_name)
sysinv_client = \
self.get_sysinv_client(strategy_step.subcloud.region_name)
if len(sysinv_client.get_loads()) == 1:
msg = "Load %s deleted." % load_version
self.info_log(strategy_step, msg)

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon.drivers.openstack import patching_v1
from dcmanager.common import consts
from dcmanager.common.exceptions import StrategyStoppedException
@ -46,8 +47,9 @@ class FinishingPatchStrategyState(BaseState):
state=patching_v1.PATCH_STATE_COMMITTED
)
self.debug_log(strategy_step,
"regionone_committed_patches: %s" % regionone_committed_patches)
self.debug_log(
strategy_step,
"regionone_committed_patches: %s" % regionone_committed_patches)
committed_patch_ids = list()
for patch_id in regionone_committed_patches.keys():
@ -77,8 +79,9 @@ class FinishingPatchStrategyState(BaseState):
elif subcloud_patches[patch_id]['patchstate'] == \
patching_v1.PATCH_STATE_APPLIED:
if patch_id in committed_patch_ids:
self.info_log(strategy_step,
"Patch %s will be committed in subcloud" % patch_id)
self.info_log(
strategy_step,
"Patch %s will be committed in subcloud" % patch_id)
patches_to_commit.append(patch_id)
if patches_to_delete:

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dccommon.exceptions import LoadMaxReached
@ -31,7 +32,8 @@ class ImportingLoadState(BaseState):
def __init__(self, region_name):
super(ImportingLoadState, self).__init__(
next_state=consts.STRATEGY_STATE_UPDATING_PATCHES, region_name=region_name)
next_state=consts.STRATEGY_STATE_UPDATING_PATCHES,
region_name=region_name)
# max time to wait (in seconds) is: sleep_duration * max_queries
self.sleep_duration = DEFAULT_SLEEP_DURATION
self.max_queries = DEFAULT_MAX_QUERIES
@ -91,8 +93,9 @@ class ImportingLoadState(BaseState):
break
counter += 1
self.debug_log(strategy_step,
"Waiting for load %s to complete, iter=%d" % (request_type, counter))
self.debug_log(
strategy_step,
"Waiting for load %s to complete, iter=%d" % (request_type, counter))
if counter >= self.max_queries:
raise Exception("Timeout waiting for %s to complete"
% request_type)
@ -111,7 +114,8 @@ class ImportingLoadState(BaseState):
self.info_log(strategy_step,
"Load:%s already found" % target_version)
return True, load_info
elif load.state == consts.IMPORTED_LOAD_STATE or load.state == consts.ERROR_LOAD_STATE:
elif load.state == consts.IMPORTED_LOAD_STATE or \
load.state == consts.ERROR_LOAD_STATE:
load_info['load_id'] = load.id
load_info['load_version'] = load.software_version
@ -139,8 +143,8 @@ class ImportingLoadState(BaseState):
if load_id_to_be_deleted is not None:
self.info_log(strategy_step,
"Deleting load %s..." % load_id_to_be_deleted)
self.get_sysinv_client(
strategy_step.subcloud.region_name).delete_load(load_id_to_be_deleted)
self.get_sysinv_client(strategy_step.subcloud.region_name).\
delete_load(load_id_to_be_deleted)
req_info['type'] = LOAD_DELETE_REQUEST_TYPE
self._wait_for_request_to_complete(strategy_step, req_info)
@ -151,14 +155,17 @@ class ImportingLoadState(BaseState):
if subcloud_type == consts.SYSTEM_MODE_SIMPLEX:
# For simplex we only import the load record, not the entire ISO
loads = self._read_from_cache(REGION_ONE_SYSTEM_LOAD_CACHE_TYPE)
matches = [load for load in loads if load.software_version == target_version]
matches = [
load for load in loads if load.software_version == target_version]
target_load = matches[0].to_dict()
# Send only the required fields
creation_keys = ['software_version', 'compatible_version', 'required_patches']
creation_keys = ['software_version',
'compatible_version',
'required_patches']
target_load = {key: target_load[key] for key in creation_keys}
try:
load = self.get_sysinv_client(
strategy_step.subcloud.region_name).import_load_metadata(target_load)
load = self.get_sysinv_client(strategy_step.subcloud.region_name).\
import_load_metadata(target_load)
self.info_log(strategy_step,
"Load: %s is now: %s" % (
load.software_version, load.state))
@ -178,28 +185,34 @@ class ImportingLoadState(BaseState):
load_import_retry_counter += 1
try:
# ISO and SIG files are found in the vault under a version directory
# ISO and SIG files are found in the vault under a version
# directory
self.info_log(strategy_step, "Getting vault load files...")
iso_path, sig_path = utils.get_vault_load_files(target_version)
if not iso_path:
message = ("Failed to get upgrade load info for subcloud %s" %
strategy_step.subcloud.name)
message = (
"Failed to get upgrade load info for subcloud %s" %
strategy_step.subcloud.name)
raise Exception(message)
# Call the API. import_load blocks until the load state is 'importing'
# Call the API. import_load blocks until the load state is
# 'importing'
self.info_log(strategy_step, "Sending load import request...")
load = self.get_sysinv_client(
strategy_step.subcloud.region_name).import_load(iso_path, sig_path)
strategy_step.subcloud.region_name
).import_load(iso_path, sig_path)
break
except VaultLoadMissingError:
raise
except LoadMaxReached:
# A prior import request may have encountered an exception but the request actually
# continued with the import operation in the subcloud. This has been observed when performing
# multiple parallel upgrade in which resource/link may be saturated. In such case allow continue
# for further checks (i.e. at wait_for_request_to_complete)
# A prior import request may have encountered an exception but
# the request actually continued with the import operation in the
# subcloud. This has been observed when performing multiple
# parallel upgrade in which resource/link may be saturated.
# In such case allow continue for further checks
# (i.e. at wait_for_request_to_complete)
self.info_log(strategy_step,
"Load at max number of loads")
break
@ -209,13 +222,14 @@ class ImportingLoadState(BaseState):
(e, load_import_retry_counter))
if load_import_retry_counter >= self.max_load_import_retries:
self.error_log(strategy_step, str(e))
raise Exception("Failed to import load. Please check sysinv.log on "
"the subcloud for details.")
raise Exception("Failed to import load. Please check "
"sysinv.log on the subcloud for details.")
time.sleep(self.sleep_duration)
if load is None:
_, load_info = self._get_subcloud_load_info(strategy_step, target_version)
_, load_info = self._get_subcloud_load_info(
strategy_step, target_version)
load_id = load_info.get('load_id')
software_version = load_info['load_version']
else:
@ -228,14 +242,16 @@ class ImportingLoadState(BaseState):
if software_version != target_version:
raise Exception("The imported load was not the expected version.")
try:
self.info_log(strategy_step,
"Load import request accepted, load software version = %s"
% software_version)
self.info_log(
strategy_step,
"Load import request accepted, load software version = %s"
% software_version)
req_info['load_id'] = load_id
req_info['load_version'] = target_version
req_info['type'] = LOAD_IMPORT_REQUEST_TYPE
self.info_log(strategy_step,
"Waiting for state to change from importing to imported...")
self.info_log(
strategy_step,
"Waiting for state to change from importing to imported...")
self._wait_for_request_to_complete(strategy_step, req_info)
except Exception as e:
self.error_log(strategy_step, str(e))

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.common import exceptions
@ -52,9 +53,10 @@ class InstallingLicenseState(BaseState):
return self.next_state
else:
# An unexpected error occurred querying the license
message = ('An unexpected error occurred querying the license %s. Detail: %s' %
(dccommon_consts.SYSTEM_CONTROLLER_NAME,
target_error))
message = (
'An unexpected error occurred querying the license %s. '
'Detail: %s' % (dccommon_consts.SYSTEM_CONTROLLER_NAME,
target_error))
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
error_description=message[0:consts.ERROR_DESCRIPTION_LENGTH])

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import time
@ -47,7 +48,8 @@ class MigratingDataState(BaseState):
def __init__(self, region_name):
super(MigratingDataState, self).__init__(
next_state=consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_0, region_name=region_name)
next_state=consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_0,
region_name=region_name)
self.max_api_queries = DEFAULT_MAX_API_QUERIES
self.api_sleep_duration = DEFAULT_API_SLEEP
@ -141,8 +143,9 @@ class MigratingDataState(BaseState):
strategy_step.subcloud.name + consts.INVENTORY_FILE_POSTFIX)
log_file = os.path.join(consts.DC_ANSIBLE_LOG_DIR, subcloud.name) + \
'_playbook_output.log'
# Send skip_patching=true to prevent the playbook from applying any patches present in the
# upgrade_data. All the required patches will be included in the generated install iso.
# Send skip_patching=true to prevent the playbook from applying any patches
# present in the upgrade_data. All the required patches will be included in
# the generated install iso.
data_migrating_cmd = [
"ansible-playbook", ANSIBLE_UPGRADE_PLAYBOOK,
"-i", ansible_subcloud_inventory_file, "-e",
@ -156,7 +159,8 @@ class MigratingDataState(BaseState):
# Two error messages: one for subcloud error description and logs and
# one for orchestrator strategy_step detail (shorter than the previous).
msg_subcloud = utils.find_ansible_error_msg(
strategy_step.subcloud.name, log_file, consts.DEPLOY_STATE_MIGRATING_DATA)
strategy_step.subcloud.name, log_file,
consts.DEPLOY_STATE_MIGRATING_DATA)
# Get script output in case it is available
error_msg = utils.get_failure_msg(strategy_step.subcloud.region_name)
failure = ('%s \n%s' % (error_msg, msg_subcloud))

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import copy
import re
@ -40,10 +41,12 @@ class PreCheckState(BaseState):
def __init__(self, region_name):
super(PreCheckState, self).__init__(
next_state=consts.STRATEGY_STATE_INSTALLING_LICENSE, region_name=region_name)
next_state=consts.STRATEGY_STATE_INSTALLING_LICENSE,
region_name=region_name)
def _check_health(self, strategy_step, subcloud_sysinv_client, subcloud_fm_client,
host, upgrades):
def _check_health(
self, strategy_step, subcloud_sysinv_client, subcloud_fm_client,
host, upgrades):
# Check system upgrade health
#
@ -112,21 +115,22 @@ class PreCheckState(BaseState):
if not failed_alarm_check:
# Health check failure: no alarms involved
#
# These could be Kubernetes or other related failure(s) which has not been been
# converted into an alarm condition.
# These could be Kubernetes or other related failure(s) which has not
# been been converted into an alarm condition.
error_desc_msg = ("System upgrade health check failed. \n %s" %
fails)
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
error_description=error_desc_msg[0:consts.ERROR_DESCRIPTION_LENGTH])
details = ("System upgrade health check failed. Please run 'system health-query-upgrade' "
"command on the subcloud or %s on central for details"
% (consts.ERROR_DESC_CMD))
details = (
"System upgrade health check failed. Please run "
"'system health-query-upgrade' command on the subcloud or %s "
"on central for details" % (consts.ERROR_DESC_CMD))
self.error_log(strategy_step, "\n" + system_health)
raise PreCheckFailedException(
subcloud=strategy_step.subcloud.name,
details=details,
)
)
else:
# Health check failure: one or more alarms
if (upgrades and (len(fails) == len(alarm_ignore_list))):
@ -139,38 +143,42 @@ class PreCheckState(BaseState):
for alarm in alarms:
if alarm.alarm_id not in alarm_ignore_list:
if alarm.mgmt_affecting == "True":
error_desc_msg = ("System upgrade health check failed due to alarm %s. "
"System upgrade health: \n %s" %
(alarm.alarm_id, system_health))
error_desc_msg = (
"System upgrade health check failed due to "
"alarm %s. System upgrade health: \n %s" %
(alarm.alarm_id, system_health))
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
error_description=error_desc_msg[0:consts.ERROR_DESCRIPTION_LENGTH])
details = ("System upgrade health check failed due to alarm %s. "
"Please run 'system health-query-upgrade' "
"command on the subcloud or %s on central for details." %
(alarm.alarm_id, consts.ERROR_DESC_CMD))
error_description=error_desc_msg[
0:consts.ERROR_DESCRIPTION_LENGTH])
details = (
"System upgrade health check failed due to "
"alarm %s. Please run 'system health-query-upgrade' "
"command on the subcloud or %s on central for "
"details." % (alarm.alarm_id, consts.ERROR_DESC_CMD))
self.error_log(strategy_step, "\n" + system_health)
raise PreCheckFailedException(
subcloud=strategy_step.subcloud.name,
details=details,
)
)
else:
# Multiple failures
error_desc_msg = ("System upgrade health check failed due to multiple failures. "
"Health: \n %s" %
(system_health))
error_desc_msg = (
"System upgrade health check failed due to multiple failures. "
"Health: \n %s" % system_health)
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
error_description=error_desc_msg[0:consts.ERROR_DESCRIPTION_LENGTH])
details = ("System upgrade health check failed due to multiple failures. "
"Please run 'system health-query-upgrade' command on the "
"subcloud or %s on central for details." %
(consts.ERROR_DESC_CMD))
error_description=error_desc_msg[
0:consts.ERROR_DESCRIPTION_LENGTH])
details = (
"System upgrade health check failed due to multiple failures. "
"Please run 'system health-query-upgrade' command on the "
"subcloud or %s on central for details." % consts.ERROR_DESC_CMD)
self.error_log(strategy_step, "\n" + system_health)
raise PreCheckFailedException(
subcloud=strategy_step.subcloud.name,
details=details,
)
)
def _check_scratch(self, strategy_step, subcloud_sysinv_client, host):
scratch_fs = subcloud_sysinv_client.get_host_filesystem(
@ -182,7 +190,7 @@ class PreCheckState(BaseState):
raise PreCheckFailedException(
subcloud=strategy_step.subcloud.name,
details=details,
)
)
def _perform_subcloud_online_checks(self, strategy_step, subcloud_sysinv_client,
subcloud_fm_client, host, upgrades):
@ -204,8 +212,10 @@ class PreCheckState(BaseState):
if subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE:
subcloud_sysinv_client = None
try:
subcloud_sysinv_client = self.get_sysinv_client(strategy_step.subcloud.region_name)
subcloud_fm_client = self.get_fm_client(strategy_step.subcloud.region_name)
subcloud_sysinv_client = \
self.get_sysinv_client(strategy_step.subcloud.region_name)
subcloud_fm_client = \
self.get_fm_client(strategy_step.subcloud.region_name)
except Exception:
# if getting the token times out, the orchestrator may have
# restarted and subcloud may be offline; so will attempt
@ -233,14 +243,15 @@ class PreCheckState(BaseState):
subcloud=strategy_step.subcloud.name,
details=details)
sc_status = subcloud.deploy_status
if (host.administrative == consts.ADMIN_LOCKED and
(subcloud.deploy_status == consts.DEPLOY_STATE_INSTALL_FAILED or
subcloud.deploy_status == consts.DEPLOY_STATE_PRE_INSTALL_FAILED)):
# If the subcloud is online but its deploy state is pre-install-failed
# or install-failed and the subcloud host is locked, the upgrading
# simplex step must have failed early in the previous upgrade attempt.
# The pre-check should transition directly to upgrading simplex step in the
# retry.
(sc_status == consts.DEPLOY_STATE_INSTALL_FAILED or
sc_status == consts.DEPLOY_STATE_PRE_INSTALL_FAILED)):
# If the subcloud is online but its deploy state is
# pre-install-failed or install-failed and the subcloud host is
# locked, the upgrading simplex step must have failed early in
# the previous upgrade attempt. The pre-check should transition
# directly to upgrading simplex step in the retry.
self.override_next_state(consts.STRATEGY_STATE_UPGRADING_SIMPLEX)
return self.next_state
@ -256,103 +267,122 @@ class PreCheckState(BaseState):
host, upgrades)
if subcloud.deploy_status == consts.DEPLOY_STATE_UPGRADE_ACTIVATED:
# If the subcloud has completed upgrade activation, advance directly
# to completing step.
self.override_next_state(consts.STRATEGY_STATE_COMPLETING_UPGRADE)
elif subcloud.deploy_status == consts.DEPLOY_STATE_DATA_MIGRATION_FAILED:
# If the subcloud has completed upgrade activation,
# advance directly to completing step.
self.override_next_state(
consts.STRATEGY_STATE_COMPLETING_UPGRADE
)
elif subcloud.deploy_status == \
consts.DEPLOY_STATE_DATA_MIGRATION_FAILED:
# If the subcloud deploy status is data-migration-failed but
# it is online and has passed subcloud online checks, it must have
# timed out while waiting for the subcloud to unlock previously and
# has succesfully been unlocked since. Update the subcloud deploy
# status and advance to activating upgrade step.
# it is online and has passed subcloud online checks, it must
# have timed out while waiting for the subcloud to unlock
# previously and has succesfully been unlocked since. Update
# the subcloud deploy status and advance to activating upgrade
# step.
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
deploy_status=consts.DEPLOY_STATE_MIGRATED)
self.override_next_state(consts.STRATEGY_STATE_ACTIVATING_UPGRADE)
self.override_next_state(
consts.STRATEGY_STATE_ACTIVATING_UPGRADE
)
elif subcloud.deploy_status == consts.DEPLOY_STATE_MIGRATED:
# If the subcloud deploy status is migrated but it is online, it
# must have undergone 2 upgrade attempts:
# - in 1st upgrade attempt: strategy timed out while waiting
# for the subcloud to unlock
# - in 2nd upgrade attempt: the subcloud was unlocked successfully
# (with or without manual interventions) but failed to activate.
# - in 2nd upgrade attempt: the subcloud was unlocked
# successfully (with or without manual interventions) but
# failed to activate.
# Advance to activating upgrade step so activation can be retried
# after the manual intervention.
self.override_next_state(consts.STRATEGY_STATE_ACTIVATING_UPGRADE)
self.override_next_state(
consts.STRATEGY_STATE_ACTIVATING_UPGRADE
)
else:
# Duplex case
if upgrades:
# If upgrade has started, skip subcloud online checks
self.info_log(strategy_step, "Online subcloud checks skipped.")
upgrade_state = upgrades[0].state
if(upgrade_state == consts.UPGRADE_STATE_DATA_MIGRATION_FAILED or
upgrade_state == consts.UPGRADE_STATE_DATA_MIGRATION):
controllers_state = consts.UPGRADE_STATE_UPGRADING_CONTROLLERS
migration_complete = consts.UPGRADE_STATE_DATA_MIGRATION_COMPLETE
if (upgrade_state == consts.UPGRADE_STATE_DATA_MIGRATION_FAILED
or upgrade_state == consts.UPGRADE_STATE_DATA_MIGRATION):
error_message = "upgrade state: %s" % upgrade_state
raise ManualRecoveryRequiredException(
subcloud=strategy_step.subcloud.name,
error_message=error_message)
elif(upgrade_state == consts.UPGRADE_STATE_UPGRADING_CONTROLLERS or
upgrade_state == consts.UPGRADE_STATE_DATA_MIGRATION_COMPLETE):
# At this point the subcloud is duplex, deploy state is complete
# and "system upgrade-show" on the subcloud indicates that the
# upgrade state is "upgrading-controllers".
elif (upgrade_state == controllers_state or
upgrade_state == migration_complete):
# At this point the subcloud is duplex, deploy state is
# completeand "system upgrade-show" on the subcloud indicates
# that the upgrade state is "upgrading-controllers".
# If controller-1 is locked then we unlock it,
# if controller-0 is active we need to swact
# else we can proceed to create the VIM strategy.
controller_1_host = subcloud_sysinv_client.get_host("controller-1")
controller_1_host = subcloud_sysinv_client.get_host(
"controller-1")
if controller_1_host.administrative == consts.ADMIN_LOCKED:
self.override_next_state(
consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_1)
elif host.capabilities.get('Personality') == consts.PERSONALITY_CONTROLLER_ACTIVE:
elif host.capabilities.get('Personality') == \
consts.PERSONALITY_CONTROLLER_ACTIVE:
self.override_next_state(
consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_1)
else:
self.override_next_state(
consts.STRATEGY_STATE_CREATING_VIM_UPGRADE_STRATEGY)
elif (upgrade_state == consts.UPGRADE_STATE_UPGRADING_HOSTS):
# At this point the subcloud is duplex, deploy state is complete
# and "system upgrade-show" on the subcloud indicates that the
# upgrade state is "upgrading-hosts".
elif upgrade_state == consts.UPGRADE_STATE_UPGRADING_HOSTS:
# At this point the subcloud is duplex, deploy state is
# complete and "system upgrade-show" on the subcloud
# indicates that theupgrade state is "upgrading-hosts".
# If both subcloud hosts are upgraded to the newer load,
# we resume the state machine from activate upgrade state.
# Otherwise, we resume from create the VIM strategy state.
# determine the version of the system controller in region one
target_version = \
self._read_from_cache(REGION_ONE_SYSTEM_INFO_CACHE_TYPE)\
.software_version
# determine the version of the system controller in regionone
target_version = self._read_from_cache(
REGION_ONE_SYSTEM_INFO_CACHE_TYPE).software_version
all_hosts_upgraded = True
subcloud_hosts = self.get_sysinv_client(
strategy_step.subcloud.region_name).get_hosts()
for subcloud_host in subcloud_hosts:
if(subcloud_host.software_load != target_version or
subcloud_host.administrative == consts.ADMIN_LOCKED or
subcloud_host.operational == consts.OPERATIONAL_DISABLED):
is_locked = (subcloud_host.administrative ==
consts.ADMIN_LOCKED)
is_disabled = (subcloud_host.operational ==
consts.OPERATIONAL_DISABLED)
create_vim_state = \
consts.STRATEGY_STATE_CREATING_VIM_UPGRADE_STRATEGY
if (subcloud_host.software_load != target_version or
is_locked or is_disabled):
all_hosts_upgraded = False
self.override_next_state(
consts.STRATEGY_STATE_CREATING_VIM_UPGRADE_STRATEGY)
self.override_next_state(create_vim_state)
if all_hosts_upgraded:
if host.capabilities.get('Personality') == consts.PERSONALITY_CONTROLLER_ACTIVE:
if host.capabilities.get('Personality') == \
consts.PERSONALITY_CONTROLLER_ACTIVE:
self.override_next_state(
consts.STRATEGY_STATE_ACTIVATING_UPGRADE)
else:
self.override_next_state(
consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_0)
elif (upgrade_state == consts.UPGRADE_STATE_ACTIVATION_FAILED):
if(host.capabilities.get('Personality') == consts.PERSONALITY_CONTROLLER_ACTIVE):
elif upgrade_state == consts.UPGRADE_STATE_ACTIVATION_FAILED:
if (host.capabilities.get('Personality') ==
consts.PERSONALITY_CONTROLLER_ACTIVE):
self.override_next_state(
consts.STRATEGY_STATE_ACTIVATING_UPGRADE)
else:
self.override_next_state(
consts.STRATEGY_STATE_SWACTING_TO_CONTROLLER_0)
elif (upgrade_state == consts.UPGRADE_STATE_ACTIVATION_COMPLETE):
self.override_next_state(consts.STRATEGY_STATE_COMPLETING_UPGRADE)
elif upgrade_state == consts.UPGRADE_STATE_ACTIVATION_COMPLETE:
self.override_next_state(
consts.STRATEGY_STATE_COMPLETING_UPGRADE)
else:
# Perform subcloud online check for duplex and proceed to the next step
# (i.e. installing license)
# Perform subcloud online check for duplex and proceed to the
# next step (i.e. installing license)
self._perform_subcloud_online_checks(strategy_step,
subcloud_sysinv_client,
subcloud_fm_client,

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dccommon.drivers.openstack.vim import ALARM_RESTRICTIONS_RELAXED
@ -96,10 +97,12 @@ class StartingUpgradeState(BaseState):
if upgrade_state in UPGRADE_RETRY_STATES:
retry_counter += 1
if retry_counter >= self.max_failed_retries:
error_msg = utils.get_failure_msg(strategy_step.subcloud.region_name)
error_msg = utils.get_failure_msg(
strategy_step.subcloud.region_name)
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
error_description=error_msg[0:consts.ERROR_DESCRIPTION_LENGTH])
error_description=error_msg[
0:consts.ERROR_DESCRIPTION_LENGTH])
details = ("Failed to start upgrade. Please "
"check sysinv.log on the subcloud or "
"%s on central for details." %
@ -110,7 +113,8 @@ class StartingUpgradeState(BaseState):
% upgrade_state)
try:
self.get_sysinv_client(
strategy_step.subcloud.region_name).upgrade_start(force=force_flag)
strategy_step.subcloud.region_name).upgrade_start(
force=force_flag)
except Exception as exception:
self.warn_log(strategy_step,
"Encountered exception: %s, "

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2022-2023 Wind River Systems, Inc.
# Copyright (c) 2022-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dcmanager.common import consts
@ -48,14 +49,16 @@ class TransferCACertificateState(BaseState):
retry_counter = 0
while True:
try:
sysinv_client = self.get_sysinv_client(strategy_step.subcloud.region_name)
sysinv_client = \
self.get_sysinv_client(strategy_step.subcloud.region_name)
data = {'mode': 'openldap_ca'}
ldap_ca_cert, ldap_ca_key = utils.get_certificate_from_secret(
consts.OPENLDAP_CA_CERT_SECRET_NAME,
consts.CERT_NAMESPACE_PLATFORM_CA_CERTS)
sysinv_client.update_certificate('', ldap_ca_cert + ldap_ca_key, data)
sysinv_client.update_certificate(
'', ldap_ca_cert + ldap_ca_key, data)
break
except Exception as e:
self.warn_log(strategy_step,

View File

@ -1,8 +1,9 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import time
@ -69,7 +70,8 @@ class UpdatingPatchesState(BaseState):
patching_v1.PATCH_STATE_APPLIED,
patching_v1.PATCH_STATE_COMMITTED]:
applied_patch_ids.append(patch_id)
self.debug_log(strategy_step, "RegionOne applied_patch_ids: %s" % applied_patch_ids)
self.debug_log(strategy_step,
"RegionOne applied_patch_ids: %s" % applied_patch_ids)
region = self.get_region_name(strategy_step)
# Retrieve all the patches that are present in this subcloud.
@ -96,7 +98,7 @@ class UpdatingPatchesState(BaseState):
if subcloud_patches[patch_id]['repostate'] == \
patching_v1.PATCH_STATE_APPLIED:
if subcloud_patches[patch_id]['patchstate'] != \
patching_v1.PATCH_STATE_APPLIED:
patching_v1.PATCH_STATE_APPLIED:
self.info_log(strategy_step,
"Patch %s will be removed from subcloud" %
(patch_id))

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -24,7 +24,9 @@ class UpgradingDuplexState(BaseState):
def __init__(self, region_name):
super(UpgradingDuplexState, self).__init__(
next_state=consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_1, region_name=region_name)
next_state=consts.STRATEGY_STATE_UNLOCKING_CONTROLLER_1,
region_name=region_name
)
self.target_hostname = "controller-1"
# max time to wait (in seconds) is: sleep_duration * max_queries
self.sleep_duration = DEFAULT_SLEEP_DURATION
@ -61,8 +63,12 @@ class UpgradingDuplexState(BaseState):
upgrades = self.get_sysinv_client(region).get_upgrades()
if len(upgrades) != 0:
if (upgrades[0].state == consts.UPGRADE_STATE_DATA_MIGRATION_FAILED or
upgrades[0].state == consts.UPGRADE_STATE_DATA_MIGRATION_COMPLETE):
if (
upgrades[0].state ==
consts.UPGRADE_STATE_DATA_MIGRATION_FAILED or
upgrades[0].state ==
consts.UPGRADE_STATE_DATA_MIGRATION_COMPLETE
):
msg = "Upgrade state is %s now" % (upgrades[0].state)
self.info_log(strategy_step, msg)
break
@ -79,10 +85,11 @@ class UpgradingDuplexState(BaseState):
continue
api_counter += 1
if api_counter >= self.max_queries:
raise Exception("Timeout waiting for update state to be updated to "
"updated to 'data-migration-failed' or 'data-migration-complete'."
"Please check sysinv.log on the subcloud "
"for details.")
raise Exception(
"Timeout waiting for update state to be updated to "
"'data-migration-failed' or 'data-migration-complete'. "
"Please check sysinv.log on the subcloud for details."
)
time.sleep(self.sleep_duration)
# If the upgrade state is 'data-migration-complete' we move to the
@ -95,7 +102,9 @@ class UpgradingDuplexState(BaseState):
# The list of upgrades will never contain more than one entry.
if upgrades[0].state == consts.UPGRADE_STATE_DATA_MIGRATION_FAILED:
raise Exception("Data migration failed on host %s" % self.target_hostname)
raise Exception(
"Data migration failed on host %s" % self.target_hostname
)
# If we reach at this point, the upgrade state is 'data-migration-complete'
# and we can move to the next state.

View File

@ -1,12 +1,13 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import json
import keyring
import os
import keyring
from oslo_serialization import base64
from tsconfig.tsconfig import SW_VERSION
@ -88,7 +89,8 @@ class UpgradingSimplexState(BaseState):
subcloud data_install are obtained from:
dcmanager database:
subcloud.subcloud_install_initial::for values which are persisted at subcloud_add time
subcloud.subcloud_install_initial::for values which are persisted at
subcloud_add time
INSTALL: (needed for upgrade install)
bootstrap_interface
@ -103,7 +105,8 @@ class UpgradingSimplexState(BaseState):
# Set this options for https with self-signed certificate
# no_check_certificate
# Override default filesystem device: also from host-show, but is static.
# Override default filesystem device: also from host-show, but is
static.
# rootfs_device: "/dev/disk/by-path/pci-0000:00:1f.2-ata-1.0"
# boot_device: "/dev/disk/by-path/pci-0000:00:1f.2-ata-1.0"
@ -111,22 +114,24 @@ class UpgradingSimplexState(BaseState):
# rd.net.timeout.ipv6dad: 300
BOOTSTRAP: (also needed for bootstrap)
# If the subcloud's bootstrap IP interface and the system controller are not on the
# same network then the customer must configure a default route or static route
# so that the Central Cloud can login bootstrap the newly installed subcloud.
# If nexthop_gateway is specified and the network_address is not specified then a
# default route will be configured. Otherwise, if a network_address is specified
then
# a static route will be configured.
# If the subcloud's bootstrap IP interface and the system controller
# are not on the same network then the customer must configure a
# default route or static route so that the Central Cloud can login
# bootstrap the newly installed subcloud. If nexthop_gateway is
# specified and the network_address is not specified then a default
# route will be configured. Otherwise, if a network_address is
# specified then a static route will be configured.
nexthop_gateway: default_route_address
network_address: static_route_address
network_mask: static_route_mask
subcloud.data_upgrade - persist for upgrade duration
for values from subcloud online sysinv host-show (persist since upgrade-start)
for values from subcloud online sysinv host-show
(persist since upgrade-start)
bmc_address # sysinv_v1 host-show
bmc_username # sysinv_v1 host-show
for values from barbican_client (as barbican user), or from upgrade-start:
for values from barbican_client (as barbican user),
or from upgrade-start:
bmc_password --- obtain from barbican_client as barbican user
"""
@ -239,7 +244,7 @@ class UpgradingSimplexState(BaseState):
return upgrade_data_install
def _get_subcloud_upgrade_data(
self, strategy_step, subcloud_sysinv_client, subcloud_barbican_client):
self, strategy_step, subcloud_sysinv_client, subcloud_barbican_client):
"""Get the subcloud data required for upgrades.
In case the subcloud is no longer reachable, get upgrade_data from
@ -323,8 +328,9 @@ class UpgradingSimplexState(BaseState):
def perform_subcloud_install(self, strategy_step, install_values):
log_file = os.path.join(consts.DC_ANSIBLE_LOG_DIR, strategy_step.subcloud.name) + \
'_playbook_output.log'
log_file = os.path.join(
consts.DC_ANSIBLE_LOG_DIR,
strategy_step.subcloud.name) + '_playbook_output.log'
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
deploy_status=consts.DEPLOY_STATE_PRE_INSTALL)
@ -375,7 +381,8 @@ class UpgradingSimplexState(BaseState):
# Detailed error message for subcloud error description field.
# Exception message for strategy_step detail.
msg = utils.find_ansible_error_msg(
strategy_step.subcloud.name, log_file, consts.DEPLOY_STATE_INSTALLING)
strategy_step.subcloud.name, log_file,
consts.DEPLOY_STATE_INSTALLING)
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
deploy_status=consts.DEPLOY_STATE_INSTALL_FAILED,

View File

@ -1,26 +1,26 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import shutil
import threading
from oslo_config import cfg
from oslo_log import log as logging
from tsconfig.tsconfig import SW_VERSION
from dccommon import consts as dccommon_consts
@ -137,7 +137,9 @@ class SwUpdateManager(manager.Manager):
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
# force option only has an effect in offline case for upgrade
if force and (availability_status != dccommon_consts.AVAILABILITY_ONLINE):
if force and (
availability_status != dccommon_consts.AVAILABILITY_ONLINE
):
if cfg.CONF.use_usm:
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_SOFTWARE and
@ -352,14 +354,16 @@ class SwUpdateManager(manager.Manager):
else:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_LOAD)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
if subcloud_status.sync_status == \
dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require upgrade' % cloud_name)
elif strategy_type == consts.SW_UPDATE_TYPE_FIRMWARE:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_FIRMWARE)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
if subcloud_status.sync_status == \
dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require firmware update'
@ -372,7 +376,8 @@ class SwUpdateManager(manager.Manager):
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id,
dccommon_consts.ENDPOINT_TYPE_KUBERNETES)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
if subcloud_status.sync_status == \
dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require kubernetes update'
@ -385,7 +390,8 @@ class SwUpdateManager(manager.Manager):
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id,
dccommon_consts.ENDPOINT_TYPE_KUBE_ROOTCA)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
if subcloud_status.sync_status == \
dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require kube rootca update'
@ -394,7 +400,8 @@ class SwUpdateManager(manager.Manager):
# Make sure subcloud requires patching
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_PATCHING)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
if subcloud_status.sync_status == \
dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require patching' % cloud_name)
@ -469,7 +476,8 @@ class SwUpdateManager(manager.Manager):
continue
if strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != \
dccommon_consts.AVAILABILITY_ONLINE:
if not force:
continue
elif cfg.CONF.use_usm:
@ -490,7 +498,8 @@ class SwUpdateManager(manager.Manager):
msg='Upgrade sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_PATCH:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != \
dccommon_consts.AVAILABILITY_ONLINE:
continue
elif (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_PATCHING and
@ -501,7 +510,8 @@ class SwUpdateManager(manager.Manager):
msg='Patching sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_FIRMWARE:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != \
dccommon_consts.AVAILABILITY_ONLINE:
continue
elif (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_FIRMWARE and
@ -512,7 +522,8 @@ class SwUpdateManager(manager.Manager):
msg='Firmware sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_KUBERNETES:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != \
dccommon_consts.AVAILABILITY_ONLINE:
continue
elif (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_KUBERNETES and
@ -523,7 +534,8 @@ class SwUpdateManager(manager.Manager):
msg='Kubernetes sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != \
dccommon_consts.AVAILABILITY_ONLINE:
continue
elif (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_KUBE_ROOTCA and
@ -552,7 +564,8 @@ class SwUpdateManager(manager.Manager):
max_parallel_subclouds = 1
if max_parallel_subclouds is None:
max_parallel_subclouds = consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS
max_parallel_subclouds = (
consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS)
strategy_step_created = False
# Create the strategy

View File

@ -1,19 +1,20 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.orch_thread import OrchThread
@ -88,7 +89,8 @@ class SwUpgradeOrchThread(OrchThread):
consts.STRATEGY_STATE_FINISHING_PATCH_STRATEGY:
FinishingPatchStrategyState,
consts.STRATEGY_STATE_STARTING_UPGRADE: StartingUpgradeState,
consts.STRATEGY_STATE_TRANSFERRING_CA_CERTIFICATE: TransferCACertificateState,
consts.STRATEGY_STATE_TRANSFERRING_CA_CERTIFICATE:
TransferCACertificateState,
consts.STRATEGY_STATE_LOCKING_CONTROLLER_0: LockSimplexState,
consts.STRATEGY_STATE_LOCKING_CONTROLLER_1: LockDuplexState,
consts.STRATEGY_STATE_UPGRADING_SIMPLEX: UpgradingSimplexState,

View File

@ -1,15 +1,17 @@
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
@ -83,12 +85,11 @@ class SubcloudStateClient(RPCClient):
update_state_only=update_state_only,
audit_fail_count=audit_fail_count))
def update_subcloud_endpoint_status(self, ctxt, subcloud_name=None,
subcloud_region=None,
endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
ignore_endpoints=None,
alarmable=True):
def update_subcloud_endpoint_status(
self, ctxt, subcloud_name=None, subcloud_region=None, endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, ignore_endpoints=None,
alarmable=True
):
# Note: This is an asynchronous operation.
# See below for synchronous method call
return self.cast(ctxt, self.make_msg('update_subcloud_endpoint_status',
@ -99,12 +100,11 @@ class SubcloudStateClient(RPCClient):
ignore_endpoints=ignore_endpoints,
alarmable=alarmable))
def update_subcloud_endpoint_status_sync(self, ctxt, subcloud_name=None,
subcloud_region=None,
endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
ignore_endpoints=None,
alarmable=True):
def update_subcloud_endpoint_status_sync(
self, ctxt, subcloud_name=None, subcloud_region=None, endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, ignore_endpoints=None,
alarmable=True
):
# Note: synchronous
return self.call(ctxt, self.make_msg('update_subcloud_endpoint_status',
subcloud_name=subcloud_name,
@ -144,16 +144,20 @@ class ManagerClient(RPCClient):
return self.call(ctxt, self.make_msg('delete_subcloud',
subcloud_id=subcloud_id))
def rename_subcloud(self, ctxt, subcloud_id, curr_subcloud_name, new_subcloud_name=None):
def rename_subcloud(
self, ctxt, subcloud_id, curr_subcloud_name, new_subcloud_name=None
):
return self.call(ctxt, self.make_msg('rename_subcloud',
subcloud_id=subcloud_id,
curr_subcloud_name=curr_subcloud_name,
new_subcloud_name=new_subcloud_name))
def update_subcloud(self, ctxt, subcloud_id, management_state=None,
description=None, location=None, group_id=None,
data_install=None, force=None,
deploy_status=None, peer_group_id=None, bootstrap_values=None, bootstrap_address=None):
def update_subcloud(
self, ctxt, subcloud_id, management_state=None, description=None,
location=None, group_id=None, data_install=None, force=None,
deploy_status=None, peer_group_id=None, bootstrap_values=None,
bootstrap_address=None
):
return self.call(ctxt, self.make_msg('update_subcloud',
subcloud_id=subcloud_id,
management_state=management_state,
@ -242,11 +246,12 @@ class ManagerClient(RPCClient):
def subcloud_deploy_resume(self, ctxt, subcloud_id, subcloud_name,
payload, deploy_states_to_run):
return self.cast(ctxt, self.make_msg('subcloud_deploy_resume',
subcloud_id=subcloud_id,
subcloud_name=subcloud_name,
payload=payload,
deploy_states_to_run=deploy_states_to_run))
return self.cast(ctxt, self.make_msg(
'subcloud_deploy_resume',
subcloud_id=subcloud_id,
subcloud_name=subcloud_name,
payload=payload,
deploy_states_to_run=deploy_states_to_run))
def get_subcloud_name_by_region_name(self, ctxt, subcloud_region):
return self.call(ctxt, self.make_msg('get_subcloud_name_by_region_name',

View File

@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
@ -112,12 +112,11 @@ class DCManagerStateService(service.Service):
super(DCManagerStateService, self).stop()
@request_context
def update_subcloud_endpoint_status(self, context, subcloud_name=None,
subcloud_region=None,
endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
alarmable=True,
ignore_endpoints=None):
def update_subcloud_endpoint_status(
self, context, subcloud_name=None, subcloud_region=None, endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, alarmable=True,
ignore_endpoints=None
):
# Updates subcloud endpoint sync status
LOG.info("Handling update_subcloud_endpoint_status request for "
"subcloud: (%s) endpoint: (%s) status:(%s) "

View File

@ -10,32 +10,30 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
from fm_api import constants as fm_const
from fm_api import fm_api
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dcorch.rpc import client as dcorch_rpc_client
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
from dcmanager.common import consts
from dcmanager.common import context
from dcmanager.common import exceptions
from dcmanager.common import manager
from dcmanager.common import utils
from dcmanager.rpc import client as rpc_client
from dcmanager.db import api as db_api
from fm_api import constants as fm_const
from fm_api import fm_api
from dcmanager.rpc import client as rpc_client
from dcorch.rpc import client as dcorch_rpc_client
LOG = logging.getLogger(__name__)
ALARM_OUT_OF_SYNC = fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC
def sync_update_subcloud_endpoint_status(func):
@ -133,27 +131,31 @@ class SubcloudStateManager(manager.Manager):
# Trigger subcloud audits for the subcloud after
# its identity endpoint turns to other status from unknown
is_sync_unknown = sync_status != dccommon_consts.SYNC_STATUS_UNKNOWN
is_identity_unknown = (
original_identity_status == dccommon_consts.SYNC_STATUS_UNKNOWN
)
if endpoint_type == dccommon_consts.ENDPOINT_TYPE_IDENTITY \
and sync_status != dccommon_consts.SYNC_STATUS_UNKNOWN \
and original_identity_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
and is_sync_unknown and is_identity_unknown:
if not subcloud.first_identity_sync_complete:
db_api.subcloud_update(context, subcloud_id,
first_identity_sync_complete=True)
LOG.debug('Request for audits for %s after updating '
'identity out of unknown' % subcloud.name)
self.audit_rpc_client.trigger_subcloud_audits(context, subcloud_id)
self.audit_rpc_client.trigger_subcloud_audits(
context, subcloud_id)
entity_instance_id = "subcloud=%s.resource=%s" % \
(subcloud.name, endpoint_type)
fault = self.fm_api.get_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC,
ALARM_OUT_OF_SYNC,
entity_instance_id)
if (sync_status != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) \
and fault:
try:
self.fm_api.clear_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa
ALARM_OUT_OF_SYNC,
entity_instance_id)
except Exception as e:
LOG.exception(e)
@ -162,8 +164,9 @@ class SubcloudStateManager(manager.Manager):
(sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC):
entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
try:
fault = fm_api.Fault(
alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa
alarm_id=ALARM_OUT_OF_SYNC,
alarm_state=fm_const.FM_ALARM_STATE_SET,
entity_type_id=entity_type_id,
entity_instance_id=entity_instance_id,
@ -208,7 +211,7 @@ class SubcloudStateManager(manager.Manager):
(subcloud.name, endpoint)
fault = self.fm_api.get_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC,
ALARM_OUT_OF_SYNC,
entity_instance_id)
# TODO(yuxing): batch clear all the out-of-sync alarms of a
@ -219,7 +222,7 @@ class SubcloudStateManager(manager.Manager):
and fault:
try:
self.fm_api.clear_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa
ALARM_OUT_OF_SYNC,
entity_instance_id)
except Exception as e:
LOG.exception(e)
@ -229,7 +232,7 @@ class SubcloudStateManager(manager.Manager):
entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
try:
fault = fm_api.Fault(
alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa
alarm_id=ALARM_OUT_OF_SYNC,
alarm_state=fm_const.FM_ALARM_STATE_SET,
entity_type_id=entity_type_id,
entity_instance_id=entity_instance_id,
@ -250,9 +253,11 @@ class SubcloudStateManager(manager.Manager):
if endpoint_to_update_list:
try:
db_api.subcloud_status_update_endpoints(context, subcloud_id,
endpoint_to_update_list,
sync_status)
db_api.subcloud_status_update_endpoints(
context,
subcloud_id,
endpoint_to_update_list,
sync_status)
except Exception as e:
LOG.exception(e)
@ -441,7 +446,9 @@ class SubcloudStateManager(manager.Manager):
try:
subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)
except Exception:
LOG.exception("Failed to get subcloud by region name %s" % subcloud_region)
LOG.exception(
"Failed to get subcloud by region name %s" % subcloud_region
)
raise
if update_state_only:
@ -529,7 +536,9 @@ class SubcloudStateManager(manager.Manager):
try:
subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)
except Exception:
LOG.exception("Failed to get subcloud by region name: %s" % subcloud_region)
LOG.exception(
"Failed to get subcloud by region name: %s" % subcloud_region
)
raise
try:

View File

@ -1,5 +1,5 @@
# Copyright (c) 2015 Ericsson AB
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -16,25 +16,23 @@
#
import json
import sqlalchemy
from oslo_config import cfg
from oslo_db import options
from oslotest import base
import sqlalchemy
from sqlalchemy.engine import Engine
from sqlalchemy import event
from dcmanager.common import consts
from dcmanager.db import api as api
from dcmanager.db import api
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.tests import utils
from oslotest import base
get_engine = api.get_engine
# Enable foreign key support in sqlite - see:
# http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html
from sqlalchemy.engine import Engine
from sqlalchemy import event
SUBCLOUD_1 = {'name': 'subcloud1',
'region_name': '2ec93dfb654846909efe61d1b39dd2ce',

View File

@ -1,13 +1,15 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import mock
from six.moves import http_client
import uuid
import mock
from six.moves import http_client
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.rpc import client as rpc_client
@ -308,7 +310,9 @@ class TestPeerGroupAssociationUpdate(testroot.DCManagerApiTest,
@mock.patch.object(psd_common, 'OpenStackDriver')
@mock.patch.object(peer_group_association, 'SysinvClient')
@mock.patch.object(rpc_client, 'ManagerClient')
def test_sync_association(self, mock_client, mock_sysinv_client, mock_keystone_client):
def test_sync_association(
self, mock_client, mock_sysinv_client, mock_keystone_client
):
mock_client().sync_subcloud_peer_group.return_value = True
mock_keystone_client().keystone_client = FakeKeystoneClient()
mock_sysinv_client.return_value = FakeSysinvClient()

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -7,10 +7,10 @@
import base64
import copy
import json
import mock
import os
from os import path as os_path
import mock
import six
from tsconfig.tsconfig import SW_VERSION
import webtest
@ -172,7 +172,8 @@ class TestSubcloudDeployBootstrap(testroot.DCManagerApiTest):
@mock.patch.object(dutils, 'load_yaml_file')
@mock.patch.object(os_path, 'exists')
def test_subcloud_bootstrap_no_bootstrap_values_on_request(
self, mock_path_exists, mock_load_yaml_file):
self, mock_path_exists, mock_load_yaml_file
):
mock_path_exists.side_effect = [False, False, False, False, True]
fake_bootstrap_values = copy.copy(
fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA)
@ -219,7 +220,7 @@ class TestSubcloudDeployBootstrap(testroot.DCManagerApiTest):
name="existing_subcloud",
deploy_status=consts.DEPLOY_STATE_DONE,
**conflicting_subnet
)
)
subcloud = fake_subcloud.create_fake_subcloud(
self.ctx,
@ -264,13 +265,15 @@ class TestSubcloudDeployConfig(testroot.DCManagerApiTest):
self, mock_load_yaml, mock_path_exists
):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx, data_install='')
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
fake_password = \
(base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
data = {'sysadmin_password': fake_password}
self.mock_rpc_client().subcloud_deploy_config.return_value = True
self.mock_get_request_data.return_value = data
overrides_file = psd_common.get_config_file_path(subcloud.name)
mock_path_exists.side_effect = lambda x: True if x == overrides_file else False
mock_path_exists.side_effect = \
lambda x: True if x == overrides_file else False
mock_load_yaml.return_value = {
consts.BOOTSTRAP_ADDRESS:
fake_subcloud.FAKE_BOOTSTRAP_VALUE[consts.BOOTSTRAP_ADDRESS]}
@ -289,7 +292,8 @@ class TestSubcloudDeployConfig(testroot.DCManagerApiTest):
subcloud = fake_subcloud.create_fake_subcloud(
self.ctx, data_install=json.dumps(data_install)
)
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
fake_password = \
(base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
data = {'sysadmin_password': fake_password}
self.mock_rpc_client().subcloud_deploy_config.return_value = True
@ -391,7 +395,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest):
mock_initial_deployment.return_value = True
self.mock_rpc_client().subcloud_deploy_install.return_value = True
self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path')
self.mock_get_vault_load_files.return_value = \
('iso_file_path', 'sig_file_path')
response = self.app.patch_json(
FAKE_URL + '/' + str(subcloud.id) + '/install',
@ -427,10 +432,12 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest):
mock_initial_deployment.return_value = True
self.mock_rpc_client().subcloud_deploy_install.return_value = True
self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path')
self.mock_get_vault_load_files.return_value = \
('iso_file_path', 'sig_file_path')
with mock.patch('builtins.open',
mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)):
with mock.patch('builtins.open', mock.mock_open(
read_data=fake_subcloud.FAKE_UPGRADES_METADATA
)):
response = self.app.patch_json(
FAKE_URL + '/' + str(subcloud.id) + '/install',
headers=FAKE_HEADERS, params=install_payload)
@ -441,7 +448,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest):
@mock.patch.object(psd_common, 'is_initial_deployment')
def test_install_subcloud_not_initial_deployment(
self, mock_initial_deployment):
self, mock_initial_deployment
):
subcloud = fake_subcloud.create_fake_subcloud(
self.ctx,
@ -463,7 +471,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest):
self.mock_get_subcloud_db_install_values.return_value = install_data
self.mock_rpc_client().subcloud_deploy_install.return_value = True
self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path')
self.mock_get_vault_load_files.return_value = \
('iso_file_path', 'sig_file_path')
mock_initial_deployment.return_value = False
six.assertRaisesRegex(self, webtest.app.AppError, "400 *",
@ -489,7 +498,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest):
@mock.patch.object(psd_common, 'is_initial_deployment')
def test_install_subcloud_no_install_values_on_request_or_db(
self, mock_initial_deployment):
self, mock_initial_deployment
):
subcloud = fake_subcloud.create_fake_subcloud(
self.ctx,
@ -513,7 +523,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest):
@mock.patch.object(psd_common, 'is_initial_deployment')
def test_install_subcloud_no_install_values_on_request(
self, mock_initial_deployment):
self, mock_initial_deployment
):
subcloud = fake_subcloud.create_fake_subcloud(
self.ctx,
@ -533,7 +544,8 @@ class TestSubcloudDeployInstall(testroot.DCManagerApiTest):
self.mock_get_subcloud_db_install_values.return_value = install_data
self.mock_rpc_client().subcloud_deploy_install.return_value = True
self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path')
self.mock_get_vault_load_files.return_value = \
('iso_file_path', 'sig_file_path')
mock_initial_deployment.return_value = True
response = self.app.patch_json(
@ -688,8 +700,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest):
@mock.patch.object(os_path, 'isdir')
@mock.patch.object(os, 'listdir')
def test_resume_subcloud(
self, mock_os_listdir, mock_os_isdir, mock_initial_deployment,
mock_load_yaml):
self, mock_os_listdir, mock_os_isdir, mock_initial_deployment, mock_load_yaml
):
mock_os_isdir.return_value = True
mock_os_listdir.return_value = ['deploy_chart_fake.tgz',
'deploy_overrides_fake.yaml',
@ -705,7 +717,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest):
data_install=json.dumps(data_install)
)
self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path')
self.mock_get_vault_load_files.return_value = \
('iso_file_path', 'sig_file_path')
self.mock_rpc_client().subcloud_deploy_resume.return_value = True
mock_initial_deployment.return_value = True
mock_load_yaml.return_value = {
@ -722,7 +735,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest):
install_request = {'install_values': data_install,
'sysadmin_password': fake_sysadmin_password,
'bmc_password': fake_bmc_password}
bootstrap_request = {'bootstrap_values': fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA}
bootstrap_request = \
{'bootstrap_values': fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA}
config_request = {'deploy_config': 'deploy config values',
'sysadmin_password': fake_sysadmin_password}
resume_request = {**install_request,
@ -758,7 +772,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest):
deploy_status=consts.DEPLOY_STATE_CREATED,
software_version=SW_VERSION)
self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path')
self.mock_get_vault_load_files.return_value = \
('iso_file_path', 'sig_file_path')
self.mock_rpc_client().subcloud_deploy_resume.return_value = True
mock_initial_deployment.return_value = False
@ -780,7 +795,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest):
deploy_status=consts.DEPLOY_STATE_CREATED,
software_version=SW_VERSION)
self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path')
self.mock_get_vault_load_files.return_value = \
('iso_file_path', 'sig_file_path')
self.mock_rpc_client().subcloud_deploy_resume.return_value = True
invalid_resume_states = [consts.DEPLOY_STATE_INSTALLING,
consts.DEPLOY_STATE_BOOTSTRAPPING,
@ -826,7 +842,8 @@ class TestSubcloudDeployResume(testroot.DCManagerApiTest):
mock_os_listdir.return_value = ['deploy_chart_fake.tgz',
'deploy_overrides_fake.yaml',
'deploy_playbook_fake.yaml']
self.mock_get_vault_load_files.return_value = ('iso_file_path', 'sig_file_path')
self.mock_get_vault_load_files.return_value = \
('iso_file_path', 'sig_file_path')
self.mock_rpc_client().subcloud_deploy_resume.return_value = True
mock_initial_deployment.return_value = True

View File

@ -1,4 +1,4 @@
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,12 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
#
import os
from os import path as os_path
import mock
import six
from six.moves import http_client
from tsconfig.tsconfig import SW_VERSION
import webtest
from dccommon import consts as dccommon_consts
@ -30,14 +32,16 @@ from dcmanager.tests.unit.api import test_root_controller as testroot
from dcmanager.tests.unit.common import fake_subcloud
from dcmanager.tests import utils
from tsconfig.tsconfig import SW_VERSION
FAKE_SOFTWARE_VERSION = '22.12'
FAKE_TENANT = utils.UUID1
FAKE_ID = '1'
FAKE_URL = '/v1.0/subcloud-deploy'
FAKE_HEADERS = {'X-Tenant-Id': FAKE_TENANT, 'X_ROLE': 'admin,member,reader',
'X-Identity-Status': 'Confirmed', 'X-Project-Name': 'admin'}
FAKE_ID = "1"
FAKE_URL = "/v1.0/subcloud-deploy"
FAKE_HEADERS = {
"X-Tenant-Id": FAKE_TENANT,
"X_ROLE": "admin,member,reader",
"X-Identity-Status": "Confirmed",
"X-Project-Name": "admin",
}
FAKE_DEPLOY_PLAYBOOK_PREFIX = consts.DEPLOY_PLAYBOOK + '_'
FAKE_DEPLOY_OVERRIDES_PREFIX = consts.DEPLOY_OVERRIDES + '_'
@ -52,7 +56,8 @@ FAKE_DEPLOY_FILES = {
FAKE_DEPLOY_CHART_PREFIX: FAKE_DEPLOY_CHART_FILE,
}
FAKE_DEPLOY_DELETE_FILES = {
FAKE_DEPLOY_PLAYBOOK_PREFIX: '/opt/platform/deploy/22.12/deployment-manager.yaml',
FAKE_DEPLOY_PLAYBOOK_PREFIX:
'/opt/platform/deploy/22.12/deployment-manager.yaml',
FAKE_DEPLOY_OVERRIDES_PREFIX:
'/opt/platform/deploy/22.12/deployment-manager-overrides-subcloud.yaml',
FAKE_DEPLOY_CHART_PREFIX: '/opt/platform/deploy/22.12/deployment-manager.tgz',
@ -73,154 +78,144 @@ class TestSubcloudDeploy(testroot.DCManagerApiTest):
super(TestSubcloudDeploy, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch.object(subcloud_deploy.SubcloudDeployController,
'_upload_files')
@mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files")
def test_post_subcloud_deploy(self, mock_upload_files):
params = [('release', FAKE_SOFTWARE_VERSION)]
params = [("release", FAKE_SOFTWARE_VERSION)]
fields = list()
for opt in consts.DEPLOY_COMMON_FILE_OPTIONS:
fake_name = opt + "_fake"
fake_content = "fake content".encode('utf-8')
fake_content = "fake content".encode("utf-8")
fields.append((opt, webtest.Upload(fake_name, fake_content)))
mock_upload_files.return_value = True
params += fields
with mock.patch('builtins.open',
mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)):
mock.mock_open(
read_data=fake_subcloud.FAKE_UPGRADES_METADATA
)):
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
params=params)
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(FAKE_SOFTWARE_VERSION, response.json['software_version'])
self.assertEqual(FAKE_SOFTWARE_VERSION, response.json["software_version"])
@mock.patch.object(subcloud_deploy.SubcloudDeployController,
'_upload_files')
@mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files")
def test_post_subcloud_deploy_without_release(self, mock_upload_files):
fields = list()
for opt in consts.DEPLOY_COMMON_FILE_OPTIONS:
fake_name = opt + "_fake"
fake_content = "fake content".encode('utf-8')
fake_content = "fake content".encode("utf-8")
fields.append((opt, fake_name, fake_content))
mock_upload_files.return_value = True
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
upload_files=fields)
response = self.app.post(FAKE_URL, headers=FAKE_HEADERS, upload_files=fields)
self.assertEqual(response.status_code, http_client.OK)
# Verify the active release will be returned if release doesn't present
self.assertEqual(SW_VERSION, response.json['software_version'])
self.assertEqual(SW_VERSION, response.json["software_version"])
@mock.patch.object(subcloud_deploy.SubcloudDeployController,
'_upload_files')
@mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files")
def test_post_subcloud_deploy_missing_chart(self, mock_upload_files):
opts = [consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES, consts.DEPLOY_PRESTAGE]
opts = [
consts.DEPLOY_PLAYBOOK,
consts.DEPLOY_OVERRIDES,
consts.DEPLOY_PRESTAGE,
]
fields = list()
for opt in opts:
fake_name = opt + "_fake"
fake_content = "fake content".encode('utf-8')
fake_content = "fake content".encode("utf-8")
fields.append((opt, fake_name, fake_content))
mock_upload_files.return_value = True
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
upload_files=fields,
expect_errors=True)
response = self.app.post(
FAKE_URL, headers=FAKE_HEADERS, upload_files=fields, expect_errors=True
)
self.assertEqual(response.status_code, http_client.BAD_REQUEST)
@mock.patch.object(subcloud_deploy.SubcloudDeployController,
'_upload_files')
@mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files")
def test_post_subcloud_deploy_missing_chart_prestages(self, mock_upload_files):
opts = [consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES]
fields = list()
for opt in opts:
fake_name = opt + "_fake"
fake_content = "fake content".encode('utf-8')
fake_content = "fake content".encode("utf-8")
fields.append((opt, fake_name, fake_content))
mock_upload_files.return_value = True
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
upload_files=fields,
expect_errors=True)
response = self.app.post(
FAKE_URL, headers=FAKE_HEADERS, upload_files=fields, expect_errors=True
)
self.assertEqual(response.status_code, http_client.BAD_REQUEST)
@mock.patch.object(subcloud_deploy.SubcloudDeployController,
'_upload_files')
def test_post_subcloud_deploy_missing_playbook_overrides(self, mock_upload_files):
@mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files")
def test_post_subcloud_deploy_missing_playbook_overrides(
self, mock_upload_files
):
opts = [consts.DEPLOY_CHART, consts.DEPLOY_PRESTAGE]
fields = list()
for opt in opts:
fake_name = opt + "_fake"
fake_content = "fake content".encode('utf-8')
fake_content = "fake content".encode("utf-8")
fields.append((opt, fake_name, fake_content))
mock_upload_files.return_value = True
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
upload_files=fields,
expect_errors=True)
response = self.app.post(
FAKE_URL, headers=FAKE_HEADERS, upload_files=fields, expect_errors=True
)
self.assertEqual(response.status_code, http_client.BAD_REQUEST)
@mock.patch.object(subcloud_deploy.SubcloudDeployController,
'_upload_files')
@mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files")
def test_post_subcloud_deploy_missing_prestage(self, mock_upload_files):
opts = [consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES, consts.DEPLOY_CHART]
fields = list()
for opt in opts:
fake_name = opt + "_fake"
fake_content = "fake content".encode('utf-8')
fake_content = "fake content".encode("utf-8")
fields.append((opt, fake_name, fake_content))
mock_upload_files.return_value = True
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
upload_files=fields)
response = self.app.post(FAKE_URL, headers=FAKE_HEADERS, upload_files=fields)
self.assertEqual(response.status_code, http_client.OK)
@mock.patch.object(subcloud_deploy.SubcloudDeployController,
'_upload_files')
@mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files")
def test_post_subcloud_deploy_all_input(self, mock_upload_files):
opts = [consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES,
consts.DEPLOY_CHART, consts.DEPLOY_PRESTAGE]
opts = [
consts.DEPLOY_PLAYBOOK,
consts.DEPLOY_OVERRIDES,
consts.DEPLOY_CHART,
consts.DEPLOY_PRESTAGE,
]
fields = list()
for opt in opts:
fake_name = opt + "_fake"
fake_content = "fake content".encode('utf-8')
fake_content = "fake content".encode("utf-8")
fields.append((opt, fake_name, fake_content))
mock_upload_files.return_value = True
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
upload_files=fields)
response = self.app.post(FAKE_URL, headers=FAKE_HEADERS, upload_files=fields)
self.assertEqual(response.status_code, http_client.OK)
@mock.patch.object(subcloud_deploy.SubcloudDeployController,
'_upload_files')
@mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files")
def test_post_subcloud_deploy_prestage(self, mock_upload_files):
opts = [consts.DEPLOY_PRESTAGE]
fields = list()
for opt in opts:
fake_name = opt + "_fake"
fake_content = "fake content".encode('utf-8')
fake_content = "fake content".encode("utf-8")
fields.append((opt, fake_name, fake_content))
mock_upload_files.return_value = True
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
upload_files=fields)
response = self.app.post(FAKE_URL, headers=FAKE_HEADERS, upload_files=fields)
self.assertEqual(response.status_code, http_client.OK)
@mock.patch.object(subcloud_deploy.SubcloudDeployController,
'_upload_files')
@mock.patch.object(subcloud_deploy.SubcloudDeployController, "_upload_files")
def test_post_subcloud_deploy_missing_file_name(self, mock_upload_files):
fields = list()
for opt in consts.DEPLOY_COMMON_FILE_OPTIONS:
fake_content = "fake content".encode('utf-8')
fake_content = "fake content".encode("utf-8")
fields.append((opt, "", fake_content))
mock_upload_files.return_value = True
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
upload_files=fields,
expect_errors=True)
response = self.app.post(
FAKE_URL, headers=FAKE_HEADERS, upload_files=fields, expect_errors=True
)
self.assertEqual(response.status_code, http_client.BAD_REQUEST)
@mock.patch.object(dutils, 'get_filename_by_prefix')
@mock.patch.object(dutils, "get_filename_by_prefix")
def test_get_subcloud_deploy_with_release(self, mock_get_filename_by_prefix):
def get_filename_by_prefix_side_effect(dir_path, prefix):
filename = FAKE_DEPLOY_FILES.get(prefix)
if filename:
@ -234,24 +229,34 @@ class TestSubcloudDeploy(testroot.DCManagerApiTest):
url = FAKE_URL + '/' + FAKE_SOFTWARE_VERSION
with mock.patch('builtins.open',
mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)):
mock.mock_open(
read_data=fake_subcloud.FAKE_UPGRADES_METADATA
)):
response = self.app.get(url, headers=FAKE_HEADERS)
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(FAKE_SOFTWARE_VERSION,
response.json['subcloud_deploy']['software_version'])
self.assertEqual(FAKE_DEPLOY_PLAYBOOK_FILE,
response.json['subcloud_deploy'][consts.DEPLOY_PLAYBOOK])
self.assertEqual(FAKE_DEPLOY_OVERRIDES_FILE,
response.json['subcloud_deploy'][consts.DEPLOY_OVERRIDES])
self.assertEqual(FAKE_DEPLOY_CHART_FILE,
response.json['subcloud_deploy'][consts.DEPLOY_CHART])
self.assertEqual(None,
response.json['subcloud_deploy'][consts.DEPLOY_PRESTAGE])
self.assertEqual(
FAKE_SOFTWARE_VERSION,
response.json["subcloud_deploy"]["software_version"],
)
self.assertEqual(
FAKE_DEPLOY_PLAYBOOK_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_PLAYBOOK],
)
self.assertEqual(
FAKE_DEPLOY_OVERRIDES_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_OVERRIDES],
)
self.assertEqual(
FAKE_DEPLOY_CHART_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_CHART],
)
self.assertEqual(
None, response.json["subcloud_deploy"][consts.DEPLOY_PRESTAGE]
)
@mock.patch.object(dutils, 'get_filename_by_prefix')
@mock.patch.object(dutils, "get_filename_by_prefix")
def test_get_subcloud_deploy_without_release(self, mock_get_filename_by_prefix):
def get_filename_by_prefix_side_effect(dir_path, prefix):
filename = FAKE_DEPLOY_FILES.get(prefix)
if filename:
@ -260,33 +265,47 @@ class TestSubcloudDeploy(testroot.DCManagerApiTest):
return None
os.path.isdir = mock.Mock(return_value=True)
mock_get_filename_by_prefix.side_effect = \
get_filename_by_prefix_side_effect
mock_get_filename_by_prefix.side_effect = get_filename_by_prefix_side_effect
response = self.app.get(FAKE_URL, headers=FAKE_HEADERS)
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(SW_VERSION,
response.json['subcloud_deploy']['software_version'])
self.assertEqual(FAKE_DEPLOY_PLAYBOOK_FILE,
response.json['subcloud_deploy'][consts.DEPLOY_PLAYBOOK])
self.assertEqual(FAKE_DEPLOY_OVERRIDES_FILE,
response.json['subcloud_deploy'][consts.DEPLOY_OVERRIDES])
self.assertEqual(FAKE_DEPLOY_CHART_FILE,
response.json['subcloud_deploy'][consts.DEPLOY_CHART])
self.assertEqual(None,
response.json['subcloud_deploy'][consts.DEPLOY_PRESTAGE])
self.assertEqual(
SW_VERSION, response.json["subcloud_deploy"]["software_version"]
)
self.assertEqual(
FAKE_DEPLOY_PLAYBOOK_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_PLAYBOOK],
)
self.assertEqual(
FAKE_DEPLOY_OVERRIDES_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_OVERRIDES],
)
self.assertEqual(
FAKE_DEPLOY_CHART_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_CHART],
)
self.assertEqual(
None, response.json["subcloud_deploy"][consts.DEPLOY_PRESTAGE]
)
def test_get_config_file_path(self):
bootstrap_file = psd_common.get_config_file_path("subcloud1")
install_values = psd_common.get_config_file_path("subcloud1",
consts.INSTALL_VALUES)
deploy_config = psd_common.get_config_file_path("subcloud1",
consts.DEPLOY_CONFIG)
self.assertEqual(bootstrap_file,
f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1.yml')
self.assertEqual(install_values,
f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1/install_values.yml')
self.assertEqual(deploy_config,
f'{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_deploy_config.yml')
install_values = psd_common.get_config_file_path(
"subcloud1", consts.INSTALL_VALUES
)
deploy_config = psd_common.get_config_file_path(
"subcloud1", consts.DEPLOY_CONFIG
)
self.assertEqual(
bootstrap_file, f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1.yml"
)
self.assertEqual(
install_values,
f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1/install_values.yml",
)
self.assertEqual(
deploy_config,
f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_deploy_config.yml",
)
@mock.patch.object(os_path, 'isdir')
@mock.patch.object(dutils, 'get_sw_version')

View File

@ -1,4 +1,4 @@
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -45,46 +45,41 @@ class SubcloudPeerGroupAPIMixin(APIMixin):
def _get_test_subcloud_peer_group_request(self, **kw):
# id should not be part of the structure
group = {
'peer-group-name': kw.get('peer_group_name', SAMPLE_SUBCLOUD_PEER_GROUP_NAME),
'peer-group-name': kw.get(
'peer_group_name', SAMPLE_SUBCLOUD_PEER_GROUP_NAME
),
'system-leader-id': kw.get(
'system_leader_id',
'62c9592d-f799-4db9-8d40-6786a74d6021'),
'system_leader_id', '62c9592d-f799-4db9-8d40-6786a74d6021'
),
'system-leader-name': kw.get(
'system_leader_name',
'dc-test'),
'group-priority': kw.get(
'group_priority',
'0'),
'group-state': kw.get(
'group_state',
'enabled'),
'system_leader_name', 'dc-test'
),
'group-priority': kw.get('group_priority', '0'),
'group-state': kw.get('group_state', 'enabled'),
'max-subcloud-rehoming': kw.get(
'max_subcloud_rehoming',
SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING)
SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING
)
}
return group
def _get_test_subcloud_peer_group_dict(self, **kw):
# id should not be part of the structure
group = {
'peer_group_name': kw.get('peer_group_name', SAMPLE_SUBCLOUD_PEER_GROUP_NAME),
'peer_group_name': kw.get(
'peer_group_name', SAMPLE_SUBCLOUD_PEER_GROUP_NAME
),
'system_leader_id': kw.get(
'system_leader_id',
'62c9592d-f799-4db9-8d40-6786a74d6021'),
'system_leader_name': kw.get(
'system_leader_name',
'dc-test'),
'group_priority': kw.get(
'group_priority',
'0'),
'group_state': kw.get(
'group_state',
'enabled'),
'system_leader_id', '62c9592d-f799-4db9-8d40-6786a74d6021'
),
'system_leader_name': kw.get('system_leader_name', 'dc-test'),
'group_priority': kw.get('group_priority', '0'),
'group_state': kw.get('group_state', 'enabled'),
'max_subcloud_rehoming': kw.get(
'max_subcloud_rehoming',
SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING),
SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING
),
'migration_status': None
}
return group

Some files were not shown because too many files have changed in this diff Show More