Tech debt: move portions of dcorch.common.consts into dccommon.consts

Parts of dcorch.common.consts, such as ENDPOINT_TYPE_*,
ENDPOINT_TYPES_LIST, SYNC_STATUS_*, and so on, are shared by
dcmanager, so they should be moved into dccommon.consts.
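
The change is mostly a mechanical import swap in the consumers; a
minimal sketch of the before/after pattern (module aliases as used
throughout this commit):

    # before: dcmanager modules imported the shared constants from dcorch
    from dcorch.common import consts as dcorch_consts
    allowed_endpoints = [dcorch_consts.ENDPOINT_TYPE_DC_CERT]

    # after: the same constants are imported from dccommon
    from dccommon import consts as dccommon_consts
    allowed_endpoints = [dccommon_consts.ENDPOINT_TYPE_DC_CERT]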

Test Plan:

PASSED:

All distributedcloud sanity tests.

Closes-Bug: 1978334
Change-Id: I13d7ec72e2863171138b39cd8f982d69e87d60b8
Li Zhu 2022-06-16 12:28:44 -04:00
parent 4114969a0c
commit 665fa12a79
65 changed files with 873 additions and 865 deletions


@ -39,3 +39,87 @@ ADMIN_PROJECT_NAME = "admin"
SYSINV_USER_NAME = "sysinv"
DCMANAGER_USER_NAME = "dcmanager"
SERVICES_USER_NAME = "services"
NOVA_QUOTA_FIELDS = ("metadata_items",
"cores",
"instances",
"ram",
"key_pairs",
"injected_files",
"injected_file_path_bytes",
"injected_file_content_bytes",
"server_group_members",
"server_groups",)
CINDER_QUOTA_FIELDS = ("volumes",
"volumes_iscsi",
"volumes_ceph",
"per_volume_gigabytes",
"groups",
"snapshots",
"snapshots_iscsi",
"snapshots_ceph",
"gigabytes",
"gigabytes_iscsi",
"gigabytes_ceph",
"backups",
"backup_gigabytes")
NEUTRON_QUOTA_FIELDS = ("network",
"subnet",
"subnetpool",
"rbac_policy",
"trunk",
"port",
"router",
"floatingip",
"security_group",
"security_group_rule",
)
ENDPOINT_TYPE_PLATFORM = "platform"
ENDPOINT_TYPE_PATCHING = "patching"
ENDPOINT_TYPE_IDENTITY = "identity"
ENDPOINT_TYPE_FM = "faultmanagement"
ENDPOINT_TYPE_NFV = "nfv"
ENDPOINT_TYPE_LOAD = "load"
ENDPOINT_TYPE_DC_CERT = 'dc-cert'
ENDPOINT_TYPE_FIRMWARE = 'firmware'
ENDPOINT_TYPE_KUBERNETES = 'kubernetes'
ENDPOINT_TYPE_KUBE_ROOTCA = 'kube-rootca'
# All endpoint types
ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_PATCHING,
ENDPOINT_TYPE_IDENTITY,
ENDPOINT_TYPE_LOAD,
ENDPOINT_TYPE_DC_CERT,
ENDPOINT_TYPE_FIRMWARE,
ENDPOINT_TYPE_KUBERNETES,
ENDPOINT_TYPE_KUBE_ROOTCA]
# All endpoint audit requests
ENDPOINT_AUDIT_REQUESTS = {
ENDPOINT_TYPE_FIRMWARE: 'firmware_audit_requested',
ENDPOINT_TYPE_KUBERNETES: 'kubernetes_audit_requested',
ENDPOINT_TYPE_KUBE_ROOTCA: 'kube_rootca_update_audit_requested',
ENDPOINT_TYPE_LOAD: 'load_audit_requested',
ENDPOINT_TYPE_PATCHING: 'patch_audit_requested',
}
# Well known region names
SYSTEM_CONTROLLER_NAME = "SystemController"
DEFAULT_REGION_NAME = "RegionOne"
# Subcloud management state
MANAGEMENT_UNMANAGED = "unmanaged"
MANAGEMENT_MANAGED = "managed"
# Subcloud availability status
AVAILABILITY_OFFLINE = "offline"
AVAILABILITY_ONLINE = "online"
# Subcloud sync status
SYNC_STATUS_UNKNOWN = "unknown"
SYNC_STATUS_IN_SYNC = "in-sync"
SYNC_STATUS_OUT_OF_SYNC = "out-of-sync"
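
A usage sketch (assumed, simplified from the db-api hunk later in this
commit): dcmanager can now build the initial per-endpoint sync status
for a subcloud entirely from dccommon constants.

    from dccommon import consts as dccommon_consts

    # one status entry per known endpoint type, all starting out unknown
    initial_status = {
        endpoint_type: dccommon_consts.SYNC_STATUS_UNKNOWN
        for endpoint_type in dccommon_consts.ENDPOINT_TYPES_LIST
    }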


@ -29,7 +29,7 @@ PATCH_STATE_PARTIAL_APPLY = 'Partial-Apply'
PATCH_STATE_PARTIAL_REMOVE = 'Partial-Remove'
PATCH_STATE_COMMITTED = 'Committed'
PATCH_STATE_UNKNOWN = 'n/a'
PATCH_REST_DEFAULT_TIMEOUT = 900
PATCH_REST_DEFAULT_TIMEOUT = 600
class PatchingClient(base.DriverBase):


@ -1,4 +1,4 @@
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -14,10 +14,10 @@
import mock
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import sysinv_v1
from dccommon.tests import base
from dccommon.tests import utils
from dcmanager.common import consts
class FakeInterface(object):
@ -61,7 +61,7 @@ class TestSysinvClient(base.DCCommonTestCase):
def test_get_controller_hosts(self, mock_sysinvclient_init):
controller_list = ['controller-0', 'controller-1']
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(consts.DEFAULT_REGION_NAME,
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.ihost.list_personality = mock.MagicMock()
@ -75,7 +75,7 @@ class TestSysinvClient(base.DCCommonTestCase):
interface = FakeInterface('interface', 'uuid')
interface_network = FakeInterfaceNetwork('mgmt', 'interface')
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(consts.DEFAULT_REGION_NAME,
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.iinterface.list = mock.MagicMock()
@ -91,7 +91,7 @@ class TestSysinvClient(base.DCCommonTestCase):
network = FakeNetwork('mgmt', 'uuid')
pool = FakeAddressPool('uuid')
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(consts.DEFAULT_REGION_NAME,
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.network.list = mock.MagicMock()
@ -105,7 +105,7 @@ class TestSysinvClient(base.DCCommonTestCase):
def test_create_route(self, mock_sysinvclient_init):
fake_route = utils.create_route_dict(base.ROUTE_0)
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(consts.DEFAULT_REGION_NAME,
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.route.create = mock.MagicMock()


@ -1,4 +1,5 @@
# Copyright (c) 2017 Ericsson AB.
# Copyright (c) 2018-2022 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -37,6 +38,7 @@ import pecan
from pecan import expose
from pecan import request
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon import exceptions as dccommon_exceptions
@ -55,7 +57,6 @@ from dcmanager.common import utils
from dcmanager.db import api as db_api
from dcmanager.rpc import client as rpc_client
from dcorch.common import consts as dcorch_consts
from six.moves import range
CONF = cfg.CONF
@ -364,11 +365,11 @@ class SubcloudsController(object):
if name.isdigit():
pecan.abort(400, _("name must contain alphabetic characters"))
if name in [consts.DEFAULT_REGION_NAME,
consts.SYSTEM_CONTROLLER_NAME]:
if name in [dccommon_consts.DEFAULT_REGION_NAME,
dccommon_consts.SYSTEM_CONTROLLER_NAME]:
pecan.abort(400, _("name cannot be %(bad_name1)s or %(bad_name2)s")
% {'bad_name1': consts.DEFAULT_REGION_NAME,
'bad_name2': consts.SYSTEM_CONTROLLER_NAME})
% {'bad_name1': dccommon_consts.DEFAULT_REGION_NAME,
'bad_name2': dccommon_consts.SYSTEM_CONTROLLER_NAME})
# Parse/validate the management subnet
subcloud_subnets = []
@ -715,7 +716,7 @@ class SubcloudsController(object):
return user_list
@staticmethod
def get_ks_client(region_name=consts.DEFAULT_REGION_NAME):
def get_ks_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
"""This will get a new keystone client (and new token)"""
try:
os_client = OpenStackDriver(region_name=region_name,
@ -730,7 +731,7 @@ class SubcloudsController(object):
"""Get the system controller's management address pool"""
ks_client = self.get_ks_client()
endpoint = ks_client.endpoint_cache.get_endpoint('sysinv')
sysinv_client = SysinvClient(consts.DEFAULT_REGION_NAME,
sysinv_client = SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
ks_client.session,
endpoint=endpoint)
return sysinv_client.get_management_address_pool()
@ -845,7 +846,7 @@ class SubcloudsController(object):
if subcloud_list[-1][consts.SYNC_STATUS] != \
subcloud_dict[consts.SYNC_STATUS]:
subcloud_list[-1][consts.SYNC_STATUS] = \
consts.SYNC_STATUS_OUT_OF_SYNC
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
if subcloud_status:
subcloud_status_list.append(
@ -916,7 +917,7 @@ class SubcloudsController(object):
if detail is not None:
oam_floating_ip = "unavailable"
if subcloud.availability_status == consts.AVAILABILITY_ONLINE:
if subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE:
oam_addresses = self._get_oam_addresses(context,
subcloud.name)
if oam_addresses is not None:
@ -1096,15 +1097,15 @@ class SubcloudsController(object):
# Syntax checking
if management_state and \
management_state not in [consts.MANAGEMENT_UNMANAGED,
consts.MANAGEMENT_MANAGED]:
management_state not in [dccommon_consts.MANAGEMENT_UNMANAGED,
dccommon_consts.MANAGEMENT_MANAGED]:
pecan.abort(400, _('Invalid management-state'))
force_flag = payload.get('force')
if force_flag is not None:
if force_flag not in [True, False]:
pecan.abort(400, _('Invalid force value'))
elif management_state != consts.MANAGEMENT_MANAGED:
elif management_state != dccommon_consts.MANAGEMENT_MANAGED:
pecan.abort(400, _('Invalid option: force'))
# Verify the group_id is valid
@ -1184,7 +1185,7 @@ class SubcloudsController(object):
payload = self._get_request_data(request)
install_values = self._get_subcloud_db_install_values(subcloud)
if subcloud.availability_status == consts.AVAILABILITY_ONLINE:
if subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE:
msg = _('Cannot re-install an online subcloud')
LOG.exception(msg)
pecan.abort(400, msg)
@ -1322,7 +1323,7 @@ class SubcloudsController(object):
description=payload.get('description', subcloud.description),
location=payload.get('location', subcloud.location),
software_version=tsc.SW_VERSION,
management_state=consts.MANAGEMENT_UNMANAGED,
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
deploy_status=consts.DEPLOY_STATE_PRE_INSTALL,
data_install=data_install)
@ -1340,7 +1341,7 @@ class SubcloudsController(object):
if not payload:
pecan.abort(400, _('Body required'))
if subcloud.management_state != consts.MANAGEMENT_UNMANAGED:
if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED:
pecan.abort(400, _('Subcloud can not be restored while it is still '
'in managed state. Please unmanage the subcloud '
'and try again.'))
@ -1498,7 +1499,7 @@ class SubcloudsController(object):
endpoint = payload.get('endpoint')
if not endpoint:
pecan.abort(400, _('endpoint required'))
allowed_endpoints = [dcorch_consts.ENDPOINT_TYPE_DC_CERT]
allowed_endpoints = [dccommon_consts.ENDPOINT_TYPE_DC_CERT]
if endpoint not in allowed_endpoints:
pecan.abort(400, _('updating endpoint %s status is not allowed'
% endpoint))
@ -1507,9 +1508,9 @@ class SubcloudsController(object):
if not status:
pecan.abort(400, _('status required'))
allowed_status = [consts.SYNC_STATUS_IN_SYNC,
consts.SYNC_STATUS_OUT_OF_SYNC,
consts.SYNC_STATUS_UNKNOWN]
allowed_status = [dccommon_consts.SYNC_STATUS_IN_SYNC,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
dccommon_consts.SYNC_STATUS_UNKNOWN]
if status not in allowed_status:
pecan.abort(400, _('status %s in invalid.' % status))


@ -1,5 +1,5 @@
# Copyright (c) 2017 Ericsson AB.
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -24,7 +24,6 @@ from pecan import request
from dccommon import consts as dccommon_consts
from dcmanager.api.controllers import restcomm
from dcmanager.common import consts
from dcmanager.common import exceptions
from dcmanager.common.i18n import _
from dcmanager.common import utils
@ -77,7 +76,7 @@ class SwUpdateOptionsController(object):
return result
elif subcloud_ref == consts.DEFAULT_REGION_NAME:
elif subcloud_ref == dccommon_consts.DEFAULT_REGION_NAME:
# Default options requested, guaranteed to succeed
return utils.get_sw_update_opts(context)
@ -121,7 +120,7 @@ class SwUpdateOptionsController(object):
if not payload:
pecan.abort(400, _('Body required'))
if subcloud_ref == consts.DEFAULT_REGION_NAME:
if subcloud_ref == dccommon_consts.DEFAULT_REGION_NAME:
# update default options
subcloud_name = dccommon_consts.SW_UPDATE_DEFAULT_TITLE
@ -207,7 +206,7 @@ class SwUpdateOptionsController(object):
context = restcomm.extract_context_from_environ()
if subcloud_ref == consts.DEFAULT_REGION_NAME:
if subcloud_ref == dccommon_consts.DEFAULT_REGION_NAME:
# Delete defaults.
# Note by deleting these, the next get will repopulate with
# the global constants.


@ -23,6 +23,7 @@ import pecan
from pecan import expose
from pecan import request
from dccommon import consts as dccommon_consts
from dcmanager.api.controllers import restcomm
from dcmanager.common import consts
from dcmanager.common import exceptions
@ -109,7 +110,7 @@ class SwUpdateStrategyController(object):
else:
# Single step requested
strategy_step = None
if cloud_name == consts.SYSTEM_CONTROLLER_NAME:
if cloud_name == dccommon_consts.SYSTEM_CONTROLLER_NAME:
# The system controller step does not map to a subcloud,
# so has no name.
try:


@ -1,12 +1,12 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
# Copyright (c) 2021-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import abc
import six
from dcmanager.common import consts
from dccommon import consts as dccommon_consts
class Auditor(object):
@ -30,11 +30,12 @@ class Auditor(object):
def set_subcloud_endpoint_in_sync(self, sc_name):
"""Set the endpoint sync status of this subcloud to be in sync"""
self._set_subcloud_sync_status(sc_name, consts.SYNC_STATUS_IN_SYNC)
self._set_subcloud_sync_status(sc_name, dccommon_consts.SYNC_STATUS_IN_SYNC)
def set_subcloud_endpoint_out_of_sync(self, sc_name):
"""Set the endpoint sync status of this subcloud to be out of sync"""
self._set_subcloud_sync_status(sc_name, consts.SYNC_STATUS_OUT_OF_SYNC)
self._set_subcloud_sync_status(sc_name,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
@abc.abstractmethod
def get_regionone_audit_data(self):


@ -1,5 +1,5 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -18,11 +18,10 @@
from keystoneauth1 import exceptions as keystone_exceptions
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcorch.common import consts as dcorch_consts
from dcmanager.common import consts
@ -94,11 +93,11 @@ class FirmwareAudit(object):
"""
try:
m_os_ks_client = OpenStackDriver(
region_name=consts.DEFAULT_REGION_NAME,
region_name=dccommon_consts.DEFAULT_REGION_NAME,
region_clients=None).keystone_client
endpoint = m_os_ks_client.endpoint_cache.get_endpoint('sysinv')
sysinv_client = SysinvClient(
consts.DEFAULT_REGION_NAME, m_os_ks_client.session,
dccommon_consts.DEFAULT_REGION_NAME, m_os_ks_client.session,
endpoint=endpoint)
except Exception:
LOG.exception('Failure initializing OS Client, skip firmware audit.')
@ -230,8 +229,8 @@ class FirmwareAudit(object):
LOG.info('Triggered firmware audit for: %s.' % subcloud_name)
if not audit_data:
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
consts.SYNC_STATUS_IN_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
dccommon_consts.SYNC_STATUS_IN_SYNC)
LOG.debug('No images to audit, exiting firmware audit')
return
try:
@ -268,8 +267,8 @@ class FirmwareAudit(object):
LOG.info("No enabled devices on the subcloud %s,"
"exiting firmware audit" % subcloud_name)
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
consts.SYNC_STATUS_IN_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
dccommon_consts.SYNC_STATUS_IN_SYNC)
return
# Retrieve the device image states on this subcloud.
@ -313,10 +312,10 @@ class FirmwareAudit(object):
if out_of_sync:
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
consts.SYNC_STATUS_OUT_OF_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
else:
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
consts.SYNC_STATUS_IN_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
dccommon_consts.SYNC_STATUS_IN_SYNC)
LOG.info('Firmware audit completed for: %s.' % subcloud_name)


@ -1,5 +1,5 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
# Copyright (c) 2021-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -10,9 +10,9 @@ from oslo_log import log as logging
from fm_api.constants import FM_ALARM_ID_CERT_EXPIRED
from fm_api.constants import FM_ALARM_ID_CERT_EXPIRING_SOON
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.fm import FmClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dcorch.common import consts as dcorch_consts
from dcmanager.audit.auditor import Auditor
@ -31,7 +31,7 @@ class KubeRootcaUpdateAudit(Auditor):
super(KubeRootcaUpdateAudit, self).__init__(
context,
dcmanager_state_rpc_client,
dcorch_consts.ENDPOINT_TYPE_KUBE_ROOTCA
dccommon_consts.ENDPOINT_TYPE_KUBE_ROOTCA
)
self.audit_type = "kube rootca update"
LOG.debug("%s audit initialized" % self.audit_type)


@ -1,5 +1,5 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -18,13 +18,10 @@
from keystoneauth1 import exceptions as keystone_exceptions
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcorch.common import consts as dcorch_consts
from dcmanager.common import consts
LOG = logging.getLogger(__name__)
@ -74,11 +71,11 @@ class KubernetesAudit(object):
"""
try:
m_os_ks_client = OpenStackDriver(
region_name=consts.DEFAULT_REGION_NAME,
region_name=dccommon_consts.DEFAULT_REGION_NAME,
region_clients=None).keystone_client
endpoint = m_os_ks_client.endpoint_cache.get_endpoint('sysinv')
sysinv_client = SysinvClient(
consts.DEFAULT_REGION_NAME, m_os_ks_client.session,
dccommon_consts.DEFAULT_REGION_NAME, m_os_ks_client.session,
endpoint=endpoint)
except Exception:
LOG.exception('Failed init OS Client, skip kubernetes audit.')
@ -97,8 +94,8 @@ class KubernetesAudit(object):
LOG.info('Triggered kubernetes audit for: %s' % subcloud_name)
if not audit_data:
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_KUBERNETES,
consts.SYNC_STATUS_IN_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
dccommon_consts.SYNC_STATUS_IN_SYNC)
LOG.debug('No region one audit data, exiting kubernetes audit')
return
try:
@ -155,10 +152,10 @@ class KubernetesAudit(object):
if out_of_sync:
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_KUBERNETES,
consts.SYNC_STATUS_OUT_OF_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
else:
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_KUBERNETES,
consts.SYNC_STATUS_IN_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
dccommon_consts.SYNC_STATUS_IN_SYNC)
LOG.info('Kubernetes audit completed for: %s' % subcloud_name)


@ -1,5 +1,5 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -18,14 +18,12 @@
from keystoneauth1 import exceptions as keystone_exceptions
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import patching_v1
from dccommon.drivers.openstack.patching_v1 import PatchingClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcorch.common import consts as dcorch_consts
from dcmanager.common import consts
from dcmanager.common import utils
LOG = logging.getLogger(__name__)
@ -92,15 +90,15 @@ class PatchAudit(object):
"""
try:
m_os_ks_client = OpenStackDriver(
region_name=consts.DEFAULT_REGION_NAME,
region_name=dccommon_consts.DEFAULT_REGION_NAME,
region_clients=None).keystone_client
patching_endpoint = m_os_ks_client.endpoint_cache.get_endpoint('patching')
sysinv_endpoint = m_os_ks_client.endpoint_cache.get_endpoint('sysinv')
patching_client = PatchingClient(
consts.DEFAULT_REGION_NAME, m_os_ks_client.session,
dccommon_consts.DEFAULT_REGION_NAME, m_os_ks_client.session,
endpoint=patching_endpoint)
sysinv_client = SysinvClient(
consts.DEFAULT_REGION_NAME, m_os_ks_client.session,
dccommon_consts.DEFAULT_REGION_NAME, m_os_ks_client.session,
endpoint=sysinv_endpoint)
except Exception:
LOG.exception('Failure initializing OS Client, skip patch audit.')
@ -229,12 +227,12 @@ class PatchAudit(object):
if out_of_sync:
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_PATCHING,
consts.SYNC_STATUS_OUT_OF_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
else:
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_PATCHING,
consts.SYNC_STATUS_IN_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.SYNC_STATUS_IN_SYNC)
# Check subcloud software version every other audit cycle
if do_load_audit:
@ -253,16 +251,16 @@ class PatchAudit(object):
if subcloud_software_version == audit_data.software_version:
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_IN_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_IN_SYNC)
else:
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_OUT_OF_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
else:
# As upgrade is still in progress, set the subcloud load
# status as out-of-sync.
self._update_subcloud_sync_status(
subcloud_name, dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_OUT_OF_SYNC)
subcloud_name, dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
LOG.info('Patch audit completed for: %s.' % subcloud_name)


@ -1,5 +1,5 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -37,7 +37,6 @@ from dcmanager.common import context
from dcmanager.common.i18n import _
from dcmanager.common import manager
from dcmanager.db import api as db_api
from dcorch.common import consts as dcorch_consts
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -113,7 +112,7 @@ class SubcloudAuditManager(manager.Manager):
db_api.subcloud_status_get_all(self.context,
subcloud.id)
# Use set difference to find missing endpoints
endpoint_type_set = set(dcorch_consts.ENDPOINT_TYPES_LIST)
endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST)
subcloud_set = set()
for subcloud_status in subcloud_statuses:
subcloud_set.add(subcloud_status.endpoint_type)
@ -194,7 +193,7 @@ class SubcloudAuditManager(manager.Manager):
# to disable the audit explicitly.
if exclude_endpoints:
for exclude_endpoint in exclude_endpoints:
exclude_request = dcorch_consts.ENDPOINT_AUDIT_REQUESTS.get(
exclude_request = dccommon_consts.ENDPOINT_AUDIT_REQUESTS.get(
exclude_endpoint)
if exclude_request:
values.update({exclude_request: False})
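
For reference, ENDPOINT_AUDIT_REQUESTS maps each auditable endpoint
type to the boolean "audit requested" flag used when triggering
audits. A small sketch of the exclusion logic in the hunk above (the
helper name and its arguments are illustrative only):

    from dccommon import consts as dccommon_consts

    def disable_excluded_audits(values, exclude_endpoints):
        # clear the audit-request flag for each excluded endpoint type
        for exclude_endpoint in exclude_endpoints:
            exclude_request = dccommon_consts.ENDPOINT_AUDIT_REQUESTS.get(
                exclude_endpoint)
            if exclude_request:
                values.update({exclude_request: False})
        return values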


@ -205,7 +205,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
service of servicegroup-list then declare the subcloud online.
"""
avail_to_set = consts.AVAILABILITY_OFFLINE
avail_to_set = dccommon_consts.AVAILABILITY_OFFLINE
svc_groups = None
# get a list of service groups in the subcloud
@ -236,7 +236,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
# means we're good to go.
if not inactive_only and active_sgs:
avail_to_set = \
consts.AVAILABILITY_ONLINE
dccommon_consts.AVAILABILITY_ONLINE
else:
LOG.info("Subcloud:%s has non-active "
"service groups: %s" %
@ -349,7 +349,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
sysinv_client = None
fm_client = None
avail_to_set = consts.AVAILABILITY_OFFLINE
avail_to_set = dccommon_consts.AVAILABILITY_OFFLINE
try:
os_client = OpenStackDriver(region_name=subcloud_name,
@ -358,7 +358,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
sysinv_client = os_client.sysinv_client
fm_client = os_client.fm_client
except keystone_exceptions.ConnectTimeout:
if avail_status_current == consts.AVAILABILITY_OFFLINE:
if avail_status_current == dccommon_consts.AVAILABILITY_OFFLINE:
LOG.debug("Identity or Platform endpoint for %s not "
"found, ignoring for offline "
"subcloud." % subcloud_name)
@ -371,7 +371,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
except (keystone_exceptions.EndpointNotFound,
keystone_exceptions.ConnectFailure,
IndexError):
if avail_status_current == consts.AVAILABILITY_OFFLINE:
if avail_status_current == dccommon_consts.AVAILABILITY_OFFLINE:
LOG.info("Identity or Platform endpoint for %s not "
"found, ignoring for offline "
"subcloud." % subcloud_name)
@ -392,19 +392,19 @@ class SubcloudAuditWorkerManager(manager.Manager):
# is online (otherwise prestaging will fail):
if subcloud.deploy_status in (consts.PRESTAGE_STATE_PACKAGES,
consts.PRESTAGE_STATE_IMAGES):
avail_to_set = consts.AVAILABILITY_ONLINE
avail_to_set = dccommon_consts.AVAILABILITY_ONLINE
else:
avail_to_set = self._get_subcloud_availability_status(
subcloud_name, sysinv_client)
if avail_to_set == consts.AVAILABILITY_OFFLINE:
if avail_to_set == dccommon_consts.AVAILABILITY_OFFLINE:
if audit_fail_count < consts.AVAIL_FAIL_COUNT_MAX:
audit_fail_count = audit_fail_count + 1
if (avail_status_current == consts.AVAILABILITY_ONLINE) and \
if (avail_status_current == dccommon_consts.AVAILABILITY_ONLINE) and \
(audit_fail_count < consts.AVAIL_FAIL_COUNT_TO_ALARM):
# Do not set offline until we have failed audit
# the requisite number of times
avail_to_set = consts.AVAILABILITY_ONLINE
avail_to_set = dccommon_consts.AVAILABILITY_ONLINE
else:
# In the case of a one off blip, we may need to set the
# fail count back to 0
@ -412,7 +412,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
if avail_to_set != avail_status_current:
if avail_to_set == consts.AVAILABILITY_ONLINE:
if avail_to_set == dccommon_consts.AVAILABILITY_ONLINE:
audit_fail_count = 0
LOG.debug('Setting new availability status: %s '
@ -442,8 +442,8 @@ class SubcloudAuditWorkerManager(manager.Manager):
update_state_only=True)
# If subcloud is managed and online, audit additional resources
if (subcloud.management_state == consts.MANAGEMENT_MANAGED and
avail_to_set == consts.AVAILABILITY_ONLINE):
if (subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED and
avail_to_set == dccommon_consts.AVAILABILITY_ONLINE):
# Get alarm summary and store in db,
if fm_client:
self.alarm_aggr.update_alarm_summary(subcloud_name, fm_client)


@ -25,18 +25,6 @@ CERTS_VAULT_DIR = "/opt/dc-vault/certs"
LOADS_VAULT_DIR = "/opt/dc-vault/loads"
PATCH_VAULT_DIR = "/opt/dc-vault/patches"
# Well known region names
SYSTEM_CONTROLLER_NAME = "SystemController"
DEFAULT_REGION_NAME = "RegionOne"
# Subcloud management state
MANAGEMENT_UNMANAGED = "unmanaged"
MANAGEMENT_MANAGED = "managed"
# Subcloud availability status
AVAILABILITY_OFFLINE = "offline"
AVAILABILITY_ONLINE = "online"
# Admin status for hosts
ADMIN_LOCKED = 'locked'
ADMIN_UNLOCKED = 'unlocked'
@ -53,11 +41,6 @@ AVAILABILITY_DEGRADED = 'degraded'
PERSONALITY_CONTROLLER_ACTIVE = 'Controller-Active'
PERSONALITY_CONTROLLER_STANDBY = 'Controller-Standby'
# Subcloud sync status
SYNC_STATUS_UNKNOWN = "unknown"
SYNC_STATUS_IN_SYNC = "in-sync"
SYNC_STATUS_OUT_OF_SYNC = "out-of-sync"
# Subcloud endpoint related database fields
ENDPOINT_SYNC_STATUS = "endpoint_sync_status"
SYNC_STATUS = "sync_status"


@ -29,7 +29,7 @@ from oslo_log import log as logging
from tsconfig.tsconfig import SW_VERSION
from dccommon.consts import DEPLOY_DIR
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon.exceptions import PlaybookExecutionFailed
@ -44,7 +44,7 @@ from dcmanager.db import api as db_api
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DEPLOY_BASE_DIR = DEPLOY_DIR + '/' + SW_VERSION
DEPLOY_BASE_DIR = dccommon_consts.DEPLOY_DIR + '/' + SW_VERSION
PREPARE_PRESTAGE_PACKAGES_OUTPUT_PATH = DEPLOY_BASE_DIR + '/prestage/shared'
PRESTAGE_PREPARATION_COMPLETED_FILE = os.path.join(
PREPARE_PRESTAGE_PACKAGES_OUTPUT_PATH, '.prestage_preparation_completed')
@ -71,16 +71,16 @@ def _get_system_controller_upgrades():
# get a cached keystone client (and token)
try:
os_client = OpenStackDriver(
region_name=consts.SYSTEM_CONTROLLER_NAME,
region_name=dccommon_consts.SYSTEM_CONTROLLER_NAME,
region_clients=None)
except Exception:
LOG.exception("Failed to get keystone client for %s",
consts.SYSTEM_CONTROLLER_NAME)
dccommon_consts.SYSTEM_CONTROLLER_NAME)
raise
ks_client = os_client.keystone_client
sysinv_client = SysinvClient(
consts.SYSTEM_CONTROLLER_NAME, ks_client.session,
dccommon_consts.SYSTEM_CONTROLLER_NAME, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'))
return sysinv_client.get_upgrades()
@ -95,7 +95,7 @@ def global_prestage_validate(payload):
if is_system_controller_upgrading():
raise exceptions.PrestagePreCheckFailedException(
subcloud=consts.SYSTEM_CONTROLLER_NAME,
subcloud=dccommon_consts.SYSTEM_CONTROLLER_NAME,
details='Prestage operations not allowed while system'
' controller upgrade is in progress.')
@ -127,13 +127,13 @@ def initial_subcloud_validate(subcloud):
"""
LOG.debug("Validating subcloud prestage '%s'", subcloud.name)
if subcloud.availability_status != consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
raise exceptions.PrestagePreCheckFailedException(
subcloud=subcloud.name,
orch_skip=True,
details="Subcloud is offline.")
if subcloud.management_state != consts.MANAGEMENT_MANAGED:
if subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED:
raise exceptions.PrestagePreCheckFailedException(
subcloud=subcloud.name,
orch_skip=True,


@ -34,7 +34,6 @@ from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.common import exceptions
from dcmanager.db import api as db_api
from dcorch.common import consts as dcorch_consts
LOG = logging.getLogger(__name__)
@ -175,9 +174,9 @@ def validate_expiry_date(expiry_date):
def validate_quota_limits(payload):
for resource in payload:
# Check valid resource name
if resource not in itertools.chain(dcorch_consts.CINDER_QUOTA_FIELDS,
dcorch_consts.NOVA_QUOTA_FIELDS,
dcorch_consts.NEUTRON_QUOTA_FIELDS):
if resource not in itertools.chain(dccommon_consts.CINDER_QUOTA_FIELDS,
dccommon_consts.NOVA_QUOTA_FIELDS,
dccommon_consts.NEUTRON_QUOTA_FIELDS):
raise exceptions.InvalidInputError
# Check valid quota limit value in case for put/post
if isinstance(payload, dict) and (not isinstance(


@ -23,7 +23,7 @@ SQLAlchemy is currently the only supported backend.
from oslo_config import cfg
from oslo_db import api
from dcmanager.common import consts
from dccommon import consts as dccommon_consts
CONF = cfg.CONF
@ -377,7 +377,7 @@ def strategy_step_db_model_to_dict(strategy_step):
if strategy_step.subcloud is not None:
cloud = strategy_step.subcloud.name
else:
cloud = consts.SYSTEM_CONTROLLER_NAME
cloud = dccommon_consts.SYSTEM_CONTROLLER_NAME
result = {"id": strategy_step.id,
"cloud": cloud,
"stage": strategy_step.stage,


@ -38,12 +38,12 @@ from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import load_only
from sqlalchemy.sql.expression import true
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.common import exceptions as exception
from dcmanager.common.i18n import _
from dcmanager.db.sqlalchemy import migration
from dcmanager.db.sqlalchemy import models
from dcorch.common import consts as dcorch_consts
LOG = logging.getLogger(__name__)
@ -349,8 +349,8 @@ def subcloud_create(context, name, description, location, software_version,
subcloud_ref.description = description
subcloud_ref.location = location
subcloud_ref.software_version = software_version
subcloud_ref.management_state = consts.MANAGEMENT_UNMANAGED
subcloud_ref.availability_status = consts.AVAILABILITY_OFFLINE
subcloud_ref.management_state = dccommon_consts.MANAGEMENT_UNMANAGED
subcloud_ref.availability_status = dccommon_consts.AVAILABILITY_OFFLINE
subcloud_ref.management_subnet = management_subnet
subcloud_ref.management_gateway_ip = management_gateway_ip
subcloud_ref.management_start_ip = management_start_ip
@ -454,7 +454,7 @@ def subcloud_status_create(context, subcloud_id, endpoint_type):
subcloud_status_ref = models.SubcloudStatus()
subcloud_status_ref.subcloud_id = subcloud_id
subcloud_status_ref.endpoint_type = endpoint_type
subcloud_status_ref.sync_status = consts.SYNC_STATUS_UNKNOWN
subcloud_status_ref.sync_status = dccommon_consts.SYNC_STATUS_UNKNOWN
session.add(subcloud_status_ref)
return subcloud_status_ref
@ -462,11 +462,11 @@ def subcloud_status_create(context, subcloud_id, endpoint_type):
@require_admin_context
def subcloud_status_create_all(context, subcloud_id):
with write_session() as session:
for endpoint_type in dcorch_consts.ENDPOINT_TYPES_LIST:
for endpoint_type in dccommon_consts.ENDPOINT_TYPES_LIST:
subcloud_status_ref = models.SubcloudStatus()
subcloud_status_ref.subcloud_id = subcloud_id
subcloud_status_ref.endpoint_type = endpoint_type
subcloud_status_ref.sync_status = consts.SYNC_STATUS_UNKNOWN
subcloud_status_ref.sync_status = dccommon_consts.SYNC_STATUS_UNKNOWN
session.add(subcloud_status_ref)


@ -38,7 +38,6 @@ from dccommon import kubeoperator
from dccommon.subcloud_install import SubcloudInstall
from dccommon.utils import run_playbook
from dcorch.common import consts as dcorch_consts
from dcorch.rpc import client as dcorch_rpc_client
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
@ -289,11 +288,11 @@ class SubcloudManager(manager.Manager):
# Create a new route to this subcloud on the management interface
# on both controllers.
m_ks_client = OpenStackDriver(
region_name=consts.DEFAULT_REGION_NAME,
region_name=dccommon_consts.DEFAULT_REGION_NAME,
region_clients=None).keystone_client
subcloud_subnet = netaddr.IPNetwork(payload['management_subnet'])
endpoint = m_ks_client.endpoint_cache.get_endpoint('sysinv')
sysinv_client = SysinvClient(consts.DEFAULT_REGION_NAME,
sysinv_client = SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
m_ks_client.session,
endpoint=endpoint)
LOG.debug("Getting cached regionone data for %s" % subcloud.name)
@ -321,23 +320,23 @@ class SubcloudManager(manager.Manager):
endpoint_ip = '[' + endpoint_ip + ']'
for service in m_ks_client.services_list:
if service.type == dcorch_consts.ENDPOINT_TYPE_PLATFORM:
if service.type == dccommon_consts.ENDPOINT_TYPE_PLATFORM:
admin_endpoint_url = "https://{}:6386/v1".format(endpoint_ip)
endpoint_config.append({"id": service.id,
"admin_endpoint_url": admin_endpoint_url})
elif service.type == dcorch_consts.ENDPOINT_TYPE_IDENTITY:
elif service.type == dccommon_consts.ENDPOINT_TYPE_IDENTITY:
admin_endpoint_url = "https://{}:5001/v3".format(endpoint_ip)
endpoint_config.append({"id": service.id,
"admin_endpoint_url": admin_endpoint_url})
elif service.type == dcorch_consts.ENDPOINT_TYPE_PATCHING:
elif service.type == dccommon_consts.ENDPOINT_TYPE_PATCHING:
admin_endpoint_url = "https://{}:5492".format(endpoint_ip)
endpoint_config.append({"id": service.id,
"admin_endpoint_url": admin_endpoint_url})
elif service.type == dcorch_consts.ENDPOINT_TYPE_FM:
elif service.type == dccommon_consts.ENDPOINT_TYPE_FM:
admin_endpoint_url = "https://{}:18003".format(endpoint_ip)
endpoint_config.append({"id": service.id,
"admin_endpoint_url": admin_endpoint_url})
elif service.type == dcorch_consts.ENDPOINT_TYPE_NFV:
elif service.type == dccommon_consts.ENDPOINT_TYPE_NFV:
admin_endpoint_url = "https://{}:4546".format(endpoint_ip)
endpoint_config.append({"id": service.id,
"admin_endpoint_url": admin_endpoint_url})
@ -518,7 +517,7 @@ class SubcloudManager(manager.Manager):
subcloud.name, INVENTORY_FILE_POSTFIX)
m_ks_client = OpenStackDriver(
region_name=consts.DEFAULT_REGION_NAME,
region_name=dccommon_consts.DEFAULT_REGION_NAME,
region_clients=None).keystone_client
cached_regionone_data = self._get_cached_regionone_data(m_ks_client)
self._populate_payload_with_cached_keystone_data(
@ -630,7 +629,7 @@ class SubcloudManager(manager.Manager):
# Retrieve the subcloud details from the database
subcloud = db_api.subcloud_get(context, subcloud_id)
if subcloud.management_state != consts.MANAGEMENT_UNMANAGED:
if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED:
raise exceptions.SubcloudNotUnmanaged()
db_api.subcloud_update(context, subcloud_id,
@ -942,14 +941,14 @@ class SubcloudManager(manager.Manager):
"""Delete the routes to this subcloud"""
keystone_client = OpenStackDriver(
region_name=consts.DEFAULT_REGION_NAME,
region_name=dccommon_consts.DEFAULT_REGION_NAME,
region_clients=None).keystone_client
# Delete the route to this subcloud on the management interface on
# both controllers.
management_subnet = netaddr.IPNetwork(subcloud.management_subnet)
endpoint = keystone_client.endpoint_cache.get_endpoint('sysinv')
sysinv_client = SysinvClient(consts.DEFAULT_REGION_NAME, keystone_client.session,
sysinv_client = SysinvClient(dccommon_consts.DEFAULT_REGION_NAME, keystone_client.session,
endpoint=endpoint)
cached_regionone_data = self._get_cached_regionone_data(keystone_client, sysinv_client)
for mgmt_if_uuid in cached_regionone_data['mgmt_interface_uuids']:
@ -994,7 +993,7 @@ class SubcloudManager(manager.Manager):
# down so is not accessible. Therefore set up a session with the
# Central Region Keystone ONLY.
keystone_client = OpenStackDriver(
region_name=consts.DEFAULT_REGION_NAME,
region_name=dccommon_consts.DEFAULT_REGION_NAME,
region_clients=None).keystone_client
# Delete keystone endpoints for subcloud
@ -1032,11 +1031,11 @@ class SubcloudManager(manager.Manager):
subcloud = db_api.subcloud_get(context, subcloud_id)
# Semantic checking
if subcloud.management_state != consts.MANAGEMENT_UNMANAGED:
if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED:
raise exceptions.SubcloudNotUnmanaged()
if subcloud.availability_status == \
consts.AVAILABILITY_ONLINE:
dccommon_consts.AVAILABILITY_ONLINE:
raise exceptions.SubcloudNotOffline()
# Ansible inventory filename for the specified subcloud
@ -1061,7 +1060,7 @@ class SubcloudManager(manager.Manager):
"subcloud=%s" % subcloud.name),
(fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC,
"subcloud=%s.resource=%s" %
(subcloud.name, dcorch_consts.ENDPOINT_TYPE_DC_CERT))):
(subcloud.name, dccommon_consts.ENDPOINT_TYPE_DC_CERT))):
try:
fault = self.fm_api.get_fault(alarm_id,
entity_instance_id)
@ -1103,14 +1102,14 @@ class SubcloudManager(manager.Manager):
# Semantic checking
if management_state:
if management_state == consts.MANAGEMENT_UNMANAGED:
if subcloud.management_state == consts.MANAGEMENT_UNMANAGED:
if management_state == dccommon_consts.MANAGEMENT_UNMANAGED:
if subcloud.management_state == dccommon_consts.MANAGEMENT_UNMANAGED:
LOG.warning("Subcloud %s already unmanaged" % subcloud_id)
raise exceptions.BadRequest(
resource='subcloud',
msg='Subcloud is already unmanaged')
elif management_state == consts.MANAGEMENT_MANAGED:
if subcloud.management_state == consts.MANAGEMENT_MANAGED:
elif management_state == dccommon_consts.MANAGEMENT_MANAGED:
if subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED:
LOG.warning("Subcloud %s already managed" % subcloud_id)
raise exceptions.BadRequest(
resource='subcloud',
@ -1125,7 +1124,7 @@ class SubcloudManager(manager.Manager):
resource='subcloud',
msg='Subcloud can be managed only if deploy status is complete')
if subcloud.availability_status != \
consts.AVAILABILITY_ONLINE:
dccommon_consts.AVAILABILITY_ONLINE:
LOG.warning("Subcloud %s is not online" % subcloud_id)
raise exceptions.SubcloudNotOnline()
else:
@ -1168,7 +1167,7 @@ class SubcloudManager(manager.Manager):
description=description,
location=location)
if management_state == consts.MANAGEMENT_UNMANAGED:
if management_state == dccommon_consts.MANAGEMENT_UNMANAGED:
# set all endpoint statuses to unknown, except the dc-cert
# endpoint which continues to be audited for unmanaged
# subclouds
@ -1176,9 +1175,9 @@ class SubcloudManager(manager.Manager):
context,
subcloud_name=subcloud.name,
endpoint_type=None,
sync_status=consts.SYNC_STATUS_UNKNOWN,
ignore_endpoints=[dcorch_consts.ENDPOINT_TYPE_DC_CERT])
elif management_state == consts.MANAGEMENT_MANAGED:
sync_status=dccommon_consts.SYNC_STATUS_UNKNOWN,
ignore_endpoints=[dccommon_consts.ENDPOINT_TYPE_DC_CERT])
elif management_state == dccommon_consts.MANAGEMENT_MANAGED:
# Subcloud is managed
# Tell cert-mon to audit endpoint certificate
LOG.info('Request for managed audit for %s' % subcloud.name)
@ -1187,8 +1186,8 @@ class SubcloudManager(manager.Manager):
# Since sysinv user is sync'ed during bootstrap, trigger the
# related audits. Patch and load audits are delayed until the
# identity resource synchronized by dcdbsync is complete.
exclude_endpoints = [dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_LOAD]
exclude_endpoints = [dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_LOAD]
self.audit_rpc_client.trigger_subcloud_audits(
context, subcloud_id, exclude_endpoints)
@ -1282,9 +1281,10 @@ class SubcloudManager(manager.Manager):
if regionone_sysinv_client is None:
endpoint = regionone_keystone_client.endpoint_cache.get_endpoint('sysinv')
regionone_sysinv_client = SysinvClient(consts.DEFAULT_REGION_NAME,
regionone_keystone_client.session,
endpoint=endpoint)
regionone_sysinv_client = SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME,
regionone_keystone_client.session,
endpoint=endpoint)
controllers = regionone_sysinv_client.get_controller_hosts()
mgmt_interface_uuids = []


@ -1,5 +1,5 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -22,6 +22,7 @@ import time
from keystoneauth1 import exceptions as keystone_exceptions
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
@ -99,7 +100,7 @@ class OrchThread(threading.Thread):
self.thread_group_manager.stop()
@staticmethod
def get_ks_client(region_name=consts.DEFAULT_REGION_NAME):
def get_ks_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
"""This will get a cached keystone client (and token)
throws an exception if keystone client cannot be initialized
@ -109,7 +110,7 @@ class OrchThread(threading.Thread):
return os_client.keystone_client
@staticmethod
def get_vim_client(region_name=consts.DEFAULT_REGION_NAME):
def get_vim_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
ks_client = OrchThread.get_ks_client(region_name)
return vim.VimClient(region_name, ks_client.session)
@ -118,7 +119,7 @@ class OrchThread(threading.Thread):
"""Get the region name for a strategy step"""
if strategy_step.subcloud_id is None:
# This is the SystemController.
return consts.DEFAULT_REGION_NAME
return dccommon_consts.DEFAULT_REGION_NAME
return strategy_step.subcloud.name
@staticmethod
@ -340,7 +341,7 @@ class OrchThread(threading.Thread):
# started, it will be allowed to complete.
if strategy_step.subcloud_id is not None and \
strategy_step.subcloud.management_state == \
consts.MANAGEMENT_UNMANAGED:
dccommon_consts.MANAGEMENT_UNMANAGED:
message = ("Subcloud %s is unmanaged." %
strategy_step.subcloud.name)
LOG.warn(message)


@ -23,6 +23,7 @@ import time
from keystoneauth1 import exceptions as keystone_exceptions
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import patching_v1
from dccommon.drivers.openstack.patching_v1 import PatchingClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
@ -91,7 +92,7 @@ class PatchOrchThread(threading.Thread):
LOG.info("PatchOrchThread Stopped")
@staticmethod
def get_ks_client(region_name=consts.DEFAULT_REGION_NAME):
def get_ks_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
"""This will get a cached keystone client (and token)"""
try:
os_client = OpenStackDriver(
@ -102,17 +103,17 @@ class PatchOrchThread(threading.Thread):
LOG.warn('Failure initializing KeystoneClient %s' % region_name)
raise
def get_sysinv_client(self, region_name=consts.DEFAULT_REGION_NAME):
def get_sysinv_client(self, region_name=dccommon_consts.DEFAULT_REGION_NAME):
ks_client = self.get_ks_client(region_name)
return SysinvClient(region_name, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'))
def get_patching_client(self, region_name=consts.DEFAULT_REGION_NAME):
def get_patching_client(self, region_name=dccommon_consts.DEFAULT_REGION_NAME):
ks_client = self.get_ks_client(region_name)
return PatchingClient(region_name, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('patching'))
def get_vim_client(self, region_name=consts.DEFAULT_REGION_NAME):
def get_vim_client(self, region_name=dccommon_consts.DEFAULT_REGION_NAME):
ks_client = self.get_ks_client(region_name)
return vim.VimClient(region_name, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('vim'))
@ -122,7 +123,7 @@ class PatchOrchThread(threading.Thread):
"""Get the region name for a strategy step"""
if strategy_step.subcloud_id is None:
# This is the SystemController.
return consts.DEFAULT_REGION_NAME
return dccommon_consts.DEFAULT_REGION_NAME
else:
return strategy_step.subcloud.name
@ -151,7 +152,7 @@ class PatchOrchThread(threading.Thread):
"""Query the RegionOne to determine what patches should be applied/committed."""
self.regionone_patches = \
self.get_patching_client(consts.DEFAULT_REGION_NAME).query()
self.get_patching_client(dccommon_consts.DEFAULT_REGION_NAME).query()
LOG.debug("regionone_patches: %s" % self.regionone_patches)
# Build lists of patches that should be applied in this subcloud,
@ -169,7 +170,7 @@ class PatchOrchThread(threading.Thread):
# Then query RegionOne to determine what patches should be committed.
regionone_committed_patches = self.get_patching_client(
consts.DEFAULT_REGION_NAME).query(
dccommon_consts.DEFAULT_REGION_NAME).query(
state=patching_v1.PATCH_STATE_COMMITTED)
LOG.debug("regionone_committed_patches: %s" %
regionone_committed_patches)
@ -334,7 +335,7 @@ class PatchOrchThread(threading.Thread):
# started, it will be allowed to complete.
if strategy_step.subcloud_id is not None and \
strategy_step.subcloud.management_state == \
consts.MANAGEMENT_UNMANAGED:
dccommon_consts.MANAGEMENT_UNMANAGED:
message = ("Subcloud %s is unmanaged." %
strategy_step.subcloud.name)
LOG.warn(message)


@ -8,13 +8,13 @@ import six
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.barbican import BarbicanClient
from dccommon.drivers.openstack.fm import FmClient
from dccommon.drivers.openstack.patching_v1 import PatchingClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon.drivers.openstack.vim import VimClient
from dcmanager.common import consts
from dcmanager.common import context
from dcmanager.common.exceptions import InvalidParameterValue
@ -79,11 +79,11 @@ class BaseState(object):
"""Get the region name for a strategy step"""
if strategy_step.subcloud_id is None:
# This is the SystemController.
return consts.DEFAULT_REGION_NAME
return dccommon_consts.DEFAULT_REGION_NAME
return strategy_step.subcloud.name
@staticmethod
def get_keystone_client(region_name=consts.DEFAULT_REGION_NAME):
def get_keystone_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
"""Construct a (cached) keystone client (and token)"""
try:
@ -108,13 +108,13 @@ class BaseState(object):
keystone_client = self.get_keystone_client(region_name)
return FmClient(region_name, keystone_client.session)
def get_patching_client(self, region_name=consts.DEFAULT_REGION_NAME):
def get_patching_client(self, region_name=dccommon_consts.DEFAULT_REGION_NAME):
keystone_client = self.get_keystone_client(region_name)
return PatchingClient(region_name, keystone_client.session)
@property
def local_sysinv(self):
return self.get_sysinv_client(consts.DEFAULT_REGION_NAME)
return self.get_sysinv_client(dccommon_consts.DEFAULT_REGION_NAME)
@property
def subcloud_sysinv(self):


@ -1,16 +1,16 @@
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.common.exceptions import StrategyStoppedException
from dcmanager.orchestrator.states.base import BaseState
from dcmanager.orchestrator.states.firmware import utils
from dcmanager.rpc import client as dcmanager_rpc_client
from dcorch.common import consts as dcorch_consts
# When an unlock occurs, a reboot is triggered. During reboot, API calls fail.
# The max time allowed here is 30 minutes (ie: 30 queries with 1 minute sleep)
@ -30,15 +30,15 @@ class FinishingFwUpdateState(BaseState):
def align_subcloud_status(self, strategy_step):
self.info_log(strategy_step,
"Setting endpoint status of %s to %s"
% (dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
consts.SYNC_STATUS_IN_SYNC))
% (dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
dccommon_consts.SYNC_STATUS_IN_SYNC))
dcmanager_state_rpc_client = dcmanager_rpc_client.SubcloudStateClient()
# The subcloud name is the same as the region in the strategy_step
dcmanager_state_rpc_client.update_subcloud_endpoint_status(
self.context,
subcloud_name=self.get_region_name(strategy_step),
endpoint_type=dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=consts.SYNC_STATUS_IN_SYNC)
endpoint_type=dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)
def perform_state_action(self, strategy_step):
"""Finish the firmware update.


@ -1,10 +1,11 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2020-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.orchestrator.states.base import BaseState
from dcmanager.orchestrator.states.firmware import utils
@ -43,7 +44,7 @@ class ImportingFirmwareState(BaseState):
# ============== query system controller images ==============
system_controller_images = self.get_sysinv_client(
consts.DEFAULT_REGION_NAME).get_device_images()
dccommon_consts.DEFAULT_REGION_NAME).get_device_images()
# determine list of applied system controller images
applied_system_controller_images = \
utils.filter_applied_images(system_controller_images,


@ -1,9 +1,9 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
# Copyright (c) 2021-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.common.consts import DEFAULT_REGION_NAME
from dccommon.consts import DEFAULT_REGION_NAME
from dcmanager.common.consts import STRATEGY_STATE_COMPLETE
from dcmanager.common.consts \
import STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY


@ -8,10 +8,10 @@ import socket
from keystoneauth1 import exceptions as keystone_exceptions
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.patching_v1 import PatchingClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcmanager.common import consts
LOG = logging.getLogger(__name__)
@ -26,18 +26,18 @@ CLIENT_READ_MAX_ATTEMPTS = 2
def get_sysinv_client():
ks_client = get_keystone_client()
return SysinvClient(consts.DEFAULT_REGION_NAME, ks_client.session,
return SysinvClient(dccommon_consts.DEFAULT_REGION_NAME, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'),
timeout=CLIENT_READ_TIMEOUT_SECONDS)
def get_patching_client():
ks_client = get_keystone_client()
return PatchingClient(consts.DEFAULT_REGION_NAME, ks_client.session,
return PatchingClient(dccommon_consts.DEFAULT_REGION_NAME, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('patching'))
def get_keystone_client(region_name=consts.DEFAULT_REGION_NAME):
def get_keystone_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
"""Construct a (cached) keystone client (and token)"""
try:


@ -3,6 +3,7 @@
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.common import exceptions
from dcmanager.orchestrator.states.base import BaseState
@ -51,7 +52,7 @@ class InstallingLicenseState(BaseState):
else:
# An unexpected error occurred querying the license
raise exceptions.LicenseInstallError(
subcloud_id=consts.SYSTEM_CONTROLLER_NAME)
subcloud_id=dccommon_consts.SYSTEM_CONTROLLER_NAME)
# retrieve the keystone session for the subcloud and query its license
subcloud_sysinv_client = \


@ -6,6 +6,7 @@
import copy
import re
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sysinv_v1 import HOST_FS_NAME_SCRATCH
from dcmanager.common import consts
from dcmanager.common.exceptions import ManualRecoveryRequiredException
@ -172,7 +173,7 @@ class PreCheckState(BaseState):
subcloud = db_api.subcloud_get(self.context, strategy_step.subcloud.id)
if subcloud.availability_status == consts.AVAILABILITY_ONLINE:
if subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE:
subcloud_sysinv_client = None
try:
subcloud_sysinv_client = self.get_sysinv_client(strategy_step.subcloud.name)


@ -1,5 +1,5 @@
# Copyright 2017 Ericsson AB.
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -20,6 +20,7 @@ import threading
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
from dcmanager.common import consts
from dcmanager.common import exceptions
@ -35,7 +36,6 @@ from dcmanager.orchestrator.kube_upgrade_orch_thread \
from dcmanager.orchestrator.patch_orch_thread import PatchOrchThread
from dcmanager.orchestrator.prestage_orch_thread import PrestageOrchThread
from dcmanager.orchestrator.sw_upgrade_orch_thread import SwUpgradeOrchThread
from dcorch.common import consts as dcorch_consts
LOG = logging.getLogger(__name__)
@ -113,56 +113,56 @@ class SwUpdateManager(manager.Manager):
"""
if strategy_type == consts.SW_UPDATE_TYPE_PATCH:
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_PATCHING and
dccommon_consts.ENDPOINT_TYPE_PATCHING and
subcloud_status.sync_status ==
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
# force option only has an effect in offline case for upgrade
if force and (availability_status != consts.AVAILABILITY_ONLINE):
if force and (availability_status != dccommon_consts.AVAILABILITY_ONLINE):
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_LOAD and
dccommon_consts.ENDPOINT_TYPE_LOAD and
subcloud_status.sync_status !=
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.SYNC_STATUS_IN_SYNC)
else:
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_LOAD and
dccommon_consts.ENDPOINT_TYPE_LOAD and
subcloud_status.sync_status ==
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_FIRMWARE:
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_FIRMWARE and
dccommon_consts.ENDPOINT_TYPE_FIRMWARE and
subcloud_status.sync_status ==
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_KUBERNETES:
if force:
# run for in-sync and out-of-sync (but not unknown)
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_KUBERNETES and
dccommon_consts.ENDPOINT_TYPE_KUBERNETES and
subcloud_status.sync_status !=
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.SYNC_STATUS_UNKNOWN)
else:
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_KUBERNETES and
dccommon_consts.ENDPOINT_TYPE_KUBERNETES and
subcloud_status.sync_status ==
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
if force:
# run for in-sync and out-of-sync (but not unknown)
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_KUBE_ROOTCA and
dccommon_consts.ENDPOINT_TYPE_KUBE_ROOTCA and
subcloud_status.sync_status !=
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.SYNC_STATUS_UNKNOWN)
else:
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_KUBE_ROOTCA and
dccommon_consts.ENDPOINT_TYPE_KUBE_ROOTCA and
subcloud_status.sync_status ==
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_PRESTAGE:
# For prestage we reuse the ENDPOINT_TYPE_LOAD.
# We just need to key off a unique endpoint,
# so that the strategy is created only once.
return (subcloud_status.endpoint_type
== dcorch_consts.ENDPOINT_TYPE_LOAD)
== dccommon_consts.ENDPOINT_TYPE_LOAD)
# Unimplemented strategy_type status check. Log an error
LOG.error("_validate_subcloud_status_sync for %s not implemented" %
strategy_type)
@ -311,7 +311,7 @@ class SwUpdateManager(manager.Manager):
# todo(abailey): refactor this code to use classes
cloud_name = payload.get('cloud_name')
prestage_global_validated = False
if cloud_name and cloud_name != consts.SYSTEM_CONTROLLER_NAME:
if cloud_name and cloud_name != dccommon_consts.SYSTEM_CONTROLLER_NAME:
# Make sure subcloud exists
try:
subcloud = db_api.subcloud_get_by_name(context, cloud_name)
@ -323,15 +323,15 @@ class SwUpdateManager(manager.Manager):
if strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
# Make sure subcloud requires upgrade
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dcorch_consts.ENDPOINT_TYPE_LOAD)
if subcloud_status.sync_status == consts.SYNC_STATUS_IN_SYNC:
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_LOAD)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require upgrade' % cloud_name)
elif strategy_type == consts.SW_UPDATE_TYPE_FIRMWARE:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dcorch_consts.ENDPOINT_TYPE_FIRMWARE)
if subcloud_status.sync_status == consts.SYNC_STATUS_IN_SYNC:
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_FIRMWARE)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require firmware update'
@ -343,8 +343,8 @@ class SwUpdateManager(manager.Manager):
else:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id,
dcorch_consts.ENDPOINT_TYPE_KUBERNETES)
if subcloud_status.sync_status == consts.SYNC_STATUS_IN_SYNC:
dccommon_consts.ENDPOINT_TYPE_KUBERNETES)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require kubernetes update'
@ -356,8 +356,8 @@ class SwUpdateManager(manager.Manager):
else:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id,
dcorch_consts.ENDPOINT_TYPE_KUBE_ROOTCA)
if subcloud_status.sync_status == consts.SYNC_STATUS_IN_SYNC:
dccommon_consts.ENDPOINT_TYPE_KUBE_ROOTCA)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require kube rootca update'
@ -365,8 +365,8 @@ class SwUpdateManager(manager.Manager):
elif strategy_type == consts.SW_UPDATE_TYPE_PATCH:
# Make sure subcloud requires patching
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dcorch_consts.ENDPOINT_TYPE_PATCHING)
if subcloud_status.sync_status == consts.SYNC_STATUS_IN_SYNC:
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_PATCHING)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require patching' % cloud_name)
@ -428,62 +428,62 @@ class SwUpdateManager(manager.Manager):
subclouds_processed = list()
for subcloud, subcloud_status in subclouds:
if (cloud_name and subcloud.name != cloud_name or
subcloud.management_state != consts.MANAGEMENT_MANAGED):
subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED):
# We are not updating this subcloud
continue
if strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
if subcloud.availability_status != consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
if not force:
continue
elif (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_LOAD and
dccommon_consts.ENDPOINT_TYPE_LOAD and
subcloud_status.sync_status ==
consts.SYNC_STATUS_UNKNOWN):
dccommon_consts.SYNC_STATUS_UNKNOWN):
raise exceptions.BadRequest(
resource='strategy',
msg='Upgrade sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_PATCH:
if subcloud.availability_status != consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
continue
elif (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_PATCHING and
dccommon_consts.ENDPOINT_TYPE_PATCHING and
subcloud_status.sync_status ==
consts.SYNC_STATUS_UNKNOWN):
dccommon_consts.SYNC_STATUS_UNKNOWN):
raise exceptions.BadRequest(
resource='strategy',
msg='Patching sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_FIRMWARE:
if subcloud.availability_status != consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
continue
elif (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_FIRMWARE and
dccommon_consts.ENDPOINT_TYPE_FIRMWARE and
subcloud_status.sync_status ==
consts.SYNC_STATUS_UNKNOWN):
dccommon_consts.SYNC_STATUS_UNKNOWN):
raise exceptions.BadRequest(
resource='strategy',
msg='Firmware sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_KUBERNETES:
if subcloud.availability_status != consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
continue
elif (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_KUBERNETES and
dccommon_consts.ENDPOINT_TYPE_KUBERNETES and
subcloud_status.sync_status ==
consts.SYNC_STATUS_UNKNOWN):
dccommon_consts.SYNC_STATUS_UNKNOWN):
raise exceptions.BadRequest(
resource='strategy',
msg='Kubernetes sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
if subcloud.availability_status != consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
continue
elif (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_KUBE_ROOTCA and
dccommon_consts.ENDPOINT_TYPE_KUBE_ROOTCA and
subcloud_status.sync_status ==
consts.SYNC_STATUS_UNKNOWN):
dccommon_consts.SYNC_STATUS_UNKNOWN):
raise exceptions.BadRequest(
resource='strategy',
msg='Kube rootca update sync status is unknown for '
@ -551,11 +551,11 @@ class SwUpdateManager(manager.Manager):
for subcloud in subclouds_list:
stage_updated = False
if (cloud_name and subcloud.name != cloud_name or
subcloud.management_state != consts.MANAGEMENT_MANAGED):
subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED):
# We are not targeting this subcloud for update
continue
if subcloud.availability_status != consts.AVAILABILITY_ONLINE:
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
if strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
if not force:
continue
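
For reviewers tracing the rename in sw_update_manager above, a minimal sketch (not part of this commit) of the per-strategy check that _validate_subcloud_status_sync now expresses with the relocated constants; only the constant names visible in this diff are assumed:

# Illustrative only: the per-strategy endpoint pairing above, condensed into
# a table keyed by the strategy constants that stay in dcmanager.common.consts.
from dccommon import consts as dccommon_consts
from dcmanager.common import consts

STRATEGY_ENDPOINT = {
    consts.SW_UPDATE_TYPE_PATCH: dccommon_consts.ENDPOINT_TYPE_PATCHING,
    consts.SW_UPDATE_TYPE_UPGRADE: dccommon_consts.ENDPOINT_TYPE_LOAD,
    consts.SW_UPDATE_TYPE_FIRMWARE: dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
    consts.SW_UPDATE_TYPE_KUBERNETES: dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
    consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
        dccommon_consts.ENDPOINT_TYPE_KUBE_ROOTCA,
}


def needs_update(strategy_type, subcloud_status):
    # True when the endpoint tracked for this strategy is out of sync.
    return (subcloud_status.endpoint_type == STRATEGY_ENDPOINT.get(strategy_type)
            and subcloud_status.sync_status ==
            dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
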

View File

@ -18,6 +18,7 @@ Client side of the DC Manager RPC API.
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.common import messaging
@ -82,8 +83,7 @@ class SubcloudStateClient(RPCClient):
def update_subcloud_endpoint_status(self, ctxt, subcloud_name=None,
endpoint_type=None,
sync_status=consts.
SYNC_STATUS_OUT_OF_SYNC,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
ignore_endpoints=None):
# Note: This is an asynchronous operation.
# See below for synchronous method call
@ -95,8 +95,7 @@ class SubcloudStateClient(RPCClient):
def update_subcloud_endpoint_status_sync(self, ctxt, subcloud_name=None,
endpoint_type=None,
sync_status=consts.
SYNC_STATUS_OUT_OF_SYNC,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
ignore_endpoints=None):
# Note: synchronous
return self.call(ctxt, self.make_msg('update_subcloud_endpoint_status',
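
A hedged usage sketch for the client above after its default moves to dccommon_consts; the import path and the context object are assumptions, while the class, method, and keyword names come from this diff:

from dccommon import consts as dccommon_consts
# Assumed import path for the client class shown above.
from dcmanager.rpc import client as rpc_client

ctxt = None  # placeholder for a real request context
state_client = rpc_client.SubcloudStateClient()
state_client.update_subcloud_endpoint_status(
    ctxt,
    subcloud_name='subcloud1',
    endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
    sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
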

View File

@ -25,7 +25,7 @@ from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from dcorch.common import consts as dcorch_consts
from dccommon import consts as dccommon_consts
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
from dcmanager.common import consts
@ -112,8 +112,7 @@ class DCManagerStateService(service.Service):
@request_context
def update_subcloud_endpoint_status(self, context, subcloud_name=None,
endpoint_type=None,
sync_status=consts.
SYNC_STATUS_OUT_OF_SYNC,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
alarmable=True,
ignore_endpoints=None):
# Updates subcloud endpoint sync status
@ -131,20 +130,20 @@ class DCManagerStateService(service.Service):
# If the patching sync status is being set to unknown, trigger the
# patching audit so it can update the sync status ASAP.
if endpoint_type == dcorch_consts.ENDPOINT_TYPE_PATCHING and \
sync_status == consts.SYNC_STATUS_UNKNOWN:
if endpoint_type == dccommon_consts.ENDPOINT_TYPE_PATCHING and \
sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
self.audit_rpc_client.trigger_patch_audit(context)
# If the firmware sync status is being set to unknown, trigger the
# firmware audit so it can update the sync status ASAP.
if endpoint_type == dcorch_consts.ENDPOINT_TYPE_FIRMWARE and \
sync_status == consts.SYNC_STATUS_UNKNOWN:
if endpoint_type == dccommon_consts.ENDPOINT_TYPE_FIRMWARE and \
sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
self.audit_rpc_client.trigger_firmware_audit(context)
# If the kubernetes sync status is being set to unknown, trigger the
# kubernetes audit so it can update the sync status ASAP.
if endpoint_type == dcorch_consts.ENDPOINT_TYPE_KUBERNETES and \
sync_status == consts.SYNC_STATUS_UNKNOWN:
if endpoint_type == dccommon_consts.ENDPOINT_TYPE_KUBERNETES and \
sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
self.audit_rpc_client.trigger_kubernetes_audit(context)
return
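
The three unknown-status checks above follow one pattern; a hedged, table-driven restatement (illustrative only, not how the service is written), assuming the same method context and the dccommon_consts import already added in this file:

# Maps an endpoint type to the audit trigger invoked when its sync
# status is reset to unknown; trigger names are taken from the diff above.
AUDIT_TRIGGERS = {
    dccommon_consts.ENDPOINT_TYPE_PATCHING: 'trigger_patch_audit',
    dccommon_consts.ENDPOINT_TYPE_FIRMWARE: 'trigger_firmware_audit',
    dccommon_consts.ENDPOINT_TYPE_KUBERNETES: 'trigger_kubernetes_audit',
}

if sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
    trigger = AUDIT_TRIGGERS.get(endpoint_type)
    if trigger:
        getattr(self.audit_rpc_client, trigger)(context)
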

View File

@ -19,7 +19,7 @@
from oslo_log import log as logging
from dcorch.common import consts as dcorch_consts
from dccommon import consts as dccommon_consts
from dcorch.rpc import client as dcorch_rpc_client
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
@ -98,7 +98,7 @@ class SubcloudStateManager(manager.Manager):
db_api.subcloud_endpoint_status_db_model_to_dict(
subcloud_status))
if subcloud_status.endpoint_type == \
dcorch_consts.ENDPOINT_TYPE_IDENTITY:
dccommon_consts.ENDPOINT_TYPE_IDENTITY:
original_identity_status = subcloud_status.sync_status
except Exception as e:
LOG.exception(e)
@ -133,9 +133,9 @@ class SubcloudStateManager(manager.Manager):
# Trigger subcloud patch and load audits for the subcloud after
# its identity endpoint transitions out of the unknown status
if endpoint_type == dcorch_consts.ENDPOINT_TYPE_IDENTITY \
and sync_status != consts.SYNC_STATUS_UNKNOWN \
and original_identity_status == consts.SYNC_STATUS_UNKNOWN:
if endpoint_type == dccommon_consts.ENDPOINT_TYPE_IDENTITY \
and sync_status != dccommon_consts.SYNC_STATUS_UNKNOWN \
and original_identity_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
LOG.debug('Request for patch and load audit for %s after updating '
'identity out of unknown' % subcloud.name)
self.audit_rpc_client.trigger_subcloud_patch_load_audits(
@ -147,7 +147,7 @@ class SubcloudStateManager(manager.Manager):
fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC,
entity_instance_id)
if (sync_status != consts.SYNC_STATUS_OUT_OF_SYNC) \
if (sync_status != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) \
and fault:
try:
self.fm_api.clear_fault(
@ -157,7 +157,7 @@ class SubcloudStateManager(manager.Manager):
LOG.exception(e)
elif not fault and alarmable and \
(sync_status == consts.SYNC_STATUS_OUT_OF_SYNC):
(sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC):
entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
try:
fault = fm_api.Fault(
@ -213,7 +213,7 @@ class SubcloudStateManager(manager.Manager):
# given subcloud if fm_api supports it. Be careful with the
# dc-cert endpoint when adding the above; the endpoint
# alarm must remain for offline subclouds.
if (sync_status != consts.SYNC_STATUS_OUT_OF_SYNC) \
if (sync_status != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) \
and fault:
try:
self.fm_api.clear_fault(
@ -223,7 +223,7 @@ class SubcloudStateManager(manager.Manager):
LOG.exception(e)
elif not fault and alarmable and \
(sync_status == consts.SYNC_STATUS_OUT_OF_SYNC):
(sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC):
entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
try:
fault = fm_api.Fault(
@ -262,7 +262,7 @@ class SubcloudStateManager(manager.Manager):
self, context,
subcloud_name,
endpoint_type=None,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
alarmable=True,
ignore_endpoints=None):
"""Update subcloud endpoint status
@ -304,10 +304,10 @@ class SubcloudStateManager(manager.Manager):
# This means if a subcloud is going offline or unmanaged, then
# the sync status update must be done first.
#
if (sync_status != consts.SYNC_STATUS_IN_SYNC or
((subcloud.availability_status == consts.AVAILABILITY_ONLINE) and
(subcloud.management_state == consts.MANAGEMENT_MANAGED
or endpoint_type == dcorch_consts.ENDPOINT_TYPE_DC_CERT))):
if (sync_status != dccommon_consts.SYNC_STATUS_IN_SYNC or
((subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE) and
(subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED
or endpoint_type == dccommon_consts.ENDPOINT_TYPE_DC_CERT))):
# update a single subcloud
try:
self._do_update_subcloud_endpoint_status(context,
@ -329,7 +329,7 @@ class SubcloudStateManager(manager.Manager):
self, context,
subcloud_name=None,
endpoint_type=None,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
alarmable=True,
ignore_endpoints=None):
"""Update subcloud endpoint status
@ -380,7 +380,7 @@ class SubcloudStateManager(manager.Manager):
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
entity_instance_id)
if fault and (availability_status == consts.AVAILABILITY_ONLINE):
if fault and (availability_status == dccommon_consts.AVAILABILITY_ONLINE):
try:
self.fm_api.clear_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
@ -390,7 +390,7 @@ class SubcloudStateManager(manager.Manager):
subcloud_name)
elif not fault and \
(availability_status == consts.AVAILABILITY_OFFLINE):
(availability_status == dccommon_consts.AVAILABILITY_OFFLINE):
try:
fault = fm_api.Fault(
alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
@ -444,12 +444,12 @@ class SubcloudStateManager(manager.Manager):
self._raise_or_clear_subcloud_status_alarm(subcloud_name,
availability_status)
if availability_status == consts.AVAILABILITY_OFFLINE:
if availability_status == dccommon_consts.AVAILABILITY_OFFLINE:
# Subcloud is going offline, set all endpoint statuses to
# unknown.
self._update_subcloud_endpoint_status(
context, subcloud_name, endpoint_type=None,
sync_status=consts.SYNC_STATUS_UNKNOWN)
sync_status=dccommon_consts.SYNC_STATUS_UNKNOWN)
try:
updated_subcloud = db_api.subcloud_update(
@ -464,7 +464,7 @@ class SubcloudStateManager(manager.Manager):
' update: %s' % subcloud_name)
return
if availability_status == consts.AVAILABILITY_ONLINE:
if availability_status == dccommon_consts.AVAILABILITY_ONLINE:
# Subcloud is going online
# Tell cert-mon to audit endpoint certificate.
LOG.info('Request for online audit for %s' % subcloud_name)
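
A condensed, hedged restatement of the availability handling above, using only names that appear in this file's diff and assuming the same method context:

# Going offline: every endpoint sync status is reset to unknown.
# Going online: cert-mon is asked to audit the subcloud's certificates.
if availability_status == dccommon_consts.AVAILABILITY_OFFLINE:
    self._update_subcloud_endpoint_status(
        context, subcloud_name, endpoint_type=None,
        sync_status=dccommon_consts.SYNC_STATUS_UNKNOWN)
elif availability_status == dccommon_consts.AVAILABILITY_ONLINE:
    LOG.info('Request for online audit for %s' % subcloud_name)
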

View File

@ -26,6 +26,7 @@ import six
from six.moves import http_client
import webtest
from dccommon import consts as dccommon_consts
from dcmanager.api.controllers.v1 import subclouds
from dcmanager.common import consts
from dcmanager.common import prestage
@ -98,11 +99,11 @@ class Subcloud(object):
self.name = data['name']
self.description = data['description']
self.location = data['location']
self.management_state = consts.MANAGEMENT_UNMANAGED
self.management_state = dccommon_consts.MANAGEMENT_UNMANAGED
if is_online:
self.availability_status = consts.AVAILABILITY_ONLINE
self.availability_status = dccommon_consts.AVAILABILITY_ONLINE
else:
self.availability_status = consts.AVAILABILITY_OFFLINE
self.availability_status = dccommon_consts.AVAILABILITY_OFFLINE
self.deploy_status = data['deploy_status']
self.management_subnet = data['management_subnet']
self.management_gateway_ip = data['management_gateway_address']
@ -876,7 +877,7 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
mock_get_oam_addresses):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
updated_subcloud = db_api.subcloud_update(
self.ctx, subcloud.id, availability_status=consts.AVAILABILITY_ONLINE)
self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE)
get_url = FAKE_URL + '/' + str(updated_subcloud.id) + '/detail'
oam_addresses = FakeOAMAddressPool('10.10.10.254',
@ -910,7 +911,7 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
mock_get_oam_addresses):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
updated_subcloud = db_api.subcloud_update(
self.ctx, subcloud.id, availability_status=consts.AVAILABILITY_ONLINE)
self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE)
get_url = FAKE_URL + '/' + str(updated_subcloud.id) + '/detail'
mock_get_oam_addresses.return_value = None
@ -938,7 +939,7 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
def test_patch_subcloud(self, mock_get_patch_data,
mock_rpc_client):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
data = {'management-state': consts.MANAGEMENT_UNMANAGED}
data = {'management-state': dccommon_consts.MANAGEMENT_UNMANAGED}
mock_rpc_client().update_subcloud.return_value = True
mock_get_patch_data.return_value = data
response = self.app.patch_json(FAKE_URL + '/' + str(subcloud.id),
@ -948,7 +949,7 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
# Verify subcloud was updated with correct values
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name)
self.assertEqual(consts.MANAGEMENT_UNMANAGED,
self.assertEqual(dccommon_consts.MANAGEMENT_UNMANAGED,
updated_subcloud.management_state)
@mock.patch.object(rpc_client, 'ManagerClient')
@ -1147,7 +1148,7 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
def test_patch_subcloud_bad_force_value(self, mock_get_patch_data,
mock_rpc_client):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
data = {'management-state': consts.MANAGEMENT_MANAGED,
data = {'management-state': dccommon_consts.MANAGEMENT_MANAGED,
'force': 'bad-value'}
mock_get_patch_data.return_value = data
six.assertRaisesRegex(self, webtest.app.AppError, "400 *",
@ -1160,7 +1161,7 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
def test_patch_subcloud_forced_unmanaged(self, mock_get_patch_data,
mock_rpc_client):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
data = {'management-state': consts.MANAGEMENT_UNMANAGED,
data = {'management-state': dccommon_consts.MANAGEMENT_UNMANAGED,
'force': True}
mock_get_patch_data.return_value = data
six.assertRaisesRegex(self, webtest.app.AppError, "400 *",
@ -1173,7 +1174,7 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
def test_patch_subcloud_forced_manage(self, mock_get_patch_data,
mock_rpc_client):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
data = {'management-state': consts.MANAGEMENT_MANAGED,
data = {'management-state': dccommon_consts.MANAGEMENT_MANAGED,
'force': True}
mock_rpc_client().update_subcloud.return_value = True
mock_get_patch_data.return_value = data
@ -1183,7 +1184,7 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
mock_rpc_client().update_subcloud.assert_called_once_with(
mock.ANY,
mock.ANY,
management_state=consts.MANAGEMENT_MANAGED,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
description=None,
location=None,
group_id=None,
@ -1469,7 +1470,7 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
db_api.subcloud_update(self.ctx,
subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES)
reinstall_data = copy.copy(FAKE_SUBCLOUD_BOOTSTRAP_PAYLOAD)
mock_get_request_data.return_value = reinstall_data
@ -1614,7 +1615,7 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
db_api.subcloud_update(self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_MANAGED)
management_state=dccommon_consts.MANAGEMENT_MANAGED)
restore_payload = copy.copy(self.FAKE_RESTORE_PAYLOAD)
mock_rpc_client().restore_subcloud.return_value = True
@ -1726,8 +1727,8 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
mock_rpc_client):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id, availability_status=consts.AVAILABILITY_ONLINE,
management_state=consts.MANAGEMENT_MANAGED)
self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED)
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
data = {'sysadmin_password': fake_password,
@ -1758,8 +1759,8 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id, availability_status=consts.AVAILABILITY_ONLINE,
management_state=consts.MANAGEMENT_UNMANAGED)
self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_UNMANAGED)
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
data = {'sysadmin_password': fake_password}
mock_controller_upgrade.return_value = list()
@ -1781,8 +1782,8 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id, availability_status=consts.AVAILABILITY_OFFLINE,
management_state=consts.MANAGEMENT_MANAGED)
self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_OFFLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED)
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
data = {'sysadmin_password': fake_password}
mock_controller_upgrade.return_value = list()
@ -1805,8 +1806,8 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
mock_rpc_client):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id, availability_status=consts.AVAILABILITY_ONLINE,
management_state=consts.MANAGEMENT_MANAGED)
self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED)
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
data = {'sysadmin_password': fake_password,
@ -1835,8 +1836,8 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id, availability_status=consts.AVAILABILITY_ONLINE,
management_state=consts.MANAGEMENT_MANAGED)
self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED)
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
data = {'sysadmin_password': fake_password,
@ -1869,8 +1870,8 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id, availability_status=consts.AVAILABILITY_ONLINE,
management_state=consts.MANAGEMENT_MANAGED)
self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED)
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
data = {'sysadmin_password': fake_password,
@ -1899,8 +1900,8 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id, availability_status=consts.AVAILABILITY_ONLINE,
management_state=consts.MANAGEMENT_MANAGED)
self.ctx, subcloud.id, availability_status=dccommon_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED)
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')
data = {'sysadmin_password': fake_password,
@ -1933,8 +1934,8 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
subcloud = db_api.subcloud_update(self.ctx, subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
deploy_status='NotAllowedState')
fake_password = (base64.b64encode('testpass'.encode("utf-8"))).decode('ascii')

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -17,17 +17,17 @@ import mock
from oslo_config import cfg
import sys
from dccommon import consts as dccommon_consts
sys.modules['fm_core'] = mock.Mock()
from dcmanager.audit import firmware_audit
from dcmanager.audit import patch_audit
from dcmanager.audit import subcloud_audit_manager
from dcmanager.common import consts
from dcmanager.tests import base
from dcmanager.tests import utils
from dcorch.common import consts as dcorch_consts
CONF = cfg.CONF
@ -467,8 +467,8 @@ class TestFirmwareAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=consts.SYNC_STATUS_IN_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -499,8 +499,8 @@ class TestFirmwareAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=consts.SYNC_STATUS_IN_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -530,8 +530,8 @@ class TestFirmwareAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=consts.SYNC_STATUS_IN_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -561,8 +561,8 @@ class TestFirmwareAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -592,8 +592,8 @@ class TestFirmwareAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -623,8 +623,8 @@ class TestFirmwareAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=consts.SYNC_STATUS_IN_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -654,8 +654,8 @@ class TestFirmwareAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=consts.SYNC_STATUS_IN_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -685,7 +685,7 @@ class TestFirmwareAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=consts.SYNC_STATUS_IN_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
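
The audit tests above repeat one expected-call shape; a hedged helper (illustrative, not part of this change) that would build the same mock.call entries with the relocated constants:

import mock

from dccommon import consts as dccommon_consts


def expected_status_call(name, endpoint_type, sync_status):
    # Mirrors the assertion pattern used throughout these audit tests.
    return mock.call(mock.ANY,
                     subcloud_name=name,
                     endpoint_type=endpoint_type,
                     sync_status=sync_status)


expected_calls = [expected_status_call(
    'subcloud1',
    dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
    dccommon_consts.SYNC_STATUS_IN_SYNC)]
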

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -15,12 +15,11 @@
import mock
import uuid
from dccommon import consts as dccommon_consts
from dcmanager.audit import firmware_audit
from dcmanager.audit import kubernetes_audit
from dcmanager.audit import patch_audit
from dcmanager.audit import subcloud_audit_manager
from dcmanager.common import consts
from dcorch.common import consts as dcorch_consts
from dcmanager.tests import base
from dcmanager.tests import utils
@ -172,8 +171,8 @@ class TestKubernetesAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_KUBERNETES,
sync_status=consts.SYNC_STATUS_IN_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -200,8 +199,8 @@ class TestKubernetesAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_KUBERNETES,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -228,8 +227,8 @@ class TestKubernetesAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_KUBERNETES,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -257,8 +256,8 @@ class TestKubernetesAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_KUBERNETES,
sync_status=consts.SYNC_STATUS_IN_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -292,7 +291,7 @@ class TestKubernetesAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_KUBERNETES,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -17,16 +17,16 @@ import mock
from oslo_config import cfg
import sys
from dccommon import consts as dccommon_consts
sys.modules['fm_core'] = mock.Mock()
from dcmanager.audit import patch_audit
from dcmanager.audit import subcloud_audit_manager
from dcmanager.common import consts
from dcmanager.tests import base
from dcmanager.tests import utils
from dcorch.common import consts as dcorch_consts
CONF = cfg.CONF
@ -307,12 +307,12 @@ class TestPatchAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status. \
assert_has_calls(expected_calls)
@ -342,36 +342,36 @@ class TestPatchAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud3',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud3',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud4',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud4',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status.\
@ -402,12 +402,12 @@ class TestPatchAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC),
mock.call(mock.ANY,
subcloud_name=name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC)]
endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status.\
assert_has_calls(expected_calls)
@ -437,20 +437,20 @@ class TestPatchAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC),
]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status.\
assert_has_calls(expected_calls)
@ -481,20 +481,20 @@ class TestPatchAudit(base.DCManagerTestCase):
expected_calls = [
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PATCHING,
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_LOAD,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC),
]
self.fake_dcmanager_state_api.update_subcloud_endpoint_status.\
assert_has_calls(expected_calls)

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -15,13 +15,15 @@
import mock
import sys
from dccommon import consts as dccommon_consts
sys.modules['fm_core'] = mock.Mock()
from dcmanager.audit import subcloud_audit_manager
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.tests import base
from dcorch.common import consts as dcorch_consts
class FakeAuditWorkerAPI(object):
@ -304,8 +306,8 @@ class TestAuditManager(base.DCManagerTestCase):
def test_audit_one_subcloud_exclude_endpoints(self):
subcloud = self.create_subcloud_static(self.ctx)
am = subcloud_audit_manager.SubcloudAuditManager()
exclude_endpoints = [dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_LOAD]
exclude_endpoints = [dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_LOAD]
am.trigger_subcloud_audits(self.ctx, subcloud.id, exclude_endpoints)
# Verify subaudits are requested.
result = db_api.subcloud_audits_get(self.ctx, subcloud.id)
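
For context, a hedged sketch of how a caller outside this test would now spell the exclusion list with the relocated constants; the manager instance and request context are placeholders:

from dccommon import consts as dccommon_consts

# am is assumed to be a SubcloudAuditManager, ctx a request context,
# and subcloud_id a valid subcloud id.
exclude_endpoints = [dccommon_consts.ENDPOINT_TYPE_PATCHING,
                     dccommon_consts.ENDPOINT_TYPE_LOAD]
am.trigger_subcloud_audits(ctx, subcloud_id, exclude_endpoints)
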

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -17,6 +17,7 @@ import mock
import random
import sys
sys.modules['fm_core'] = mock.Mock()
from dccommon import consts as dccommon_consts
@ -426,7 +427,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify the subcloud was set to online
self.fake_dcmanager_state_api.update_subcloud_availability.assert_called_with(
mock.ANY, subcloud.name, consts.AVAILABILITY_ONLINE,
mock.ANY, subcloud.name, dccommon_consts.AVAILABILITY_ONLINE,
False, 0)
# Verify the _update_subcloud_audit_fail_count is not called
@ -498,7 +499,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify the subcloud was set to online
self.fake_dcmanager_state_api.update_subcloud_availability.assert_called_with(
mock.ANY, subcloud.name, consts.AVAILABILITY_ONLINE,
mock.ANY, subcloud.name, dccommon_consts.AVAILABILITY_ONLINE,
False, 0)
# Verify the _update_subcloud_audit_fail_count is not called
@ -535,7 +536,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Set the subcloud to online
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
# Audit the subcloud
wm._audit_subcloud(subcloud, update_subcloud_state=False,
@ -578,7 +579,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Set the subcloud to online
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
# Audit the subcloud and force a state update
wm._audit_subcloud(subcloud, update_subcloud_state=True,
@ -594,7 +595,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify the subcloud state was updated even though no change
self.fake_dcmanager_state_api.update_subcloud_availability.assert_called_with(
mock.ANY, subcloud.name, consts.AVAILABILITY_ONLINE,
mock.ANY, subcloud.name, dccommon_consts.AVAILABILITY_ONLINE,
True, None)
# Verify the _update_subcloud_audit_fail_count is not called
@ -633,7 +634,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id,
management_state='managed',
availability_status=consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
# Mark a service group as inactive
self.fake_openstack_client.sysinv_client.get_service_groups_result = \
@ -678,7 +679,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Update the DB like dcmanager would do.
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id,
availability_status=consts.AVAILABILITY_OFFLINE,
availability_status=dccommon_consts.AVAILABILITY_OFFLINE,
audit_fail_count=audit_fail_count)
# Audit the subcloud again
@ -893,7 +894,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id,
management_state='managed',
availability_status=consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
# Audit the subcloud
wm._audit_subcloud(subcloud,
@ -951,7 +952,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id,
management_state='managed',
availability_status=consts.AVAILABILITY_ONLINE,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
openstack_installed=True)
# Remove stx-openstack application
@ -1012,7 +1013,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx, subcloud.id,
management_state='managed',
availability_status=consts.AVAILABILITY_ONLINE,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
openstack_installed=True)
# stx-openstack application is not active

View File

@ -16,6 +16,7 @@
#
from oslo_db import exception as db_exception
from dccommon import consts as dccommon_consts
from dcmanager.common import config
from dcmanager.common import consts
from dcmanager.common import exceptions
@ -23,7 +24,6 @@ from dcmanager.db import api as api
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.tests import base
from dcmanager.tests import utils
from dcorch.common import consts as dcorch_consts
config.register_options()
get_engine = api.get_engine
@ -236,7 +236,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
endpoint_type)
self.assertIsNotNone(new_subcloud_status)
self.assertEqual(endpoint_type, new_subcloud_status.endpoint_type)
self.assertEqual(consts.SYNC_STATUS_UNKNOWN,
self.assertEqual(dccommon_consts.SYNC_STATUS_UNKNOWN,
new_subcloud_status.sync_status)
def test_create_multiple_subcloud_statuses(self):
@ -248,7 +248,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
subcloud.id)
num_default_subcloud_statuses = len(default_subcloud_statuses)
self.assertEqual(num_default_subcloud_statuses,
len(dcorch_consts.ENDPOINT_TYPES_LIST))
len(dccommon_consts.ENDPOINT_TYPES_LIST))
endpoint_type1 = 'testendpoint1'
subcloud_status1 = self.create_subcloud_status(
@ -301,7 +301,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
self.ctx, endpoint_type=endpoint_type)
self.assertIsNotNone(subcloud_status)
sync_status = consts.SYNC_STATUS_IN_SYNC
sync_status = dccommon_consts.SYNC_STATUS_IN_SYNC
updated = db_api.subcloud_status_update(self.ctx, subcloud.id,
endpoint_type=endpoint_type,
sync_status=sync_status)
@ -335,7 +335,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
self.ctx, endpoint_type=endpoint_type3)
self.assertIsNotNone(subcloud_status)
sync_status = consts.SYNC_STATUS_IN_SYNC
sync_status = dccommon_consts.SYNC_STATUS_IN_SYNC
endpoint_type_list = [endpoint_type1, endpoint_type2]
db_api.subcloud_status_update_endpoints(self.ctx, subcloud.id,
endpoint_type_list=endpoint_type_list,
@ -374,7 +374,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
endpoint_type2 = 'testendpoint2'
sync_status = consts.SYNC_STATUS_IN_SYNC
sync_status = dccommon_consts.SYNC_STATUS_IN_SYNC
endpoint_type_list = [endpoint_type2]
self.assertRaises(exceptions.SubcloudStatusNotFound,
db_api.subcloud_status_update_endpoints,
@ -424,7 +424,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
subcloud.id)
num_default_subcloud_statuses = len(default_subcloud_statuses)
self.assertEqual(num_default_subcloud_statuses,
len(dcorch_consts.ENDPOINT_TYPES_LIST))
len(dccommon_consts.ENDPOINT_TYPES_LIST))
endpoint_type1 = 'testendpoint1'
subcloud_status1 = self.create_subcloud_status(

View File

@ -21,6 +21,7 @@ from oslo_concurrency import lockutils
from oslo_utils import timeutils
import sys
sys.modules['fm_core'] = mock.Mock()
import threading
@ -36,7 +37,6 @@ from dcmanager.state import subcloud_state_manager
from dcmanager.tests import base
from dcmanager.tests.unit.common import fake_subcloud
from dcmanager.tests import utils
from dcorch.common import consts as dcorch_consts
from tsconfig.tsconfig import SW_VERSION
@ -117,27 +117,27 @@ class FakeService(object):
FAKE_SERVICES = [
FakeService(
dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_PLATFORM,
1
),
FakeService(
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
2
),
FakeService(
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
3
),
FakeService(
dcorch_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_FM,
4
),
FakeService(
dcorch_consts.ENDPOINT_TYPE_NFV,
dccommon_consts.ENDPOINT_TYPE_NFV,
5
),
FakeService(
dcorch_consts.ENDPOINT_TYPE_DC_CERT,
dccommon_consts.ENDPOINT_TYPE_DC_CERT,
6
)
]
@ -308,11 +308,11 @@ class Subcloud(object):
self.description = data['description']
self.location = data['location']
self.software_version = data['software-version']
self.management_state = consts.MANAGEMENT_UNMANAGED
self.management_state = dccommon_consts.MANAGEMENT_UNMANAGED
if is_online:
self.availability_status = consts.AVAILABILITY_ONLINE
self.availability_status = dccommon_consts.AVAILABILITY_ONLINE
else:
self.availability_status = consts.AVAILABILITY_OFFLINE
self.availability_status = dccommon_consts.AVAILABILITY_OFFLINE
self.deploy_status = data['deploy_status']
self.management_subnet = data['management_subnet']
self.management_gateway_ip = data['management_gateway_address']
@ -600,7 +600,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
deploy_status=consts.DEPLOY_STATE_DONE)
db_api.subcloud_update(self.ctx,
subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
fake_dcmanager_notification = FakeDCManagerNotifications()
@ -611,21 +611,21 @@ class TestSubcloudManager(base.DCManagerTestCase):
sm = subcloud_manager.SubcloudManager()
sm.update_subcloud(self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_MANAGED,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
description="subcloud new description",
location="subcloud new location")
fake_dcmanager_notification.subcloud_managed.assert_called_once_with(
self.ctx, subcloud.name)
exclude_endpoints = [dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_LOAD]
exclude_endpoints = [dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_LOAD]
self.fake_dcmanager_audit_api.trigger_subcloud_audits.\
assert_called_once_with(self.ctx, subcloud.id, exclude_endpoints)
# Verify subcloud was updated with correct values
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name)
self.assertEqual(consts.MANAGEMENT_MANAGED,
self.assertEqual(dccommon_consts.MANAGEMENT_MANAGED,
updated_subcloud.management_state)
self.assertEqual("subcloud new description",
updated_subcloud.description)
@ -639,7 +639,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
deploy_status=consts.DEPLOY_STATE_DONE)
db_api.subcloud_update(self.ctx,
subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
sm = subcloud_manager.SubcloudManager()
fake_dcmanager_cermon_api = FakeDCManagerNotifications()
@ -650,21 +650,21 @@ class TestSubcloudManager(base.DCManagerTestCase):
sm.update_subcloud(self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_MANAGED,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
description="subcloud new description",
location="subcloud new location",
data_install="install values")
fake_dcmanager_cermon_api.subcloud_managed.assert_called_once_with(
self.ctx, subcloud.name)
exclude_endpoints = [dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_LOAD]
exclude_endpoints = [dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_LOAD]
self.fake_dcmanager_audit_api.trigger_subcloud_audits.\
assert_called_once_with(self.ctx, subcloud.id, exclude_endpoints)
# Verify subcloud was updated with correct values
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name)
self.assertEqual(consts.MANAGEMENT_MANAGED,
self.assertEqual(dccommon_consts.MANAGEMENT_MANAGED,
updated_subcloud.management_state)
self.assertEqual("subcloud new description",
updated_subcloud.description)
@ -680,13 +680,13 @@ class TestSubcloudManager(base.DCManagerTestCase):
deploy_status=consts.DEPLOY_STATE_DONE)
db_api.subcloud_update(self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_MANAGED)
management_state=dccommon_consts.MANAGEMENT_MANAGED)
sm = subcloud_manager.SubcloudManager()
self.assertRaises(exceptions.BadRequest,
sm.update_subcloud, self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_MANAGED)
management_state=dccommon_consts.MANAGEMENT_MANAGED)
self.fake_dcmanager_audit_api.trigger_subcloud_audits.assert_not_called()
def test_update_already_unmanaged_subcloud(self):
@ -699,7 +699,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
self.assertRaises(exceptions.BadRequest,
sm.update_subcloud, self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_UNMANAGED)
management_state=dccommon_consts.MANAGEMENT_UNMANAGED)
self.fake_dcmanager_audit_api.trigger_subcloud_audits.assert_not_called()
def test_manage_when_deploy_status_failed(self):
@ -712,7 +712,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
self.assertRaises(exceptions.BadRequest,
sm.update_subcloud, self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_MANAGED)
management_state=dccommon_consts.MANAGEMENT_MANAGED)
def test_manage_when_offline_without_force(self):
subcloud = self.create_subcloud_static(
@ -721,13 +721,13 @@ class TestSubcloudManager(base.DCManagerTestCase):
deploy_status=consts.DEPLOY_STATE_DONE)
db_api.subcloud_update(self.ctx,
subcloud.id,
availability_status=consts.AVAILABILITY_OFFLINE)
availability_status=dccommon_consts.AVAILABILITY_OFFLINE)
sm = subcloud_manager.SubcloudManager()
self.assertRaises(exceptions.SubcloudNotOnline,
sm.update_subcloud, self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_MANAGED)
management_state=dccommon_consts.MANAGEMENT_MANAGED)
def test_manage_when_offline_with_force(self):
subcloud = self.create_subcloud_static(
@ -736,7 +736,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
deploy_status=consts.DEPLOY_STATE_DONE)
db_api.subcloud_update(self.ctx,
subcloud.id,
availability_status=consts.AVAILABILITY_OFFLINE)
availability_status=dccommon_consts.AVAILABILITY_OFFLINE)
fake_dcmanager_cermon_api = FakeDCManagerNotifications()
@ -747,7 +747,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
sm = subcloud_manager.SubcloudManager()
sm.update_subcloud(self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_MANAGED,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
description="subcloud new description",
location="subcloud new location",
data_install="install values",
@ -755,7 +755,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
# Verify subcloud was updated with correct values
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name)
self.assertEqual(consts.MANAGEMENT_MANAGED,
self.assertEqual(dccommon_consts.MANAGEMENT_MANAGED,
updated_subcloud.management_state)
self.assertEqual("subcloud new description",
updated_subcloud.description)
@ -771,7 +771,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
deploy_status=consts.DEPLOY_STATE_DONE)
db_api.subcloud_update(self.ctx,
subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
fake_dcmanager_cermon_api = FakeDCManagerNotifications()
@ -782,7 +782,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
sm = subcloud_manager.SubcloudManager()
sm.update_subcloud(self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_MANAGED,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
description="subcloud new description",
location="subcloud new location",
group_id=2)
@ -792,7 +792,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
# Verify subcloud was updated with correct values
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name)
self.assertEqual(consts.MANAGEMENT_MANAGED,
self.assertEqual(dccommon_consts.MANAGEMENT_MANAGED,
updated_subcloud.management_state)
self.assertEqual("subcloud new description",
updated_subcloud.description)
@ -806,30 +806,30 @@ class TestSubcloudManager(base.DCManagerTestCase):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
self.assertEqual(subcloud.management_state,
consts.MANAGEMENT_UNMANAGED)
dccommon_consts.MANAGEMENT_UNMANAGED)
self.assertEqual(subcloud.availability_status,
consts.AVAILABILITY_OFFLINE)
dccommon_consts.AVAILABILITY_OFFLINE)
# create sync statuses for endpoints
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV,
dcorch_consts.ENDPOINT_TYPE_DC_CERT]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV,
dccommon_consts.ENDPOINT_TYPE_DC_CERT]:
status = db_api.subcloud_status_create(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(status)
self.assertEqual(status.sync_status, consts.SYNC_STATUS_UNKNOWN)
self.assertEqual(status.sync_status, dccommon_consts.SYNC_STATUS_UNKNOWN)
# Update/verify each status with the default sync state: out-of-sync
ssm = subcloud_state_manager.SubcloudStateManager()
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV,
dcorch_consts.ENDPOINT_TYPE_DC_CERT]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV,
dccommon_consts.ENDPOINT_TYPE_DC_CERT]:
# Update
ssm.update_subcloud_endpoint_status(
self.ctx, subcloud_name=subcloud.name,
@ -840,160 +840,160 @@ class TestSubcloudManager(base.DCManagerTestCase):
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(updated_subcloud_status)
self.assertEqual(updated_subcloud_status.sync_status,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Attempt to update each status to be in-sync for an offline/unmanaged
# subcloud. This is not allowed. Verify no change.
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV,
dcorch_consts.ENDPOINT_TYPE_DC_CERT]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV,
dccommon_consts.ENDPOINT_TYPE_DC_CERT]:
ssm.update_subcloud_endpoint_status(
self.ctx, subcloud_name=subcloud.name,
endpoint_type=endpoint,
sync_status=consts.SYNC_STATUS_IN_SYNC)
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)
updated_subcloud_status = db_api.subcloud_status_get(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(updated_subcloud_status)
# No change in status: Only online/managed clouds are updated
self.assertEqual(updated_subcloud_status.sync_status,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Attempt to update each status to be unknown for an offline/unmanaged
# subcloud. This is allowed.
ssm.update_subcloud_endpoint_status(
self.ctx, subcloud_name=subcloud.name,
endpoint_type=None,
sync_status=consts.SYNC_STATUS_UNKNOWN)
sync_status=dccommon_consts.SYNC_STATUS_UNKNOWN)
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV,
dcorch_consts.ENDPOINT_TYPE_DC_CERT]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV,
dccommon_consts.ENDPOINT_TYPE_DC_CERT]:
updated_subcloud_status = db_api.subcloud_status_get(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(updated_subcloud_status)
self.assertEqual(updated_subcloud_status.sync_status,
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.SYNC_STATUS_UNKNOWN)
# Attempt to update each status to be out-of-sync for an
# offline/unmanaged subcloud. Exclude one endpoint. This is allowed.
ssm.update_subcloud_endpoint_status(
self.ctx, subcloud_name=subcloud.name,
endpoint_type=None,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC,
ignore_endpoints=[dcorch_consts.ENDPOINT_TYPE_DC_CERT])
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
ignore_endpoints=[dccommon_consts.ENDPOINT_TYPE_DC_CERT])
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV]:
updated_subcloud_status = db_api.subcloud_status_get(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(updated_subcloud_status)
self.assertEqual(updated_subcloud_status.sync_status,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Verify the dc-cert endpoint did not change
endpoint = dcorch_consts.ENDPOINT_TYPE_DC_CERT
endpoint = dccommon_consts.ENDPOINT_TYPE_DC_CERT
updated_subcloud_status = db_api.subcloud_status_get(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(updated_subcloud_status)
self.assertEqual(updated_subcloud_status.sync_status,
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.SYNC_STATUS_UNKNOWN)
# Set/verify the subcloud is online/unmanaged
db_api.subcloud_update(
self.ctx, subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
subcloud = db_api.subcloud_get(self.ctx, subcloud.id)
self.assertIsNotNone(subcloud)
self.assertEqual(subcloud.management_state,
consts.MANAGEMENT_UNMANAGED)
dccommon_consts.MANAGEMENT_UNMANAGED)
self.assertEqual(subcloud.availability_status,
consts.AVAILABILITY_ONLINE)
dccommon_consts.AVAILABILITY_ONLINE)
# Attempt to update each status to be in-sync for an online/unmanaged
# subcloud. This is not allowed. Verify no change.
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV]:
ssm.update_subcloud_endpoint_status(
self.ctx, subcloud_name=subcloud.name,
endpoint_type=endpoint,
sync_status=consts.SYNC_STATUS_IN_SYNC)
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)
updated_subcloud_status = db_api.subcloud_status_get(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(updated_subcloud_status)
# No change in status: Only online/managed clouds are updated
self.assertEqual(updated_subcloud_status.sync_status,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Attempt to update dc-cert status to be in-sync for an
# online/unmanaged subcloud. This is allowed. Verify the change.
endpoint = dcorch_consts.ENDPOINT_TYPE_DC_CERT
endpoint = dccommon_consts.ENDPOINT_TYPE_DC_CERT
ssm.update_subcloud_endpoint_status(
self.ctx, subcloud_name=subcloud.name,
endpoint_type=endpoint,
sync_status=consts.SYNC_STATUS_IN_SYNC)
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)
updated_subcloud_status = db_api.subcloud_status_get(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(updated_subcloud_status)
self.assertEqual(updated_subcloud_status.sync_status,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.SYNC_STATUS_IN_SYNC)
# Set/verify the subcloud is online/managed
db_api.subcloud_update(
self.ctx, subcloud.id,
management_state=consts.MANAGEMENT_MANAGED)
management_state=dccommon_consts.MANAGEMENT_MANAGED)
subcloud = db_api.subcloud_get(self.ctx, subcloud.id)
self.assertIsNotNone(subcloud)
self.assertEqual(subcloud.management_state,
consts.MANAGEMENT_MANAGED)
dccommon_consts.MANAGEMENT_MANAGED)
self.assertEqual(subcloud.availability_status,
consts.AVAILABILITY_ONLINE)
dccommon_consts.AVAILABILITY_ONLINE)
# Attempt to update each status to be in-sync for an online/managed
# subcloud
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV,
dcorch_consts.ENDPOINT_TYPE_DC_CERT]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV,
dccommon_consts.ENDPOINT_TYPE_DC_CERT]:
ssm.update_subcloud_endpoint_status(
self.ctx, subcloud_name=subcloud.name,
endpoint_type=endpoint,
sync_status=consts.SYNC_STATUS_IN_SYNC)
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)
updated_subcloud_status = db_api.subcloud_status_get(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(updated_subcloud_status)
self.assertEqual(updated_subcloud_status.sync_status,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.SYNC_STATUS_IN_SYNC)
# Change the sync status to 'out-of-sync' and verify fair lock access
# based on subcloud name for each update
with mock.patch.object(lockutils, 'internal_fair_lock') as mock_lock:
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV,
dcorch_consts.ENDPOINT_TYPE_DC_CERT]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV,
dccommon_consts.ENDPOINT_TYPE_DC_CERT]:
ssm.update_subcloud_endpoint_status(
self.ctx, subcloud_name=subcloud.name,
endpoint_type=endpoint,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC)
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Verify lock was called
mock_lock.assert_called_with(subcloud.name)
@ -1002,7 +1002,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(updated_subcloud_status)
self.assertEqual(updated_subcloud_status.sync_status,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
def test_update_subcloud_availability_go_online(self):
# create a subcloud
@ -1010,7 +1010,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
self.assertIsNotNone(subcloud)
self.assertEqual(subcloud.availability_status,
consts.AVAILABILITY_OFFLINE)
dccommon_consts.AVAILABILITY_OFFLINE)
fake_dcmanager_cermon_api = FakeDCManagerNotifications()
@ -1020,31 +1020,31 @@ class TestSubcloudManager(base.DCManagerTestCase):
ssm = subcloud_state_manager.SubcloudStateManager()
db_api.subcloud_update(self.ctx, subcloud.id,
management_state=consts.MANAGEMENT_MANAGED)
management_state=dccommon_consts.MANAGEMENT_MANAGED)
# create sync statuses for endpoints
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV,
dcorch_consts.ENDPOINT_TYPE_DC_CERT]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV,
dccommon_consts.ENDPOINT_TYPE_DC_CERT]:
status = db_api.subcloud_status_create(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(status)
self.assertEqual(status.sync_status, consts.SYNC_STATUS_UNKNOWN)
self.assertEqual(status.sync_status, dccommon_consts.SYNC_STATUS_UNKNOWN)
ssm.update_subcloud_availability(self.ctx, subcloud.name,
consts.AVAILABILITY_ONLINE)
dccommon_consts.AVAILABILITY_ONLINE)
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
# Verify the subcloud was set to online
self.assertEqual(updated_subcloud.availability_status,
consts.AVAILABILITY_ONLINE)
dccommon_consts.AVAILABILITY_ONLINE)
# Verify notifying dcorch
self.fake_dcorch_api.update_subcloud_states.assert_called_once_with(
self.ctx, subcloud.name, updated_subcloud.management_state,
consts.AVAILABILITY_ONLINE)
dccommon_consts.AVAILABILITY_ONLINE)
# Verify triggering audits
self.fake_dcmanager_audit_api.trigger_subcloud_audits.\
assert_called_once_with(self.ctx, subcloud.id)
@ -1057,7 +1057,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
self.assertIsNotNone(subcloud)
self.assertEqual(subcloud.availability_status,
consts.AVAILABILITY_OFFLINE)
dccommon_consts.AVAILABILITY_OFFLINE)
fake_dcmanager_cermon_api = FakeDCManagerNotifications()
@ -1070,28 +1070,28 @@ class TestSubcloudManager(base.DCManagerTestCase):
# Note that we have intentionally left the subcloud as "unmanaged"
# create sync statuses for endpoints
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV,
dcorch_consts.ENDPOINT_TYPE_DC_CERT]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV,
dccommon_consts.ENDPOINT_TYPE_DC_CERT]:
status = db_api.subcloud_status_create(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(status)
self.assertEqual(status.sync_status, consts.SYNC_STATUS_UNKNOWN)
self.assertEqual(status.sync_status, dccommon_consts.SYNC_STATUS_UNKNOWN)
ssm.update_subcloud_availability(self.ctx, subcloud.name,
consts.AVAILABILITY_ONLINE)
dccommon_consts.AVAILABILITY_ONLINE)
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
# Verify the subcloud was set to online
self.assertEqual(updated_subcloud.availability_status,
consts.AVAILABILITY_ONLINE)
dccommon_consts.AVAILABILITY_ONLINE)
# Verify notifying dcorch
self.fake_dcorch_api.update_subcloud_states.assert_called_once_with(
self.ctx, subcloud.name, updated_subcloud.management_state,
consts.AVAILABILITY_ONLINE)
dccommon_consts.AVAILABILITY_ONLINE)
# Verify triggering audits
self.fake_dcmanager_audit_api.trigger_subcloud_audits.\
assert_called_once_with(self.ctx, subcloud.id)
@ -1104,23 +1104,23 @@ class TestSubcloudManager(base.DCManagerTestCase):
# Set the subcloud to online/managed
db_api.subcloud_update(self.ctx, subcloud.id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
ssm = subcloud_state_manager.SubcloudStateManager()
# create sync statuses for endpoints and set them to in-sync
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV]:
for endpoint in [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_FM,
dccommon_consts.ENDPOINT_TYPE_NFV]:
db_api.subcloud_status_create(
self.ctx, subcloud.id, endpoint)
ssm.update_subcloud_endpoint_status(
self.ctx, subcloud_name=subcloud.name,
endpoint_type=endpoint,
sync_status=consts.SYNC_STATUS_IN_SYNC)
sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)
# We trigger subcloud audits after updating the identity endpoint from
# unknown to in-sync
@ -1135,7 +1135,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
# Verify the subcloud availability was not updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.availability_status,
consts.AVAILABILITY_ONLINE)
dccommon_consts.AVAILABILITY_ONLINE)
# Verify dcorch was not notified
self.fake_dcorch_api.update_subcloud_states.assert_not_called()
# Verify the audit_fail_count was updated
@ -1145,52 +1145,52 @@ class TestSubcloudManager(base.DCManagerTestCase):
# Audit fails again
audit_fail_count = audit_fail_count + 1
ssm.update_subcloud_availability(self.ctx, subcloud.name,
consts.AVAILABILITY_OFFLINE,
dccommon_consts.AVAILABILITY_OFFLINE,
audit_fail_count=audit_fail_count)
# Verify the subcloud availability was updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.availability_status,
consts.AVAILABILITY_OFFLINE)
dccommon_consts.AVAILABILITY_OFFLINE)
# Verify notifying dcorch
self.fake_dcorch_api.update_subcloud_states.assert_called_once_with(
self.ctx, subcloud.name, updated_subcloud.management_state,
consts.AVAILABILITY_OFFLINE)
dccommon_consts.AVAILABILITY_OFFLINE)
# Verify all endpoint statuses set to unknown
for subcloud, subcloud_status in db_api. \
subcloud_get_with_status(self.ctx, subcloud.id):
self.assertIsNotNone(subcloud_status)
self.assertEqual(subcloud_status.sync_status,
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.SYNC_STATUS_UNKNOWN)
def test_update_subcloud_identity_endpoint(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
for endpoint_type in dcorch_consts.ENDPOINT_TYPES_LIST:
for endpoint_type in dccommon_consts.ENDPOINT_TYPES_LIST:
subcloud_status = db_api.subcloud_status_get(
self.ctx, subcloud.id, endpoint_type)
self.assertIsNotNone(subcloud_status)
self.assertEqual(subcloud_status.sync_status,
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.SYNC_STATUS_UNKNOWN)
ssm = subcloud_state_manager.SubcloudStateManager()
# Set the subcloud to online/managed
db_api.subcloud_update(self.ctx, subcloud.id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
# Update identity endpoints statuses
endpoint = dcorch_consts.ENDPOINT_TYPE_IDENTITY
for original_sync_status in [consts.SYNC_STATUS_IN_SYNC,
consts.SYNC_STATUS_OUT_OF_SYNC,
consts.SYNC_STATUS_UNKNOWN]:
endpoint = dccommon_consts.ENDPOINT_TYPE_IDENTITY
for original_sync_status in [dccommon_consts.SYNC_STATUS_IN_SYNC,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
dccommon_consts.SYNC_STATUS_UNKNOWN]:
for new_sync_status in [consts.SYNC_STATUS_IN_SYNC,
consts.SYNC_STATUS_OUT_OF_SYNC,
consts.SYNC_STATUS_UNKNOWN]:
for new_sync_status in [dccommon_consts.SYNC_STATUS_IN_SYNC,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
dccommon_consts.SYNC_STATUS_UNKNOWN]:
# Update identity to the original status
ssm.update_subcloud_endpoint_status(
@ -1213,8 +1213,8 @@ class TestSubcloudManager(base.DCManagerTestCase):
trigger_count = new_trigger_subcloud_patch_load_audits_count - \
original_trigger_subcloud_patch_load_audits_count
if original_sync_status == consts.SYNC_STATUS_UNKNOWN and \
new_sync_status != consts.SYNC_STATUS_UNKNOWN:
if original_sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN and \
new_sync_status != dccommon_consts.SYNC_STATUS_UNKNOWN:
# Verify the subcloud patch and load audits are triggered once
self.assertEqual(trigger_count, 1)
else:
@ -1246,7 +1246,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(subcloud_status)
self.assertEqual(subcloud_status.sync_status,
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.SYNC_STATUS_UNKNOWN)
# Verify the subcloud openstack_installed was updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name)
@ -1611,7 +1611,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
deploy_status=consts.DEPLOY_STATE_DONE)
db_api.subcloud_update(self.ctx,
subcloud.id,
management_state=consts.MANAGEMENT_MANAGED)
management_state=dccommon_consts.MANAGEMENT_MANAGED)
fake_dcmanager_cermon_api = FakeDCManagerNotifications()
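Editor's note (illustrative sketch, not part of this change): the tests above encode the rule that an in-sync status may only be recorded for an online and managed subcloud, with dc-cert additionally allowed while the subcloud is online but unmanaged, whereas unknown and out-of-sync may be recorded regardless. A minimal, self-contained approximation of that guard follows; the function name and literal values are assumptions, not the real subcloud_state_manager implementation.

# Minimal stand-ins for the dccommon constants used above.
AVAILABILITY_ONLINE = "online"
MANAGEMENT_MANAGED = "managed"
SYNC_STATUS_IN_SYNC = "in-sync"
ENDPOINT_TYPE_DC_CERT = "dc-cert"

def status_update_allowed(availability, management, endpoint_type, new_status):
    """Return True if the new sync status may be recorded (sketch only)."""
    # unknown and out-of-sync can always be recorded
    if new_status != SYNC_STATUS_IN_SYNC:
        return True
    # in-sync needs an online subcloud that is managed, except dc-cert,
    # which only needs the subcloud to be online
    if availability != AVAILABILITY_ONLINE:
        return False
    return management == MANAGEMENT_MANAGED or endpoint_type == ENDPOINT_TYPE_DC_CERT

assert not status_update_allowed("offline", "unmanaged", "patching", SYNC_STATUS_IN_SYNC)
assert status_update_allowed("online", "unmanaged", ENDPOINT_TYPE_DC_CERT, SYNC_STATUS_IN_SYNC)
assert status_update_allowed("online", "managed", "patching", SYNC_STATUS_IN_SYNC)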

View File

@ -1,11 +1,12 @@
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import mock
import uuid
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
@ -26,7 +27,7 @@ class FakeController(object):
hostname='controller-0',
administrative=consts.ADMIN_UNLOCKED,
operational=consts.OPERATIONAL_ENABLED,
availability=consts.AVAILABILITY_ONLINE,
availability=dccommon_consts.AVAILABILITY_ONLINE,
ihost_action=None,
target_load=UPGRADED_VERSION,
software_load=PREVIOUS_VERSION,

View File

@ -1,10 +1,11 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2020-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import mock
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.tests.unit.common import fake_strategy
@ -452,7 +453,7 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage):
# Update the subcloud to be online
db_api.subcloud_update(self.ctx,
self.subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
# Create a fake strategy
fake_strategy.create_fake_strategy_step(
@ -561,7 +562,7 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage):
db_api.subcloud_update(self.ctx,
self.subcloud.id,
deploy_status=consts.DEPLOY_STATE_INSTALLED,
availability_status=consts.AVAILABILITY_OFFLINE)
availability_status=dccommon_consts.AVAILABILITY_OFFLINE)
# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)
@ -583,7 +584,7 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage):
db_api.subcloud_update(self.ctx,
self.subcloud.id,
deploy_status=consts.DEPLOY_STATE_DATA_MIGRATION_FAILED,
availability_status=consts.AVAILABILITY_OFFLINE)
availability_status=dccommon_consts.AVAILABILITY_OFFLINE)
# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)
@ -605,7 +606,7 @@ class TestSwUpgradePreCheckSimplexStage(TestSwUpgradePreCheckStage):
db_api.subcloud_update(self.ctx,
self.subcloud.id,
deploy_status=consts.DEPLOY_STATE_BOOTSTRAP_FAILED,
availability_status=consts.AVAILABILITY_OFFLINE)
availability_status=dccommon_consts.AVAILABILITY_OFFLINE)
# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

View File

@ -15,6 +15,7 @@ import mock
from oslo_config import cfg
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.common import context
from dcmanager.db.sqlalchemy import api as db_api
@ -188,8 +189,8 @@ class TestSwUpdate(base.DCManagerTestCase):
return db_api.subcloud_update(
self.ctx,
subcloud_id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
def setup_strategy_step(self, strategy_state):
fake_strategy.create_fake_strategy_step(

View File

@ -20,6 +20,7 @@ import threading
from oslo_config import cfg
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.common import context
from dcmanager.common import exceptions
@ -32,7 +33,6 @@ from dcmanager.tests import base
from dcmanager.tests.unit.common import fake_strategy
from dcmanager.tests.unit.common import fake_subcloud
from dcmanager.tests import utils
from dcorch.common import consts as dcorch_consts
OAM_FLOATING_IP = '10.10.10.12'
@ -98,13 +98,13 @@ class Subcloud(object):
self.software_version = '12.04'
self.group_id = group_id
if is_managed:
self.management_state = consts.MANAGEMENT_MANAGED
self.management_state = dccommon_consts.MANAGEMENT_MANAGED
else:
self.management_state = consts.MANAGEMENT_UNMANAGED
self.management_state = dccommon_consts.MANAGEMENT_UNMANAGED
if is_online:
self.availability_status = consts.AVAILABILITY_ONLINE
self.availability_status = dccommon_consts.AVAILABILITY_ONLINE
else:
self.availability_status = consts.AVAILABILITY_OFFLINE
self.availability_status = dccommon_consts.AVAILABILITY_OFFLINE
class StrategyStep(object):
@ -135,7 +135,7 @@ class FakePatchingClientOutOfSync(mock.Mock):
def query(self, state=None):
if state == 'Committed':
if self.region == consts.DEFAULT_REGION_NAME:
if self.region == dccommon_consts.DEFAULT_REGION_NAME:
return {'DC.3': {'sw_version': '17.07',
'repostate': 'Committed',
'patchstate': 'Committed'}
@ -143,7 +143,7 @@ class FakePatchingClientOutOfSync(mock.Mock):
else:
return {}
else:
if self.region == consts.DEFAULT_REGION_NAME:
if self.region == dccommon_consts.DEFAULT_REGION_NAME:
return {'DC.1': {'sw_version': '17.07',
'repostate': 'Applied',
'patchstate': 'Applied'},
@ -193,7 +193,7 @@ class FakePatchingClientSubcloudCommitted(mock.Mock):
def query(self, state=None):
if state == 'Committed':
if self.region == consts.DEFAULT_REGION_NAME:
if self.region == dccommon_consts.DEFAULT_REGION_NAME:
return {'DC.3': {'sw_version': '17.07',
'repostate': 'Committed',
'patchstate': 'Committed'}
@ -207,7 +207,7 @@ class FakePatchingClientSubcloudCommitted(mock.Mock):
else:
return {}
else:
if self.region == consts.DEFAULT_REGION_NAME:
if self.region == dccommon_consts.DEFAULT_REGION_NAME:
return {'DC.1': {'sw_version': '17.07',
'repostate': 'Applied',
'patchstate': 'Applied'},
@ -257,7 +257,7 @@ class FakePatchingClientSubcloudUnknown(mock.Mock):
def query(self, state=None):
if state == 'Committed':
if self.region == consts.DEFAULT_REGION_NAME:
if self.region == dccommon_consts.DEFAULT_REGION_NAME:
return {'DC.3': {'sw_version': '17.07',
'repostate': 'Committed',
'patchstate': 'Committed'}
@ -265,7 +265,7 @@ class FakePatchingClientSubcloudUnknown(mock.Mock):
else:
return {}
else:
if self.region == consts.DEFAULT_REGION_NAME:
if self.region == dccommon_consts.DEFAULT_REGION_NAME:
return {'DC.1': {'sw_version': '17.07',
'repostate': 'Applied',
'patchstate': 'Applied'},
@ -314,7 +314,7 @@ class FakePatchingClientAvailable(mock.Mock):
self.endpoint = endpoint
def query(self, state=None):
if self.region == consts.DEFAULT_REGION_NAME:
if self.region == dccommon_consts.DEFAULT_REGION_NAME:
if state == 'Committed':
return {'DC.1': {'sw_version': '17.07',
'repostate': 'Committed',
@ -348,7 +348,7 @@ class FakePatchingClientFinish(mock.Mock):
self.endpoint = endpoint
def query(self, state=None):
if self.region == consts.DEFAULT_REGION_NAME:
if self.region == dccommon_consts.DEFAULT_REGION_NAME:
if state == 'Committed':
return {'DC.2': {'sw_version': '17.07',
'repostate': 'Committed',
@ -541,11 +541,11 @@ class TestSwUpdateManager(base.DCManagerTestCase):
}
subcloud = db_api.subcloud_create(ctxt, **values)
if is_managed:
state = consts.MANAGEMENT_MANAGED
state = dccommon_consts.MANAGEMENT_MANAGED
subcloud = db_api.subcloud_update(ctxt, subcloud.id,
management_state=state)
if is_online:
status = consts.AVAILABILITY_ONLINE
status = dccommon_consts.AVAILABILITY_ONLINE
subcloud = db_api.subcloud_update(ctxt, subcloud.id,
availability_status=status)
return subcloud
@ -567,11 +567,11 @@ class TestSwUpdateManager(base.DCManagerTestCase):
if endpoint:
endpoint_type = endpoint
else:
endpoint_type = dcorch_consts.ENDPOINT_TYPE_PATCHING
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PATCHING
if status:
sync_status = status
else:
sync_status = consts.SYNC_STATUS_OUT_OF_SYNC
sync_status = dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
subcloud_status = db_api.subcloud_status_update(ctxt,
subcloud_id,
@ -753,13 +753,13 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.fake_group3.id,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt, fake_subcloud1.id,
endpoint=dcorch_consts.ENDPOINT_TYPE_LOAD)
endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD)
fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2',
self.fake_group3.id,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt, fake_subcloud2.id,
endpoint=dcorch_consts.ENDPOINT_TYPE_LOAD)
endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD)
data = copy.copy(FAKE_SW_UPDATE_DATA)
data["type"] = consts.SW_UPDATE_TYPE_UPGRADE
@ -796,13 +796,13 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.fake_group3.id,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt, fake_subcloud1.id,
endpoint=dcorch_consts.ENDPOINT_TYPE_LOAD)
endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD)
fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2',
self.fake_group3.id,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt, fake_subcloud2.id,
endpoint=dcorch_consts.ENDPOINT_TYPE_LOAD)
endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD)
mock_global_prestage_validate.return_value = None
mock_initial_subcloud_validate.return_value = None
@ -845,15 +845,15 @@ class TestSwUpdateManager(base.DCManagerTestCase):
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt,
fake_subcloud1.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_IN_SYNC)
# Subcloud2 will be prestaged; its load sync status is None
fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt,
fake_subcloud2.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.ENDPOINT_TYPE_LOAD,
None)
# Subcloud3 will be prestaged; its load is out-of-sync
@ -861,16 +861,16 @@ class TestSwUpdateManager(base.DCManagerTestCase):
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt,
fake_subcloud3.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Subcloud4 will be prestaged; its load sync status is unknown
fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 1,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt,
fake_subcloud4.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_UNKNOWN)
mock_global_prestage_validate.return_value = None
mock_initial_subcloud_validate.return_value = None
@ -910,13 +910,13 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.fake_group3.id,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt, fake_subcloud1.id,
endpoint=dcorch_consts.ENDPOINT_TYPE_LOAD)
endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD)
fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2',
self.fake_group3.id,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt, fake_subcloud2.id,
endpoint=dcorch_consts.ENDPOINT_TYPE_LOAD)
endpoint=dccommon_consts.ENDPOINT_TYPE_LOAD)
mock_initial_subcloud_validate.return_value = None
mock_controller_upgrade.return_value = list()
@ -974,7 +974,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.update_subcloud_status(self.ctxt,
fake_subcloud4.id,
None,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.SYNC_STATUS_IN_SYNC)
# Subcloud5 will be patched
fake_subcloud5 = self.create_subcloud(self.ctxt, 'subcloud5', 2,
is_managed=True, is_online=True)
@ -1017,8 +1017,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.update_subcloud_status(self.ctxt,
fake_subcloud1.id,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Subcloud 2 will not be patched because it is offline
fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2',
@ -1026,8 +1026,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
is_managed=True, is_online=False)
self.update_subcloud_status(self.ctxt, fake_subcloud2.id,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Subcloud 3 will be patched
fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3',
@ -1035,8 +1035,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt, fake_subcloud3.id,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Subcloud 4 will not be patched because it is in sync
fake_subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4',
@ -1044,8 +1044,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt, fake_subcloud4.id,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.SYNC_STATUS_IN_SYNC)
data = copy.copy(FAKE_SW_PATCH_DATA)
data["type"] = consts.SW_UPDATE_TYPE_PATCH
@ -1164,7 +1164,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.update_subcloud_status(self.ctxt,
fake_subcloud4.id,
None,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.SYNC_STATUS_IN_SYNC)
# Subcloud5 will be patched
fake_subcloud5 = self.create_subcloud(self.ctxt, 'subcloud5', 2,
is_managed=True, is_online=True)
@ -1224,7 +1224,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.update_subcloud_status(self.ctxt,
fake_subcloud4.id,
None,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.SYNC_STATUS_IN_SYNC)
# Subcloud5 will be patched
fake_subcloud5 = self.create_subcloud(self.ctxt, 'subcloud5', 2,
is_managed=True, is_online=True)
@ -1317,7 +1317,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.update_subcloud_status(self.ctxt,
fake_subcloud4.id,
None,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.SYNC_STATUS_IN_SYNC)
# Subcloud5 will be patched
fake_subcloud5 = self.create_subcloud(self.ctxt, 'subcloud5', 2,
is_managed=True, is_online=True)
@ -1411,7 +1411,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.update_subcloud_status(self.ctxt,
fake_subcloud4.id,
None,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.SYNC_STATUS_IN_SYNC)
# Subcloud5 will be patched
fake_subcloud5 = self.create_subcloud(self.ctxt, 'subcloud5', 2,
is_managed=True, is_online=True)
@ -1501,7 +1501,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.update_subcloud_status(self.ctxt,
fake_subcloud4.id,
None,
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.SYNC_STATUS_UNKNOWN)
um = sw_update_manager.SwUpdateManager()
self.assertRaises(exceptions.BadRequest,
@ -1598,24 +1598,24 @@ class TestSwUpdateManager(base.DCManagerTestCase):
is_managed=True, is_online=False)
self.update_subcloud_status(self.ctxt,
fake_subcloud1.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Subcloud 2 will be upgraded
fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', self.fake_group3.id,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt,
fake_subcloud2.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Subcloud 3 will not be upgraded because its load is already in-sync
fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', self.fake_group3.id,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt,
fake_subcloud3.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_IN_SYNC)
data = copy.copy(FAKE_SW_UPDATE_DATA)
data["type"] = consts.SW_UPDATE_TYPE_UPGRADE
@ -1646,24 +1646,24 @@ class TestSwUpdateManager(base.DCManagerTestCase):
is_managed=True, is_online=False)
self.update_subcloud_status(self.ctxt,
fake_subcloud1.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Subcloud 2 will be upgraded
fake_subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', self.fake_group3.id,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt,
fake_subcloud2.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_OUT_OF_SYNC)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
# Subcloud 3 will not be upgraded because its load is already in-sync
fake_subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', self.fake_group3.id,
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt,
fake_subcloud3.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_IN_SYNC)
data = copy.copy(FAKE_SW_UPDATE_DATA)
data["type"] = consts.SW_UPDATE_TYPE_UPGRADE
@ -1695,8 +1695,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
is_managed=True, is_online=False)
self.update_subcloud_status(self.ctxt,
fake_subcloud1.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_UNKNOWN)
um = sw_update_manager.SwUpdateManager()
data = copy.copy(FAKE_SW_UPDATE_DATA)
@ -1730,8 +1730,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
is_managed=True, is_online=False)
self.update_subcloud_status(self.ctxt,
fake_subcloud1.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_IN_SYNC)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_IN_SYNC)
um = sw_update_manager.SwUpdateManager()
data = copy.copy(FAKE_SW_UPDATE_DATA)
@ -1755,8 +1755,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
is_managed=True, is_online=True)
self.update_subcloud_status(self.ctxt,
fake_subcloud1.id,
dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.ENDPOINT_TYPE_LOAD,
dccommon_consts.SYNC_STATUS_UNKNOWN)
um = sw_update_manager.SwUpdateManager()
data = copy.copy(FAKE_SW_UPDATE_DATA)
@ -1903,8 +1903,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx,
subcloud_id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
fake_strategy.create_fake_strategy_step(
self.ctx,
subcloud_id=subcloud.id,
@ -1944,8 +1944,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx,
subcloud_id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
fake_strategy.create_fake_strategy_step(
self.ctx,
subcloud_id=subcloud.id,
@ -1985,8 +1985,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx,
subcloud_id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
fake_strategy.create_fake_strategy_step(
self.ctx,
subcloud_id=subcloud.id,
@ -2026,8 +2026,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx,
subcloud_id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
fake_strategy.create_fake_strategy_step(
self.ctx,
subcloud_id=subcloud.id,
@ -2067,8 +2067,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx,
subcloud_id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
fake_strategy.create_fake_strategy_step(
self.ctx,
subcloud_id=subcloud.id,
@ -2118,8 +2118,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx,
subcloud_id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
fake_strategy.create_fake_strategy_step(
self.ctx,
subcloud_id=subcloud.id,
@ -2158,8 +2158,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx,
subcloud_id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
fake_strategy.create_fake_strategy_step(
self.ctx,
subcloud_id=subcloud.id,
@ -2197,8 +2197,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
subcloud = db_api.subcloud_update(
self.ctx,
subcloud_id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
fake_strategy.create_fake_strategy_step(
self.ctx,
subcloud_id=subcloud.id,
@ -2244,7 +2244,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
regionone_patches = dict()
regionone_patches = \
FakePatchingClientOutOfSync(
consts.DEFAULT_REGION_NAME, mock.Mock(), mock.Mock()).query()
dccommon_consts.DEFAULT_REGION_NAME, mock.Mock(), mock.Mock()).query()
regionone_applied_patch_ids = [
patch_id for patch_id in regionone_patches.keys()
if regionone_patches[patch_id]['repostate'] in [
@ -2258,7 +2258,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
regionone_committed_patches = \
FakePatchingClientOutOfSync(
consts.DEFAULT_REGION_NAME, mock.Mock(), mock.Mock()
dccommon_consts.DEFAULT_REGION_NAME, mock.Mock(), mock.Mock()
).query('Committed')
regionone_committed_patch_ids = [
patch_id for patch_id in regionone_committed_patches]

View File

@ -1,4 +1,4 @@
# Copyright 2017-2019 Wind River
# Copyright 2017-2022 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -15,6 +15,7 @@
import routes
from dccommon import consts as dccommon_consts
from dcorch.api.proxy.apps.controller import CinderAPIController
from dcorch.api.proxy.apps.controller import ComputeAPIController
from dcorch.api.proxy.apps.controller import IdentityAPIController
@ -44,10 +45,10 @@ class Acceptor(Router):
self._default_dispatcher = APIDispatcher(app)
self.forwarder_map = {
consts.ENDPOINT_TYPE_COMPUTE: self._default_dispatcher,
consts.ENDPOINT_TYPE_PLATFORM: self._default_dispatcher,
dccommon_consts.ENDPOINT_TYPE_PLATFORM: self._default_dispatcher,
consts.ENDPOINT_TYPE_VOLUME: self._default_dispatcher,
consts.ENDPOINT_TYPE_NETWORK: self._default_dispatcher,
consts.ENDPOINT_TYPE_IDENTITY: self._default_dispatcher,
dccommon_consts.ENDPOINT_TYPE_IDENTITY: self._default_dispatcher,
}
if CONF.type in self.forwarder_map:
forwarder = self.forwarder_map[CONF.type]
@ -56,11 +57,11 @@ class Acceptor(Router):
self.route_map = {
consts.ENDPOINT_TYPE_COMPUTE: self.add_compute_routes,
consts.ENDPOINT_TYPE_PLATFORM: self.add_platform_routes,
dccommon_consts.ENDPOINT_TYPE_PLATFORM: self.add_platform_routes,
consts.ENDPOINT_TYPE_VOLUME: self.add_volume_routes,
consts.ENDPOINT_TYPE_NETWORK: self.add_network_routes,
consts.ENDPOINT_TYPE_PATCHING: self.add_patch_routes,
consts.ENDPOINT_TYPE_IDENTITY: self.add_identity_routes,
dccommon_consts.ENDPOINT_TYPE_PATCHING: self.add_patch_routes,
dccommon_consts.ENDPOINT_TYPE_IDENTITY: self.add_identity_routes,
}
self._conf = conf
mapper = routes.Mapper()

View File

@ -1,4 +1,4 @@
# Copyright 2017-2021 Wind River
# Copyright 2017-2022 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -23,9 +23,9 @@ import tsconfig.tsconfig as tsc
import webob.dec
import webob.exc
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcmanager.common import consts as dcmanager_consts
from dcmanager.rpc import client as dcmanager_rpc_client
from dcorch.api.proxy.apps.dispatcher import APIDispatcher
from dcorch.api.proxy.apps.proxy import Proxy
@ -380,7 +380,7 @@ class ComputeAPIController(APIController):
class SysinvAPIController(APIController):
ENDPOINT_TYPE = consts.ENDPOINT_TYPE_PLATFORM
ENDPOINT_TYPE = dccommon_consts.ENDPOINT_TYPE_PLATFORM
OK_STATUS_CODE = [
webob.exc.HTTPOk.code,
webob.exc.HTTPAccepted.code,
@ -432,8 +432,8 @@ class SysinvAPIController(APIController):
def _notify_dcmanager_firmware(self, request, response):
return self._notify_dcmanager(request,
response,
consts.ENDPOINT_TYPE_FIRMWARE,
dcmanager_consts.SYNC_STATUS_UNKNOWN)
dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
dccommon_consts.SYNC_STATUS_UNKNOWN)
def _process_response(self, environ, request, response):
try:
@ -537,10 +537,10 @@ class SysinvAPIController(APIController):
elif len(os.listdir(proxy_consts.LOAD_VAULT_DIR)) == 0:
try:
ks_client = OpenStackDriver(
region_name=dcmanager_consts.DEFAULT_REGION_NAME,
region_name=dccommon_consts.DEFAULT_REGION_NAME,
region_clients=None).keystone_client
sysinv_client = SysinvClient(
dcmanager_consts.DEFAULT_REGION_NAME, ks_client.session,
dccommon_consts.DEFAULT_REGION_NAME, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'))
loads = sysinv_client.get_loads()
except Exception:
@ -764,7 +764,7 @@ class SysinvAPIController(APIController):
class IdentityAPIController(APIController):
ENDPOINT_TYPE = consts.ENDPOINT_TYPE_IDENTITY
ENDPOINT_TYPE = dccommon_consts.ENDPOINT_TYPE_IDENTITY
OK_STATUS_CODE = [
webob.exc.HTTPOk.code,
webob.exc.HTTPCreated.code,

View File

@ -1,4 +1,4 @@
# Copyright 2018-2020 Wind River
# Copyright 2018-2022 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -23,12 +23,12 @@ import webob.dec
import webob.exc
from cgcs_patch.patch_functions import get_release_from_patch
from dcmanager.common import consts as dcmanager_consts
from dccommon import consts as dccommon_consts
from dcorch.api.proxy.apps.dispatcher import APIDispatcher
from dcorch.api.proxy.common import constants as proxy_consts
from dcorch.api.proxy.common.service import Middleware
from dcorch.api.proxy.common import utils as proxy_utils
from dcorch.common import consts
from dcorch.common import context
from oslo_config import cfg
from oslo_log import log as logging
@ -52,7 +52,7 @@ CONF.register_opts(patch_opts, CONF.type)
class PatchAPIController(Middleware):
ENDPOINT_TYPE = consts.ENDPOINT_TYPE_PATCHING
ENDPOINT_TYPE = dccommon_consts.ENDPOINT_TYPE_PATCHING
OK_STATUS_CODE = [
webob.exc.HTTPOk.code,
]
@ -189,7 +189,7 @@ class PatchAPIController(Middleware):
self.dcmanager_state_rpc_client.update_subcloud_endpoint_status(
self.ctxt,
endpoint_type=self.ENDPOINT_TYPE,
sync_status=dcmanager_consts.SYNC_STATUS_UNKNOWN)
sync_status=dccommon_consts.SYNC_STATUS_UNKNOWN)
return response
def patch_delete_req(self, request, response):

View File

@ -1,4 +1,4 @@
# Copyright 2017-2020 Wind River
# Copyright 2017-2022 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -12,7 +12,7 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dccommon import consts as dccommon_consts
from dcorch.common import consts
# Version could be any of the following: /, /v1, /v1/
@ -319,7 +319,7 @@ ROUTE_METHOD_MAP = {
QUOTA_RESOURCE_TAG: ['PUT', 'DELETE', 'GET'],
QUOTA_CLASS_RESOURCE_TAG: ['PUT'],
},
consts.ENDPOINT_TYPE_PLATFORM: {
dccommon_consts.ENDPOINT_TYPE_PLATFORM: {
consts.RESOURCE_TYPE_SYSINV_DNS: ['PATCH', 'PUT'],
consts.RESOURCE_TYPE_SYSINV_CERTIFICATE: ['POST', 'DELETE'],
consts.RESOURCE_TYPE_SYSINV_USER: ['PATCH', 'PUT'],
@ -332,7 +332,7 @@ ROUTE_METHOD_MAP = {
consts.RESOURCE_TYPE_NETWORK_QUOTA_SET: ['PUT', 'DELETE'],
consts.RESOURCE_TYPE_QOS_POLICY: ['POST', 'PUT', 'DELETE'],
},
consts.ENDPOINT_TYPE_PATCHING: {
dccommon_consts.ENDPOINT_TYPE_PATCHING: {
PATCH_ACTION_GET_VERSION: ['GET'],
PATCH_ACTION_UPLOAD: ['POST'],
PATCH_ACTION_UPLOAD_DIR: ['POST'],
@ -345,7 +345,7 @@ ROUTE_METHOD_MAP = {
PATCH_ACTION_WHAT_REQS: ['GET'],
PATCH_ACTION_QUERY_DEPS: ['GET'],
},
consts.ENDPOINT_TYPE_IDENTITY: {
dccommon_consts.ENDPOINT_TYPE_IDENTITY: {
consts.RESOURCE_TYPE_IDENTITY_USERS:
['POST', 'PATCH', 'DELETE'],
consts.RESOURCE_TYPE_IDENTITY_GROUPS:

View File

@ -1,4 +1,4 @@
# Copyright 2017, 2021 Wind River
# Copyright 2017-2022 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -38,15 +38,15 @@ def is_space_available(partition, size):
def get_host_port_options(cfg):
if cfg.type == consts.ENDPOINT_TYPE_COMPUTE:
return cfg.compute.bind_host, cfg.compute.bind_port
elif cfg.type == consts.ENDPOINT_TYPE_PLATFORM:
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_PLATFORM:
return cfg.platform.bind_host, cfg.platform.bind_port
elif cfg.type == consts.ENDPOINT_TYPE_NETWORK:
return cfg.network.bind_host, cfg.network.bind_port
elif cfg.type == consts.ENDPOINT_TYPE_PATCHING:
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_PATCHING:
return cfg.patching.bind_host, cfg.patching.bind_port
elif cfg.type == consts.ENDPOINT_TYPE_VOLUME:
return cfg.volume.bind_host, cfg.volume.bind_port
elif cfg.type == consts.ENDPOINT_TYPE_IDENTITY:
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_IDENTITY:
return cfg.identity.bind_host, cfg.identity.bind_port
else:
LOG.error("Type: %s is undefined! Ignoring", cfg.type)
@ -56,15 +56,15 @@ def get_host_port_options(cfg):
def get_remote_host_port_options(cfg):
if cfg.type == consts.ENDPOINT_TYPE_COMPUTE:
return cfg.compute.remote_host, cfg.compute.remote_port
elif cfg.type == consts.ENDPOINT_TYPE_PLATFORM:
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_PLATFORM:
return cfg.platform.remote_host, cfg.platform.remote_port
elif cfg.type == consts.ENDPOINT_TYPE_NETWORK:
return cfg.network.remote_host, cfg.network.remote_port
elif cfg.type == consts.ENDPOINT_TYPE_PATCHING:
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_PATCHING:
return cfg.patching.remote_host, cfg.patching.remote_port
elif cfg.type == consts.ENDPOINT_TYPE_VOLUME:
return cfg.volume.remote_host, cfg.volume.remote_port
elif cfg.type == consts.ENDPOINT_TYPE_IDENTITY:
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_IDENTITY:
return cfg.identity.remote_host, cfg.identity.remote_port
else:
LOG.error("Type: %s is undefined! Ignoring", cfg.type)
@ -74,15 +74,15 @@ def get_remote_host_port_options(cfg):
def get_sync_endpoint(cfg):
if cfg.type == consts.ENDPOINT_TYPE_COMPUTE:
return cfg.compute.sync_endpoint
elif cfg.type == consts.ENDPOINT_TYPE_PLATFORM:
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_PLATFORM:
return cfg.platform.sync_endpoint
elif cfg.type == consts.ENDPOINT_TYPE_NETWORK:
return cfg.network.sync_endpoint
elif cfg.type == consts.ENDPOINT_TYPE_PATCHING:
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_PATCHING:
return cfg.patching.sync_endpoint
elif cfg.type == consts.ENDPOINT_TYPE_VOLUME:
return cfg.volume.sync_endpoint
elif cfg.type == consts.ENDPOINT_TYPE_IDENTITY:
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_IDENTITY:
return cfg.identity.sync_endpoint
else:
LOG.error("Type: %s is undefined! Ignoring", cfg.type)

View File

@ -1,4 +1,5 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright (c) 2018-2022 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -18,6 +19,7 @@
import eventlet
eventlet.monkey_patch(os=False)
import os
@ -30,13 +32,13 @@ from oslo_service import wsgi
import logging as std_logging
from dccommon import consts
from dcmanager.common import messaging as dcmanager_messaging
from dcorch.api import api_config
from dcorch.api import app
from dcorch.api.proxy.common import constants
from dcorch.common import config
from dcorch.common import consts
from dcorch.common import messaging
from dcorch.api.proxy.common import utils

View File

@ -1,5 +1,5 @@
# Copyright (c) 2016 Ericsson AB.
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -12,44 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
NOVA_QUOTA_FIELDS = ("metadata_items",
"cores",
"instances",
"ram",
"key_pairs",
"injected_files",
"injected_file_path_bytes",
"injected_file_content_bytes",
"server_group_members",
"server_groups",)
CINDER_QUOTA_FIELDS = ("volumes",
"volumes_iscsi",
"volumes_ceph",
"per_volume_gigabytes",
"groups",
"snapshots",
"snapshots_iscsi",
"snapshots_ceph",
"gigabytes",
"gigabytes_iscsi",
"gigabytes_ceph",
"backups",
"backup_gigabytes")
NEUTRON_QUOTA_FIELDS = ("network",
"subnet",
"subnetpool",
"rbac_policy",
"trunk",
"port",
"router",
"floatingip",
"security_group",
"security_group_rule",
)
from dccommon import consts as dccommon_consts
JOB_PROGRESS = "IN_PROGRESS"
@ -113,49 +76,18 @@ KEYPAIR_ID_DELIM = "/"
SHARED_CONFIG_STATE_MANAGED = "managed"
SHARED_CONFIG_STATE_UNMANAGED = "unmanaged"
ENDPOINT_TYPE_PLATFORM = "platform"
ENDPOINT_TYPE_VOLUME = "volume"
ENDPOINT_TYPE_COMPUTE = "compute"
ENDPOINT_TYPE_NETWORK = "network"
ENDPOINT_TYPE_PATCHING = "patching"
ENDPOINT_TYPE_IDENTITY = "identity"
ENDPOINT_TYPE_FM = "faultmanagement"
ENDPOINT_TYPE_NFV = "nfv"
ENDPOINT_TYPE_LOAD = "load"
ENDPOINT_TYPE_DC_CERT = 'dc-cert'
ENDPOINT_TYPE_FIRMWARE = 'firmware'
ENDPOINT_TYPE_KUBERNETES = 'kubernetes'
ENDPOINT_TYPE_KUBE_ROOTCA = 'kube-rootca'
# All endpoint types
ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_PATCHING,
ENDPOINT_TYPE_IDENTITY,
ENDPOINT_TYPE_LOAD,
ENDPOINT_TYPE_DC_CERT,
ENDPOINT_TYPE_FIRMWARE,
ENDPOINT_TYPE_KUBERNETES,
ENDPOINT_TYPE_KUBE_ROOTCA]
# All endpoint audit requests
# TODO(yuxing): move some constants to dccommon as part of general refactoring
# for maintainability in a future commit.
ENDPOINT_AUDIT_REQUESTS = {
ENDPOINT_TYPE_FIRMWARE: 'firmware_audit_requested',
ENDPOINT_TYPE_KUBERNETES: 'kubernetes_audit_requested',
ENDPOINT_TYPE_KUBE_ROOTCA: 'kube_rootca_update_audit_requested',
ENDPOINT_TYPE_LOAD: 'load_audit_requested',
ENDPOINT_TYPE_PATCHING: 'patch_audit_requested',
}
# Dcorch sync endpoint types
SYNC_ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_IDENTITY]
SYNC_ENDPOINT_TYPES_LIST = [dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY]
ENDPOINT_QUOTA_MAPPING = {
ENDPOINT_TYPE_COMPUTE: NOVA_QUOTA_FIELDS,
ENDPOINT_TYPE_NETWORK: NEUTRON_QUOTA_FIELDS,
ENDPOINT_TYPE_VOLUME: CINDER_QUOTA_FIELDS,
ENDPOINT_TYPE_COMPUTE: dccommon_consts.NOVA_QUOTA_FIELDS,
ENDPOINT_TYPE_NETWORK: dccommon_consts.NEUTRON_QUOTA_FIELDS,
ENDPOINT_TYPE_VOLUME: dccommon_consts.CINDER_QUOTA_FIELDS,
}
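Editor's note (illustrative sketch, not part of this change): ENDPOINT_QUOTA_MAPPING is typically used to narrow a quota payload down to the fields a given endpoint understands. A self-contained approximation with small stand-in field tuples and string keys (the real tuples now live in dccommon.consts):

NOVA_QUOTA_FIELDS = ("cores", "instances", "ram")
NEUTRON_QUOTA_FIELDS = ("network", "subnet", "port")
CINDER_QUOTA_FIELDS = ("volumes", "snapshots", "gigabytes")

ENDPOINT_QUOTA_MAPPING = {
    "compute": NOVA_QUOTA_FIELDS,
    "network": NEUTRON_QUOTA_FIELDS,
    "volume": CINDER_QUOTA_FIELDS,
}

def filter_quota_payload(endpoint_type, payload):
    """Keep only the quota fields that apply to the given endpoint type."""
    allowed = ENDPOINT_QUOTA_MAPPING.get(endpoint_type, ())
    return {k: v for k, v in payload.items() if k in allowed}

assert filter_quota_payload("compute", {"cores": 20, "port": 100}) == {"cores": 20}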
# DB sync agent endpoint

View File

@ -1,4 +1,5 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright (c) 2018-2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -16,6 +17,7 @@
import itertools
import six.moves
from dccommon import consts as dccommon_consts
from dcorch.common import consts
from dcorch.common import exceptions
from dcorch.objects import orchjob
@ -42,9 +44,9 @@ def get_batch_projects(batch_size, project_list, fillvalue=None):
def validate_quota_limits(payload):
for rsrc in payload:
# Check valid resource name
if rsrc not in itertools.chain(consts.CINDER_QUOTA_FIELDS,
consts.NOVA_QUOTA_FIELDS,
consts.NEUTRON_QUOTA_FIELDS):
if rsrc not in itertools.chain(dccommon_consts.CINDER_QUOTA_FIELDS,
dccommon_consts.NOVA_QUOTA_FIELDS,
dccommon_consts.NEUTRON_QUOTA_FIELDS):
raise exceptions.InvalidInputError
# Check valid quota limit value in case for put/post
if isinstance(payload, dict) and (not isinstance(

View File

@ -1,5 +1,5 @@
# Copyright (c) 2015 Ericsson AB
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -28,7 +28,7 @@ from sqlalchemy import ForeignKey, UniqueConstraint
from sqlalchemy.types import TypeDecorator, VARCHAR
from sqlalchemy.orm import relationship
from dcmanager.common import consts as dcm_consts
from dccommon import consts as dccommon_consts
from dcorch.common import consts
BASE = declarative_base()
@ -175,7 +175,7 @@ class Subcloud(BASE, OrchestratorBase):
# default management_state is None; could be set to 'deleting'
management_state = Column('management_state', String(64))
availability_status = Column('availability_status', String(64),
default=dcm_consts.AVAILABILITY_OFFLINE)
default=dccommon_consts.AVAILABILITY_OFFLINE)
capabilities = Column(JSONEncodedDict)
initial_sync_state = Column('initial_sync_state', String(64),
default=consts.INITIAL_SYNC_STATE_NONE)

View File

@ -1,5 +1,6 @@
# Copyright 2016 Ericsson AB
# Copyright (c) 2018-2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -15,7 +16,7 @@ import collections
from oslo_log import log
from dcorch.common import consts
from dccommon import consts
from dcorch.common import exceptions
from dcorch.drivers import base

View File

@ -1,4 +1,4 @@
# Copyright 2018-2021 Wind River
# Copyright 2018-2022 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -49,7 +49,7 @@ class FernetKeyManager(manager.Manager):
*args, **kwargs)
self.gsm = gsm
self.context = context.get_admin_context()
self.endpoint_type = consts.ENDPOINT_TYPE_PLATFORM
self.endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
self.resource_type = consts.RESOURCE_TYPE_SYSINV_FERNET_REPO
@classmethod

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2020-2022 Wind River Systems, Inc.
#
import eventlet
@ -23,7 +23,6 @@ from oslo_utils import timeutils
import random
from dccommon import consts as dccommon_consts
from dcmanager.common import consts as dcm_consts
from dcorch.common import consts as dco_consts
from dcorch.common import context
from dcorch.common import exceptions
@ -43,8 +42,8 @@ AUDIT_INTERVAL = 1200 # Default audit interval
# sync object endpoint type and subclass mappings
sync_object_class_map = {
dco_consts.ENDPOINT_TYPE_PLATFORM: SysinvSyncThread,
dco_consts.ENDPOINT_TYPE_IDENTITY: IdentitySyncThread,
dccommon_consts.ENDPOINT_TYPE_PLATFORM: SysinvSyncThread,
dccommon_consts.ENDPOINT_TYPE_IDENTITY: IdentitySyncThread,
dccommon_consts.ENDPOINT_TYPE_IDENTITY_OS: IdentitySyncThread
}
@ -130,8 +129,8 @@ class GenericSyncManager(object):
#
subclouds = db_api.subcloud_get_all(
self.context,
management_state=dcm_consts.MANAGEMENT_MANAGED,
availability_status=dcm_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=dco_consts.INITIAL_SYNC_STATE_COMPLETED)
# randomize to reduce likelihood of sync_lock contention
random.shuffle(subclouds)
@ -238,8 +237,8 @@ class GenericSyncManager(object):
# first update the state of the subcloud
self.update_subcloud_state(
subcloud_name,
management_state=dcm_consts.MANAGEMENT_UNMANAGED,
availability_status=dcm_consts.AVAILABILITY_OFFLINE)
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
availability_status=dccommon_consts.AVAILABILITY_OFFLINE)
# shutdown, optionally deleting queued work
if subcloud_name not in self.sync_objs:
LOG.error("Subcloud %s sync_objs do not exist" % subcloud_name)
@ -255,7 +254,7 @@ class GenericSyncManager(object):
# Someone has enqueued a sync job. set the endpoint sync_request to
# requested
subclouds = db_api.subcloud_get_all(
ctxt, management_state=dcm_consts.MANAGEMENT_MANAGED)
ctxt, management_state=dccommon_consts.MANAGEMENT_MANAGED)
for sc in subclouds:
GenericSyncManager.set_sync_request(ctxt, sc.region_name,
endpoint_type)
@ -337,14 +336,14 @@ class GenericSyncManager(object):
def is_subcloud_managed(self, subcloud_name):
# is this subcloud managed
sc = subcloud.Subcloud.get_by_name(self.context, subcloud_name)
return sc.management_state == dcm_consts.MANAGEMENT_MANAGED
return sc.management_state == dccommon_consts.MANAGEMENT_MANAGED
def is_subcloud_enabled(self, subcloud_name):
# is this subcloud enabled
sc = subcloud.Subcloud.get_by_name(self.context, subcloud_name)
# We only enable syncing if the subcloud is online and the initial
# sync has completed.
if (sc.availability_status == dcm_consts.AVAILABILITY_ONLINE and
if (sc.availability_status == dccommon_consts.AVAILABILITY_ONLINE and
sc.initial_sync_state == dco_consts.INITIAL_SYNC_STATE_COMPLETED):
return True
else:
@ -550,8 +549,8 @@ class GenericSyncManager(object):
# get a list of subclouds that are enabled
subclouds = db_api.subcloud_get_all(
self.context,
management_state=dcm_consts.MANAGEMENT_MANAGED,
availability_status=dcm_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=dco_consts.INITIAL_SYNC_STATE_COMPLETED)
# randomize to reduce likelihood of sync_lock contention

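The managed/enabled checks above all collapse to the same predicate against the relocated constants. A condensed restatement as a sketch, with the helper name chosen for illustration only:

    from dccommon import consts as dccommon_consts
    from dcorch.common import consts as dco_consts

    def ready_to_sync(sc):
        # Only managed, online subclouds whose initial sync has completed
        # are picked up by the sync and audit loops.
        return (sc.management_state == dccommon_consts.MANAGEMENT_MANAGED and
                sc.availability_status == dccommon_consts.AVAILABILITY_ONLINE and
                sc.initial_sync_state == dco_consts.INITIAL_SYNC_STATE_COMPLETED)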
View File

@ -1,4 +1,5 @@
# Copyright 2016 Ericsson AB
# Copyright (c) 2018-2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -270,13 +271,13 @@ class QuotaManager(manager.Manager):
resource_with_service['cinder'] = collections.defaultdict(dict)
resource_with_service['neutron'] = collections.defaultdict(dict)
for limit in region_new_limit:
if limit in consts.NOVA_QUOTA_FIELDS:
if limit in dccommon_consts.NOVA_QUOTA_FIELDS:
resource_with_service['nova'].update(
{limit: region_new_limit[limit]})
elif limit in consts.CINDER_QUOTA_FIELDS:
elif limit in dccommon_consts.CINDER_QUOTA_FIELDS:
resource_with_service['cinder'].update(
{limit: region_new_limit[limit]})
elif limit in consts.NEUTRON_QUOTA_FIELDS:
elif limit in dccommon_consts.NEUTRON_QUOTA_FIELDS:
resource_with_service['neutron'].update(
{limit: region_new_limit[limit]})
return resource_with_service

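A self-contained sketch of the same per-service classification using the relocated field tuples; the function name and the plain-dict return shape are illustrative:

    from dccommon import consts as dccommon_consts

    def split_limits_by_service(limits):
        # Group a flat {resource: limit} dict by the service that owns it,
        # mirroring the QuotaManager logic above.
        grouped = {'nova': {}, 'cinder': {}, 'neutron': {}}
        for name, value in limits.items():
            if name in dccommon_consts.NOVA_QUOTA_FIELDS:
                grouped['nova'][name] = value
            elif name in dccommon_consts.CINDER_QUOTA_FIELDS:
                grouped['cinder'][name] = value
            elif name in dccommon_consts.NEUTRON_QUOTA_FIELDS:
                grouped['neutron'][name] = value
        return grouped

    split_limits_by_service({'cores': 20, 'gigabytes': 500, 'port': 100})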
View File

@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2020-2022 Wind River Systems, Inc.
#
import six
@ -23,7 +23,6 @@ import oslo_messaging
import resource
from dccommon import consts as dccommon_consts
from dcmanager.common import consts as dcm_consts
from dcorch.common import consts
from dcorch.common import context
from dcorch.common import exceptions
@ -248,8 +247,8 @@ class EngineService(service.Service):
return
# Check if the subcloud is ready to sync.
if (management_state == dcm_consts.MANAGEMENT_MANAGED) and \
(availability_status == dcm_consts.AVAILABILITY_ONLINE):
if (management_state == dccommon_consts.MANAGEMENT_MANAGED) and \
(availability_status == dccommon_consts.AVAILABILITY_ONLINE):
# Update the subcloud state and schedule an initial sync
self.gsm.update_subcloud_state(
subcloud_name,

View File

@ -42,7 +42,7 @@ class IdentitySyncThread(SyncThread):
engine_id=engine_id)
self.region_name = subcloud_name
if not self.endpoint_type:
self.endpoint_type = consts.ENDPOINT_TYPE_IDENTITY
self.endpoint_type = dccommon_consts.ENDPOINT_TYPE_IDENTITY
self.sync_handler_map = {
consts.RESOURCE_TYPE_IDENTITY_USERS:
self.sync_identity_resource,

View File

@ -1,4 +1,4 @@
# Copyright 2017-2020 Wind River
# Copyright 2017-2022 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -58,7 +58,7 @@ class SysinvSyncThread(SyncThread):
endpoint_type=endpoint_type,
engine_id=engine_id)
if not self.endpoint_type:
self.endpoint_type = consts.ENDPOINT_TYPE_PLATFORM
self.endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
self.sync_handler_map = {
consts.RESOURCE_TYPE_SYSINV_DNS:
self.sync_platform_resource,

View File

@ -1,4 +1,4 @@
# Copyright 2017-2020 Wind River
# Copyright 2017-2022 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -24,7 +24,6 @@ from oslo_utils import timeutils
from dccommon import consts as dccommon_consts
from dccommon.endpoint_cache import EndpointCache
from dcdbsync.dbsyncclient import client as dbsyncclient
from dcmanager.common import consts as dcm_consts
from dcmanager.rpc import client as dcmanager_rpc_client
from dcorch.common import consts
from dcorch.common import context
@ -97,7 +96,7 @@ class SyncThread(object):
def is_subcloud_managed(self):
# is this subcloud managed
subcloud = Subcloud.get_by_name(self.ctxt, self.subcloud_name)
return subcloud.management_state == dcm_consts.MANAGEMENT_MANAGED
return subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED
def is_subcloud_enabled(self):
# is this subcloud enabled
@ -105,7 +104,7 @@ class SyncThread(object):
# We only enable syncing if the subcloud is online and the initial
# sync has completed.
if subcloud.availability_status == dcm_consts.AVAILABILITY_ONLINE and \
if subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE and \
subcloud.initial_sync_state == consts.INITIAL_SYNC_STATE_COMPLETED:
return True
else:
@ -115,7 +114,7 @@ class SyncThread(object):
# base implementation of initializing the master client.
# The specific SyncThread subclasses may extend this.
if self.endpoint_type in consts.ENDPOINT_TYPES_LIST:
if self.endpoint_type in dccommon_consts.ENDPOINT_TYPES_LIST:
config = cfg.CONF.endpoint_cache
self.admin_session = EndpointCache.get_admin_session(
config.auth_uri,
@ -172,7 +171,7 @@ class SyncThread(object):
return
config = None
if self.endpoint_type in consts.ENDPOINT_TYPES_LIST:
if self.endpoint_type in dccommon_consts.ENDPOINT_TYPES_LIST:
config = cfg.CONF.endpoint_cache
self.sc_admin_session = EndpointCache.get_admin_session(
sc_auth_url,
@ -316,11 +315,11 @@ class SyncThread(object):
# Update dcmanager with the current sync status.
subcloud_enabled = self.is_subcloud_enabled()
if sync_requests:
self.set_sync_status(dcm_consts.SYNC_STATUS_OUT_OF_SYNC)
sync_status_start = dcm_consts.SYNC_STATUS_OUT_OF_SYNC
self.set_sync_status(dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
sync_status_start = dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
else:
self.set_sync_status(dcm_consts.SYNC_STATUS_IN_SYNC)
sync_status_start = dcm_consts.SYNC_STATUS_IN_SYNC
self.set_sync_status(dccommon_consts.SYNC_STATUS_IN_SYNC)
sync_status_start = dccommon_consts.SYNC_STATUS_IN_SYNC
# Failed orch requests were taken into consideration when reporting
# sync status to the dcmanager. They need to be removed from the
@ -423,19 +422,19 @@ class SyncThread(object):
states=states)
if (sync_requests and
sync_status_start != dcm_consts.SYNC_STATUS_OUT_OF_SYNC):
self.set_sync_status(dcm_consts.SYNC_STATUS_OUT_OF_SYNC)
sync_status_start != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC):
self.set_sync_status(dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
LOG.info("End of resource sync out-of-sync. " +
str(len(sync_requests)) + " sync request(s)",
extra=self.log_extra)
elif sync_requests and request_aborted:
if sync_status_start != dcm_consts.SYNC_STATUS_OUT_OF_SYNC:
self.set_sync_status(dcm_consts.SYNC_STATUS_OUT_OF_SYNC)
if sync_status_start != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC:
self.set_sync_status(dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
LOG.info("End of resource sync out-of-sync. " +
str(len(sync_requests)) + " sync request(s)" +
": request_aborted", extra=self.log_extra)
elif sync_status_start != dcm_consts.SYNC_STATUS_IN_SYNC:
self.set_sync_status(dcm_consts.SYNC_STATUS_IN_SYNC)
elif sync_status_start != dccommon_consts.SYNC_STATUS_IN_SYNC:
self.set_sync_status(dccommon_consts.SYNC_STATUS_IN_SYNC)
LOG.info("End of resource sync in-sync. " +
str(len(sync_requests)) + " sync request(s)",
extra=self.log_extra)

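The status reporting above boils down to: out-of-sync while requests are still queued, in-sync once the queue drains, with set_sync_status() only called when the value actually changes. A reduced sketch under that reading; the helper name is illustrative:

    from dccommon import consts as dccommon_consts

    def desired_sync_status(sync_requests):
        # Pending (or aborted) requests leave the endpoint out of sync;
        # an empty queue lets it be reported back in sync to dcmanager.
        if sync_requests:
            return dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
        return dccommon_consts.SYNC_STATUS_IN_SYNC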
View File

@ -1,4 +1,5 @@
# Copyright (c) 2017 Ericsson AB
# Copyright (c) 2018-2022 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -22,6 +23,7 @@ from oslo_db import options
from oslo_utils import timeutils
from oslo_utils import uuidutils
from dccommon import consts as dccommon_consts
from dcorch.common import config
from dcorch.common import consts
from dcorch.common import exceptions
@ -114,14 +116,14 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
def test_create_orch_job(self):
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_DNS)
endpoint_type = consts.ENDPOINT_TYPE_PLATFORM
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
orch_job = self.create_orch_job(self.ctx,
resource.id,
endpoint_type,
operation_type)
self.assertIsNotNone(orch_job)
self.assertEqual(consts.ENDPOINT_TYPE_PLATFORM,
self.assertEqual(dccommon_consts.ENDPOINT_TYPE_PLATFORM,
orch_job.endpoint_type)
created_orch_jobs = db_api.orch_job_get_all(
@ -139,7 +141,7 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
def no_test_unique_key_orch_job_uuid(self):
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_DNS)
endpoint_type = consts.ENDPOINT_TYPE_PLATFORM
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
orch_job = self.create_orch_job(self.ctx,
resource.id,
@ -173,7 +175,7 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
consts.RESOURCE_TYPE_SYSINV_DNS)
target_region_name = "RegionOne"
endpoint_type = consts.ENDPOINT_TYPE_PLATFORM
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
values = {}
orch_job = self.create_orch_job(self.ctx,
@ -211,7 +213,7 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
resource = self.create_default_resource(
consts.RESOURCE_TYPE_SYSINV_DNS)
endpoint_type = consts.ENDPOINT_TYPE_PLATFORM
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
values = {}
orch_job = self.create_orch_job(self.ctx,
@ -234,7 +236,7 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
resource_sysinv = self.create_default_resource(
consts.RESOURCE_TYPE_SYSINV_DNS)
endpoint_type = consts.ENDPOINT_TYPE_PLATFORM
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
values = {}
orch_job_sysinv = self.create_orch_job(self.ctx,
@ -327,7 +329,7 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
resource_sysinv = self.create_default_resource(
consts.RESOURCE_TYPE_SYSINV_DNS)
endpoint_type = consts.ENDPOINT_TYPE_PLATFORM
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
values = {}
orch_job_sysinv = self.create_orch_job(self.ctx,
@ -386,7 +388,7 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
resource_sysinv = self.create_default_resource(
consts.RESOURCE_TYPE_SYSINV_DNS)
endpoint_type = consts.ENDPOINT_TYPE_PLATFORM
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
values = {}
orch_job_sysinv = self.create_orch_job(self.ctx,
@ -429,7 +431,7 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
orch_job_flavor.id,
target_region_name)
attrs_endpoint_type = consts.ENDPOINT_TYPE_PLATFORM
attrs_endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
attrs_resource_type = consts.RESOURCE_TYPE_SYSINV_DNS
orch_requests_attrs_1 = db_api.orch_request_get_by_attrs(
self.ctx,

View File

@ -1,5 +1,5 @@
# Copyright (c) 2015 Ericsson AB
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -20,7 +20,7 @@ from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db import options
from dcmanager.common import consts as dcm_consts
from dccommon import consts as dccommon_consts
from dcorch.common import config
from dcorch.common import exceptions
from dcorch.db import api as api
@ -61,7 +61,7 @@ class DBAPISubcloudTest(base.OrchestratorTestCase):
def create_default_subcloud(self, ctxt):
region_name = 'RegionOne'
software_version = '17.07'
availability_status = dcm_consts.AVAILABILITY_ONLINE
availability_status = dccommon_consts.AVAILABILITY_ONLINE
subcloud = self.create_subcloud(
ctxt, region_name,
software_version=software_version,
@ -86,7 +86,7 @@ class DBAPISubcloudTest(base.OrchestratorTestCase):
def test_update_subcloud(self):
subcloud = self.create_default_subcloud(self.ctx)
availability_status_update = dcm_consts.AVAILABILITY_OFFLINE
availability_status_update = dccommon_consts.AVAILABILITY_OFFLINE
software_version_update = subcloud.software_version + '1'
values = {'availability_status': availability_status_update,
'software_version': software_version_update}
@ -112,7 +112,7 @@ class DBAPISubcloudTest(base.OrchestratorTestCase):
def test_delete_all_subcloud(self):
region_names = ['RegionOne', 'RegionTwo']
software_version = '17.07'
availability_status = dcm_consts.AVAILABILITY_ONLINE
availability_status = dccommon_consts.AVAILABILITY_ONLINE
for region_name in region_names:
subcloud = self.create_subcloud(
@ -156,7 +156,7 @@ class DBAPISubcloudTest(base.OrchestratorTestCase):
def test_subcloud_get_by_availability_status(self):
region_names = ['RegionOne', 'RegionTwo']
software_version = '17.07'
availability_status = dcm_consts.AVAILABILITY_ONLINE
availability_status = dccommon_consts.AVAILABILITY_ONLINE
for region_name in region_names:
subcloud = self.create_subcloud(
self.ctx, region_name,
@ -166,7 +166,7 @@ class DBAPISubcloudTest(base.OrchestratorTestCase):
region_names = ['RegionThree', 'RegionFour']
software_version = '17.07'
availability_status = dcm_consts.AVAILABILITY_OFFLINE
availability_status = dccommon_consts.AVAILABILITY_OFFLINE
for region_name in region_names:
subcloud = self.create_subcloud(
self.ctx, region_name,
@ -176,20 +176,20 @@ class DBAPISubcloudTest(base.OrchestratorTestCase):
by_statuses = db_api.subcloud_get_all(
self.ctx,
availability_status=dcm_consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
self.assertIsNotNone(by_statuses)
for by_status in by_statuses:
self.assertEqual(dcm_consts.AVAILABILITY_ONLINE,
self.assertEqual(dccommon_consts.AVAILABILITY_ONLINE,
by_status.availability_status)
by_statuses = db_api.subcloud_get_all(
self.ctx,
availability_status=dcm_consts.AVAILABILITY_OFFLINE)
availability_status=dccommon_consts.AVAILABILITY_OFFLINE)
self.assertIsNotNone(by_statuses)
for by_status in by_statuses:
self.assertEqual(dcm_consts.AVAILABILITY_OFFLINE,
self.assertEqual(dccommon_consts.AVAILABILITY_OFFLINE,
by_status.availability_status)
def test_subcloud_duplicate_region_names(self):

View File

@ -1,4 +1,4 @@
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2022 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -14,7 +14,7 @@
import mock
from dcmanager.common import consts as dcm_consts
from dccommon import consts as dccommon_consts
from dcorch.common import consts
from dcorch.common import exceptions
from dcorch.db.sqlalchemy import api as db_api
@ -46,8 +46,8 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
def create_subcloud_static(ctxt, name, **kwargs):
values = {
'software_version': '10.04',
'management_state': dcm_consts.MANAGEMENT_MANAGED,
'availability_status': dcm_consts.AVAILABILITY_ONLINE,
'management_state': dccommon_consts.MANAGEMENT_MANAGED,
'availability_status': dccommon_consts.AVAILABILITY_ONLINE,
'initial_sync_state': '',
'capabilities': {},
}
@ -85,8 +85,8 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
self.create_subcloud_static(
self.ctx,
name='subcloud1',
management_state=dcm_consts.MANAGEMENT_MANAGED,
availability_status=dcm_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
gsm = generic_sync_manager.GenericSyncManager(self.engine_id)
@ -97,23 +97,23 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
# Compare all states (match)
match = gsm.subcloud_state_matches(
'subcloud1',
management_state=dcm_consts.MANAGEMENT_MANAGED,
availability_status=dcm_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
self.assertTrue(match)
# Compare all states (not a match)
match = gsm.subcloud_state_matches(
'subcloud1',
management_state=dcm_consts.MANAGEMENT_MANAGED,
availability_status=dcm_consts.AVAILABILITY_OFFLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_OFFLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
self.assertFalse(match)
# Compare one state (match)
match = gsm.subcloud_state_matches(
'subcloud1',
availability_status=dcm_consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
self.assertTrue(match)
# Compare one state (not a match)
@ -127,8 +127,8 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
self.create_subcloud_static(
self.ctx,
name='subcloud1',
management_state=dcm_consts.MANAGEMENT_MANAGED,
availability_status=dcm_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
gsm = generic_sync_manager.GenericSyncManager(self.engine_id)
@ -141,8 +141,8 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
exceptions.SubcloudNotFound,
gsm.subcloud_state_matches,
'subcloud2',
management_state=dcm_consts.MANAGEMENT_MANAGED,
availability_status=dcm_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
def test_update_subcloud_state(self):
@ -150,8 +150,8 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
self.create_subcloud_static(
self.ctx,
name='subcloud1',
management_state=dcm_consts.MANAGEMENT_MANAGED,
availability_status=dcm_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
gsm = generic_sync_manager.GenericSyncManager(self.engine_id)
@ -162,28 +162,28 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
# Update all states
gsm.update_subcloud_state(
'subcloud1',
management_state=dcm_consts.MANAGEMENT_UNMANAGED,
availability_status=dcm_consts.AVAILABILITY_OFFLINE,
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
availability_status=dccommon_consts.AVAILABILITY_OFFLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED)
# Compare all states (match)
match = gsm.subcloud_state_matches(
'subcloud1',
management_state=dcm_consts.MANAGEMENT_UNMANAGED,
availability_status=dcm_consts.AVAILABILITY_OFFLINE,
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
availability_status=dccommon_consts.AVAILABILITY_OFFLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED)
self.assertTrue(match)
# Update one state
gsm.update_subcloud_state(
'subcloud1',
availability_status=dcm_consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
# Compare all states (match)
match = gsm.subcloud_state_matches(
'subcloud1',
management_state=dcm_consts.MANAGEMENT_UNMANAGED,
availability_status=dcm_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED)
self.assertTrue(match)
@ -192,8 +192,8 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
self.create_subcloud_static(
self.ctx,
name='subcloud1',
management_state=dcm_consts.MANAGEMENT_MANAGED,
availability_status=dcm_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
gsm = generic_sync_manager.GenericSyncManager(self.engine_id)
@ -206,6 +206,6 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
exceptions.SubcloudNotFound,
gsm.update_subcloud_state,
'subcloud2',
management_state=dcm_consts.MANAGEMENT_MANAGED,
availability_status=dcm_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)

View File

@ -1,4 +1,4 @@
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2022 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -14,7 +14,7 @@
import mock
from dcmanager.common import consts as dcm_consts
from dccommon import consts as dccommon_consts
from dcorch.common import consts
from dcorch.db.sqlalchemy import api as db_api
from dcorch.engine import initial_sync_manager
@ -70,7 +70,7 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
def create_subcloud_static(ctxt, name, **kwargs):
values = {
'software_version': '10.04',
'availability_status': dcm_consts.AVAILABILITY_ONLINE,
'availability_status': dccommon_consts.AVAILABILITY_ONLINE,
}
values.update(kwargs)
return db_api.subcloud_create(ctxt, name, values=values)