From 9180e7df84840745138827688cc8a482dd02f09c Mon Sep 17 00:00:00 2001 From: Robert Church Date: Thu, 6 Feb 2020 16:44:48 -0500 Subject: [PATCH] Add various locking support to DCManager Support the following lock updates in DCManager: - Provide a function decorator in common/utils.py for a synchronized lock that supports both external locks and internal fair locks. This decorator is set up, by default, for external locks. - Refactor update_subcloud_endpoint_status() so that a common private method is provided that is suitable for locking. - Update subcloud_manager.py to provide a function decorator to produce an internal fair lock based on a unique subcloud name. This decorator is specifically designed to be used with _update_subcloud_endpoint_status(). This will ensure that the multi-threaded DCManager process will only update subcloud endpoint information in a synchronized manner. - Provide an API lock to the SubcloudsController for the post, patch, and delete operations Update distributedcloud requirements and spec file to require oslo.concurrency >= 3.29.1. This is the latest version supported by OpenStack Stein and is a version containing fair lock support. Update unit tests: - Added unit test for update_subcloud_endpoint_status. This verifies high level functionality and the calling of fair locks based on the unique subcloud name. - Fixed intermittent failure seen when executing the add_subcloud unit test by mocking threading.Thread. - Leverage the use of oslo_concurrency's behavior to use the OSLO_LOCK_PATH environment variable if the lock_path config option is not set. Currently this is not set as we specify a hard-coded external lock path at runtime. This allows us to set the lock path for tox tests via the test environment. 
Change-Id: Id1902e8553408cbdd60b648efc39d59e8edcdb55 Depends-On: https://review.opendev.org/#/c/707188/ Closes-Bug: #1855359 Signed-off-by: Robert Church --- distributedcloud/centos/distributedcloud.spec | 2 +- .../dcmanager/api/controllers/v1/subclouds.py | 6 + distributedcloud/dcmanager/common/utils.py | 55 ++++ .../dcmanager/manager/subcloud_manager.py | 136 ++++---- .../tests/unit/manager/test_service.py | 4 +- .../unit/manager/test_subcloud_manager.py | 303 ++++++++++++++---- distributedcloud/requirements.txt | 2 +- distributedcloud/tox.ini | 1 + 8 files changed, 386 insertions(+), 123 deletions(-) diff --git a/distributedcloud/centos/distributedcloud.spec b/distributedcloud/centos/distributedcloud.spec index 50a8026e0..ab57ca268 100644 --- a/distributedcloud/centos/distributedcloud.spec +++ b/distributedcloud/centos/distributedcloud.spec @@ -43,7 +43,7 @@ BuildRequires: python-keyring BuildRequires: python-keystonemiddleware BuildRequires: python-keystoneauth1 >= 3.1.0 BuildRequires: python-netaddr -BuildRequires: python-oslo-concurrency +BuildRequires: python-oslo-concurrency >= 3.29.1 BuildRequires: python-oslo-config BuildRequires: python-oslo-context BuildRequires: python-oslo-db diff --git a/distributedcloud/dcmanager/api/controllers/v1/subclouds.py b/distributedcloud/dcmanager/api/controllers/v1/subclouds.py index aef9f347a..55714f26b 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/subclouds.py +++ b/distributedcloud/dcmanager/api/controllers/v1/subclouds.py @@ -43,6 +43,7 @@ from dcmanager.common import consts from dcmanager.common import exceptions from dcmanager.common.i18n import _ from dcmanager.common import install_consts +from dcmanager.common import utils from dcmanager.db import api as db_api from dcmanager.drivers.openstack.sysinv_v1 import SysinvClient from dcmanager.rpc import client as rpc_client @@ -55,6 +56,8 @@ SYSTEM_MODE_DUPLEX = "duplex" SYSTEM_MODE_SIMPLEX = "simplex" SYSTEM_MODE_DUPLEX_DIRECT = "duplex-direct" +LOCK_NAME 
= 'SubcloudsController' + class SubcloudsController(object): VERSION_ALIASES = { @@ -436,6 +439,7 @@ class SubcloudsController(object): return subcloud_dict + @utils.synchronized(LOCK_NAME) @index.when(method='POST', template='json') def post(self, subcloud_ref=None): """Create and deploy a new subcloud. @@ -525,6 +529,7 @@ class SubcloudsController(object): else: pecan.abort(400, _('Invalid request')) + @utils.synchronized(LOCK_NAME) @index.when(method='PATCH', template='json') def patch(self, subcloud_ref=None): """Update a subcloud. @@ -585,6 +590,7 @@ class SubcloudsController(object): LOG.exception(e) pecan.abort(500, _('Unable to update subcloud')) + @utils.synchronized(LOCK_NAME) @index.when(method='delete', template='json') def delete(self, subcloud_ref): """Delete a subcloud. diff --git a/distributedcloud/dcmanager/common/utils.py b/distributedcloud/dcmanager/common/utils.py index a59c5bf77..2a5f33605 100644 --- a/distributedcloud/dcmanager/common/utils.py +++ b/distributedcloud/dcmanager/common/utils.py @@ -20,14 +20,27 @@ # of an applicable Wind River license agreement. # +import grp import itertools +import os +import pwd import six.moves +import tsconfig.tsconfig as tsc + +from oslo_concurrency import lockutils +from oslo_config import cfg +from oslo_log import log as logging from dcmanager.common import consts from dcmanager.common import exceptions from dcmanager.db import api as db_api from dcmanager.drivers.openstack import vim +LOG = logging.getLogger(__name__) + +DC_MANAGER_USERNAME = "root" +DC_MANAGER_GRPNAME = "root" + def get_import_path(cls): return cls.__module__ + "." 
+ cls.__name__ @@ -90,3 +103,45 @@ def get_sw_update_opts(context, return db_api.sw_update_opts_w_name_db_model_to_dict( sw_update_opts_ref, consts.SW_UPDATE_DEFAULT_TITLE) + + +def ensure_lock_path(): + # Determine the oslo_concurrency lock path: + # 1) First, from the oslo_concurrency section of the config + # a) If not set via an option default or config file, oslo_concurrency + # sets it to the OSLO_LOCK_PATH env variable + # 2) Then if not set, set it to a specific directory under + # tsc.VOLATILE_PATH + + if cfg.CONF.oslo_concurrency.lock_path: + lock_path = cfg.CONF.oslo_concurrency.lock_path + else: + lock_path = os.path.join(tsc.VOLATILE_PATH, "dcmanager") + + if not os.path.isdir(lock_path): + try: + uid = pwd.getpwnam(DC_MANAGER_USERNAME).pw_uid + gid = grp.getgrnam(DC_MANAGER_GRPNAME).gr_gid + os.makedirs(lock_path) + os.chown(lock_path, uid, gid) + LOG.info("Created directory=%s" % lock_path) + + except OSError as e: + LOG.exception("makedir %s OSError=%s encountered" % + (lock_path, e)) + return None + + return lock_path + + +def synchronized(name, external=True, fair=False): + if external: + prefix = 'DCManager-' + lock_path = ensure_lock_path() + else: + prefix = None + lock_path = None + + return lockutils.synchronized(name, lock_file_prefix=prefix, + external=external, lock_path=lock_path, + semaphores=None, delay=0.01, fair=fair) diff --git a/distributedcloud/dcmanager/manager/subcloud_manager.py b/distributedcloud/dcmanager/manager/subcloud_manager.py index d9cebe606..5952484dc 100644 --- a/distributedcloud/dcmanager/manager/subcloud_manager.py +++ b/distributedcloud/dcmanager/manager/subcloud_manager.py @@ -44,6 +44,7 @@ from dcmanager.common import context from dcmanager.common import exceptions from dcmanager.common.i18n import _ from dcmanager.common import manager +from dcmanager.common import utils from dcmanager.db import api as db_api from dcmanager.drivers.openstack.sysinv_v1 import SysinvClient from dcmanager.manager.subcloud_install 
import SubcloudInstall @@ -77,6 +78,23 @@ USERS_TO_REPLICATE = [ SERVICES_USER = 'services' +def sync_update_subcloud_endpoint_status(func): + """Synchronized lock decorator for _update_subcloud_endpoint_status. """ + + def _get_lock_and_call(*args, **kwargs): + """Get a single fair lock per subcloud based on subcloud name. """ + + # subcloud name is the 3rd argument to + # _update_subcloud_endpoint_status() + @utils.synchronized(args[2], external=False, fair=True) + def _call_func(*args, **kwargs): + return func(*args, **kwargs) + + return _call_func(*args, **kwargs) + + return _get_lock_and_call + + class SubcloudManager(manager.Manager): """Manages tasks related to subclouds.""" @@ -680,15 +698,16 @@ class SubcloudManager(manager.Manager): return db_api.subcloud_db_model_to_dict(subcloud) - def _update_endpoint_status_for_subcloud(self, context, subcloud_id, - endpoint_type, sync_status, - alarmable): - """Update subcloud endpoint status + def _update_online_managed_subcloud(self, context, subcloud_id, + endpoint_type, sync_status, + alarmable): + """Update online/managed subcloud endpoint status :param context: request context object :param subcloud_id: id of subcloud to update :param endpoint_type: endpoint type to update :param sync_status: sync status to set + :param alarmable: controls raising an alarm if applicable """ subcloud_status_list = [] @@ -828,6 +847,57 @@ class SubcloudManager(manager.Manager): else: LOG.error("Subcloud not found:%s" % subcloud_id) + @sync_update_subcloud_endpoint_status + def _update_subcloud_endpoint_status( + self, context, + subcloud_name, + endpoint_type=None, + sync_status=consts.SYNC_STATUS_OUT_OF_SYNC, + alarmable=True): + """Update subcloud endpoint status + + :param context: request context object + :param subcloud_name: name of subcloud to update + :param endpoint_type: endpoint type to update + :param sync_status: sync status to set + :param alarmable: controls raising an alarm if applicable + """ + + if not 
subcloud_name: + raise exceptions.BadRequest( + resource='subcloud', + msg='Subcloud name not provided') + + try: + subcloud = db_api.subcloud_get_by_name(context, subcloud_name) + except Exception as e: + LOG.exception(e) + raise e + + # Only allow updating the sync status if managed and online. + # This means if a subcloud is going offline or unmanaged, then + # the sync status update must be done first. + if (((subcloud.availability_status == + consts.AVAILABILITY_ONLINE) + and (subcloud.management_state == + consts.MANAGEMENT_MANAGED)) + or (sync_status != consts.SYNC_STATUS_IN_SYNC)): + + # update a single subcloud + try: + self._update_online_managed_subcloud(context, + subcloud.id, + endpoint_type, + sync_status, + alarmable) + except Exception as e: + LOG.exception(e) + raise e + else: + LOG.info("Ignoring unmanaged/offline subcloud sync_status " + "update for subcloud:%s endpoint:%s sync:%s" % + (subcloud_name, endpoint_type, sync_status)) + def update_subcloud_endpoint_status( self, context, subcloud_name=None, @@ -840,61 +910,15 @@ class SubcloudManager(manager.Manager): :param subcloud_name: name of subcloud to update :param endpoint_type: endpoint type to update :param sync_status: sync status to set + :param alarmable: controls raising an alarm if applicable """ - subcloud = None - if subcloud_name: - try: - subcloud = db_api.subcloud_get_by_name(context, subcloud_name) - except Exception as e: - LOG.exception(e) - raise e - - # Only allow updating the sync status if managed and online. - # This means if a subcloud is going offline or unmanaged, then - # the sync status update must be done first. 
- if (((subcloud.availability_status == - consts.AVAILABILITY_ONLINE) - and (subcloud.management_state == - consts.MANAGEMENT_MANAGED)) - or (sync_status != consts.SYNC_STATUS_IN_SYNC)): - - # update a single subcloud - try: - self._update_endpoint_status_for_subcloud(context, - subcloud.id, - endpoint_type, - sync_status, - alarmable) - except Exception as e: - LOG.exception(e) - raise e - else: - LOG.info("Ignoring unmanaged/offline subcloud sync_status " - "update for subcloud:%s endpoint:%s sync:%s" % - (subcloud_name, endpoint_type, sync_status)) - + self._update_subcloud_endpoint_status( + context, subcloud_name, endpoint_type, sync_status, alarmable) else: # update all subclouds for subcloud in db_api.subcloud_get_all(context): - if (((subcloud.availability_status == - consts.AVAILABILITY_ONLINE) - and (subcloud.management_state == - consts.MANAGEMENT_MANAGED)) - or (sync_status != consts.SYNC_STATUS_IN_SYNC)): - - try: - self._update_endpoint_status_for_subcloud( - context, - subcloud.id, - endpoint_type, - sync_status, - alarmable) - except Exception as e: - LOG.exception(e) - raise e - else: - LOG.info("Ignoring unmanaged/offline subcloud sync_status " - "update for subcloud:%s endpoint:%s sync:%s" % - (subcloud.name, endpoint_type, sync_status)) + self._update_subcloud_endpoint_status( + context, subcloud.name, endpoint_type, sync_status, + alarmable) diff --git a/distributedcloud/dcmanager/tests/unit/manager/test_service.py b/distributedcloud/dcmanager/tests/unit/manager/test_service.py index 5f0d12861..5213afdbb 100644 --- a/distributedcloud/dcmanager/tests/unit/manager/test_service.py +++ b/distributedcloud/dcmanager/tests/unit/manager/test_service.py @@ -118,7 +118,7 @@ class TestDCManagerService(base.DCManagerTestCase): self.service_obj.init_tgm() self.service_obj.init_managers() self.service_obj.delete_subcloud( - self.context, subcloud_id='1') + self.context, subcloud_id=1) mock_subcloud_manager().delete_subcloud.\ 
assert_called_once_with(self.context, mock.ANY) @@ -129,7 +129,7 @@ class TestDCManagerService(base.DCManagerTestCase): self.service_obj.init_tgm() self.service_obj.init_managers() self.service_obj.update_subcloud( - self.context, subcloud_id='1', management_state='testmgmtstatus') + self.context, subcloud_id=1, management_state='testmgmtstatus') mock_subcloud_manager().update_subcloud.\ assert_called_once_with(self.context, mock.ANY, mock.ANY, mock.ANY, mock.ANY) diff --git a/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py b/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py index aa6f98ee3..2735a1896 100644 --- a/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py +++ b/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py @@ -19,64 +19,76 @@ import mock -from oslo_config import cfg +from oslo_concurrency import lockutils from oslo_utils import timeutils import sys sys.modules['fm_core'] = mock.Mock() -from dcorch.rpc import client as dcorch_rpc_client +import threading from dcmanager.common import consts from dcmanager.common import exceptions +from dcmanager.db.sqlalchemy import api as db_api from dcmanager.manager import subcloud_manager from dcmanager.tests import base from dcmanager.tests import utils +from dcorch.common import consts as dcorch_consts +from dcorch.rpc import client as dcorch_rpc_client from ddt import ddt from ddt import file_data -CONF = cfg.CONF -FAKE_ID = '1' -FAKE_SUBCLOUD_DATA = {"name": "subcloud1", - "description": "subcloud1 description", - "location": "subcloud1 location", - "system_mode": "duplex", - "management_subnet": "192.168.101.0/24", - "management_start_address": "192.168.101.3", - "management_end_address": "192.168.101.4", - "management_gateway_address": "192.168.101.1", - "systemcontroller_gateway_address": "192.168.204.101", - "external_oam_subnet": "10.10.10.0/24", - "external_oam_gateway_address": "10.10.10.1", - 
"external_oam_floating_address": "10.10.10.12"} -FAKE_SUBCLOUD_INSTALL_VALUES = { - 'image': 'image: http://128.224.115.21/iso/bootimage.iso', - 'software_version': '20.01', - 'bootstrap_interface': 'enp0s3', - 'bootstrap_address': '128.118.101.5', - 'bootstrap_address_prefix': 23, - 'bmc_address': '128.224.64.180', - 'bmc_username': 'root', - 'nexthop_gateway': '128.224.150.1', - 'network_address': '128.224.144.0', - 'network_mask': '255.255.254.0', - 'install_type': 3, - 'console_type': 'tty0', - 'rootfs_device': '/dev/disk/by-path/pci-0000:5c:00.0-scsi-0:1:0:0', - 'boot_device': ' /dev/disk/by-path/pci-0000:5c:00.0-scsi-0:1:0:0' -} + +class FakeDCOrchAPI(object): + def __init__(self): + self.update_subcloud_states = mock.MagicMock() + self.add_subcloud_sync_endpoint_type = mock.MagicMock() -class Controller(object): +class FakeService(object): + def __init__(self, type, id): + self.type = type + self.id = id + + +FAKE_SERVICES = [ + FakeService( + dcorch_consts.ENDPOINT_TYPE_PLATFORM, + 1 + ), + FakeService( + dcorch_consts.ENDPOINT_TYPE_IDENTITY, + 2 + ), + FakeService( + dcorch_consts.ENDPOINT_TYPE_PATCHING, + 3 + ), + FakeService( + dcorch_consts.ENDPOINT_TYPE_FM, + 4 + ), + FakeService( + dcorch_consts.ENDPOINT_TYPE_NFV, + 5 + ), +] + + +class FakeController(object): def __init__(self, hostname): self.hostname = hostname -class Service(object): - def __init__(self, type, id): - self.type = type - self.id = id +FAKE_CONTROLLERS = [ + FakeController( + 'controller-0' + ), + FakeController( + 'controller-1' + ), +] class Subcloud(object): @@ -111,18 +123,44 @@ class Subcloud(object): class TestSubcloudManager(base.DCManagerTestCase): def setUp(self): super(TestSubcloudManager, self).setUp() - self.ctxt = utils.dummy_context() - @mock.patch.object(dcorch_rpc_client, 'EngineClient') - @mock.patch.object(subcloud_manager, 'KeystoneClient') - @mock.patch.object(subcloud_manager, 'context') - def test_init(self, mock_context, mock_endpoint, mock_dcorch_rpc_client): 
- mock_context.get_admin_context.return_value = self.ctxt - am = subcloud_manager.SubcloudManager() - self.assertIsNotNone(am) - self.assertEqual('subcloud_manager', am.service_name) - self.assertEqual('localhost', am.host) - self.assertEqual(self.ctxt, am.context) + # Mock the DCOrch API + self.fake_dcorch_api = FakeDCOrchAPI() + p = mock.patch('dcorch.rpc.client.EngineClient') + self.mock_dcorch_api = p.start() + self.mock_dcorch_api.return_value = self.fake_dcorch_api + self.addCleanup(p.stop) + + # Mock the context + p = mock.patch.object(subcloud_manager, 'context') + self.mock_context = p.start() + self.mock_context.get_admin_context.return_value = self.ctx + self.addCleanup(p.stop) + + @staticmethod + def create_subcloud_static(ctxt, **kwargs): + values = { + "name": "subcloud1", + "description": "subcloud1 description", + "location": "subcloud1 location", + 'software_version': "18.03", + "management_subnet": "192.168.101.0/24", + "management_gateway_ip": "192.168.101.1", + "management_start_ip": "192.168.101.3", + "management_end_ip": "192.168.101.4", + "systemcontroller_gateway_ip": "192.168.204.101", + 'deploy_status': "not-deployed", + 'openstack_installed': False, + } + values.update(kwargs) + return db_api.subcloud_create(ctxt, **values) + + def test_init(self): + sm = subcloud_manager.SubcloudManager() + self.assertIsNotNone(sm) + self.assertEqual('subcloud_manager', sm.service_name) + self.assertEqual('localhost', sm.host) + self.assertEqual(self.ctx, sm.context) @file_data(utils.get_data_filepath('dcmanager', 'subclouds')) @mock.patch.object(dcorch_rpc_client, 'EngineClient') @@ -138,20 +176,19 @@ class TestSubcloudManager(base.DCManagerTestCase): '_write_subcloud_ansible_config') @mock.patch.object(subcloud_manager, 'keyring') - def test_add_subcloud(self, value, mock_keyring, + @mock.patch.object(threading.Thread, + 'start') + def test_add_subcloud(self, value, mock_thread_start, mock_keyring, mock_write_subcloud_ansible_config, 
mock_update_subcloud_inventory, mock_create_addn_hosts, mock_sysinv_client, mock_db_api, mock_keystone_client, mock_context, mock_dcorch_rpc_client): - value = utils.create_subcloud_dict(value) - controllers = [Controller('controller-0'), Controller('controller-1')] - services = [Service('identity', '1234'), - Service('faultmanagement', '1234'), - Service('patching', '1234'), - Service('platform', '1234'), - Service('nfv', '1234')] - mock_context.get_admin_context.return_value = self.ctxt + + values = utils.create_subcloud_dict(value) + controllers = FAKE_CONTROLLERS + services = FAKE_SERVICES + mock_context.get_admin_context.return_value = self.ctx mock_db_api.subcloud_get_by_name.side_effect = \ exceptions.SubcloudNameNotFound() @@ -160,7 +197,7 @@ class TestSubcloudManager(base.DCManagerTestCase): mock_keyring.get_password.return_value = "testpassword" sm = subcloud_manager.SubcloudManager() - sm.add_subcloud(self.ctxt, payload=value) + sm.add_subcloud(self.ctx, payload=values) mock_db_api.subcloud_create.assert_called_once() mock_db_api.subcloud_status_create.assert_called() mock_sysinv_client().create_route.assert_called() @@ -169,6 +206,7 @@ class TestSubcloudManager(base.DCManagerTestCase): mock_update_subcloud_inventory.assert_called_once() mock_write_subcloud_ansible_config.assert_called_once() mock_keyring.get_password.assert_called() + mock_thread_start.assert_called_once() @file_data(utils.get_data_filepath('dcmanager', 'subclouds')) @mock.patch.object(dcorch_rpc_client, 'EngineClient') @@ -184,14 +222,14 @@ class TestSubcloudManager(base.DCManagerTestCase): mock_db_api, mock_context, mock_dcorch_rpc_client): - controllers = [Controller('controller-0'), Controller('controller-1')] - mock_context.get_admin_context.return_value = self.ctxt + controllers = FAKE_CONTROLLERS + mock_context.get_admin_context.return_value = self.ctx data = utils.create_subcloud_dict(value) fake_subcloud = Subcloud(data, False) mock_db_api.subcloud_get.return_value = 
fake_subcloud mock_sysinv_client().get_controller_hosts.return_value = controllers sm = subcloud_manager.SubcloudManager() - sm.delete_subcloud(self.ctxt, subcloud_id=data['id']) + sm.delete_subcloud(self.ctx, subcloud_id=data['id']) mock_sysinv_client().delete_route.assert_called() mock_keystone_client().delete_region.assert_called_once() mock_db_api.subcloud_destroy.assert_called_once() @@ -205,13 +243,13 @@ class TestSubcloudManager(base.DCManagerTestCase): def test_update_subcloud(self, value, mock_db_api, mock_endpoint, mock_context, mock_dcorch_rpc_client): - mock_context.get_admin_context.return_value = self.ctxt + mock_context.get_admin_context.return_value = self.ctx data = utils.create_subcloud_dict(value) subcloud_result = Subcloud(data, True) mock_db_api.subcloud_get.return_value = subcloud_result mock_db_api.subcloud_update.return_value = subcloud_result sm = subcloud_manager.SubcloudManager() - sm.update_subcloud(self.ctxt, data['id'], + sm.update_subcloud(self.ctx, data['id'], management_state=consts.MANAGEMENT_MANAGED, description="subcloud new description", location="subcloud new location") @@ -221,3 +259,142 @@ class TestSubcloudManager(base.DCManagerTestCase): management_state=consts.MANAGEMENT_MANAGED, description="subcloud new description", location="subcloud new location") + + def test_update_subcloud_endpoint_status(self): + # create a subcloud + subcloud = self.create_subcloud_static(self.ctx, name='subcloud1') + self.assertIsNotNone(subcloud) + self.assertEqual(subcloud.management_state, + consts.MANAGEMENT_UNMANAGED) + self.assertEqual(subcloud.availability_status, + consts.AVAILABILITY_OFFLINE) + + # create sync statuses for endpoints + for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM, + dcorch_consts.ENDPOINT_TYPE_IDENTITY, + dcorch_consts.ENDPOINT_TYPE_PATCHING, + dcorch_consts.ENDPOINT_TYPE_FM, + dcorch_consts.ENDPOINT_TYPE_NFV]: + status = db_api.subcloud_status_create( + self.ctx, subcloud.id, endpoint) + 
self.assertIsNotNone(status) + self.assertEqual(status.sync_status, consts.SYNC_STATUS_UNKNOWN) + + # Update/verify each status with the default sync state: out-of-sync + sm = subcloud_manager.SubcloudManager() + for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM, + dcorch_consts.ENDPOINT_TYPE_IDENTITY, + dcorch_consts.ENDPOINT_TYPE_PATCHING, + dcorch_consts.ENDPOINT_TYPE_FM, + dcorch_consts.ENDPOINT_TYPE_NFV]: + # Update + sm.update_subcloud_endpoint_status( + self.ctx, subcloud_name=subcloud.name, + endpoint_type=endpoint) + + # Verify + updated_subcloud_status = db_api.subcloud_status_get( + self.ctx, subcloud.id, endpoint) + self.assertIsNotNone(updated_subcloud_status) + self.assertEqual(updated_subcloud_status.sync_status, + consts.SYNC_STATUS_OUT_OF_SYNC) + + # Attempt to update each status to be in-sync for an offline/unmanaged + # subcloud. This is not allowed. Verify no change. + for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM, + dcorch_consts.ENDPOINT_TYPE_IDENTITY, + dcorch_consts.ENDPOINT_TYPE_PATCHING, + dcorch_consts.ENDPOINT_TYPE_FM, + dcorch_consts.ENDPOINT_TYPE_NFV]: + sm.update_subcloud_endpoint_status( + self.ctx, subcloud_name=subcloud.name, + endpoint_type=endpoint, + sync_status=consts.SYNC_STATUS_IN_SYNC) + + updated_subcloud_status = db_api.subcloud_status_get( + self.ctx, subcloud.id, endpoint) + self.assertIsNotNone(updated_subcloud_status) + # No change in status: Only online/managed clouds are updated + self.assertEqual(updated_subcloud_status.sync_status, + consts.SYNC_STATUS_OUT_OF_SYNC) + + # Set/verify the subcloud is online/unmanaged + db_api.subcloud_update( + self.ctx, subcloud.id, + availability_status=consts.AVAILABILITY_ONLINE) + subcloud = db_api.subcloud_get(self.ctx, subcloud.id) + self.assertIsNotNone(subcloud) + self.assertEqual(subcloud.management_state, + consts.MANAGEMENT_UNMANAGED) + self.assertEqual(subcloud.availability_status, + consts.AVAILABILITY_ONLINE) + + # Attempt to update each status to be in-sync 
for an online/unmanaged + # subcloud. This is not allowed. Verify no change. + for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM, + dcorch_consts.ENDPOINT_TYPE_IDENTITY, + dcorch_consts.ENDPOINT_TYPE_PATCHING, + dcorch_consts.ENDPOINT_TYPE_FM, + dcorch_consts.ENDPOINT_TYPE_NFV]: + sm.update_subcloud_endpoint_status( + self.ctx, subcloud_name=subcloud.name, + endpoint_type=endpoint, + sync_status=consts.SYNC_STATUS_IN_SYNC) + + updated_subcloud_status = db_api.subcloud_status_get( + self.ctx, subcloud.id, endpoint) + self.assertIsNotNone(updated_subcloud_status) + # No change in status: Only online/managed clouds are updated + self.assertEqual(updated_subcloud_status.sync_status, + consts.SYNC_STATUS_OUT_OF_SYNC) + + # Set/verify the subcloud is online/managed + db_api.subcloud_update( + self.ctx, subcloud.id, + management_state=consts.MANAGEMENT_MANAGED) + subcloud = db_api.subcloud_get(self.ctx, subcloud.id) + self.assertIsNotNone(subcloud) + self.assertEqual(subcloud.management_state, + consts.MANAGEMENT_MANAGED) + self.assertEqual(subcloud.availability_status, + consts.AVAILABILITY_ONLINE) + + # Attempt to update each status to be in-sync for an online/managed + # subcloud + for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM, + dcorch_consts.ENDPOINT_TYPE_IDENTITY, + dcorch_consts.ENDPOINT_TYPE_PATCHING, + dcorch_consts.ENDPOINT_TYPE_FM, + dcorch_consts.ENDPOINT_TYPE_NFV]: + sm.update_subcloud_endpoint_status( + self.ctx, subcloud_name=subcloud.name, + endpoint_type=endpoint, + sync_status=consts.SYNC_STATUS_IN_SYNC) + + updated_subcloud_status = db_api.subcloud_status_get( + self.ctx, subcloud.id, endpoint) + self.assertIsNotNone(updated_subcloud_status) + self.assertEqual(updated_subcloud_status.sync_status, + consts.SYNC_STATUS_IN_SYNC) + + # Change the sync status to 'out-of-sync' and verify fair lock access + # based on subcloud name for each update + with mock.patch.object(lockutils, 'internal_fair_lock') as mock_lock: + for endpoint in 
[dcorch_consts.ENDPOINT_TYPE_PLATFORM, + dcorch_consts.ENDPOINT_TYPE_IDENTITY, + dcorch_consts.ENDPOINT_TYPE_PATCHING, + dcorch_consts.ENDPOINT_TYPE_FM, + dcorch_consts.ENDPOINT_TYPE_NFV]: + sm.update_subcloud_endpoint_status( + self.ctx, subcloud_name=subcloud.name, + endpoint_type=endpoint, + sync_status=consts.SYNC_STATUS_OUT_OF_SYNC) + # Verify lock was called + mock_lock.assert_called_with(subcloud.name) + + # Verify status was updated + updated_subcloud_status = db_api.subcloud_status_get( + self.ctx, subcloud.id, endpoint) + self.assertIsNotNone(updated_subcloud_status) + self.assertEqual(updated_subcloud_status.sync_status, + consts.SYNC_STATUS_OUT_OF_SYNC) diff --git a/distributedcloud/requirements.txt b/distributedcloud/requirements.txt index 7a6febea3..038265d20 100644 --- a/distributedcloud/requirements.txt +++ b/distributedcloud/requirements.txt @@ -25,7 +25,7 @@ WebOb>=1.7.1 # MIT alembic>=0.8.10 # MIT six>=1.9.0 # MIT stevedore>=1.20.0 # Apache-2.0 -oslo.concurrency>=3.8.0 # Apache-2.0 +oslo.concurrency>=3.29.1 # Apache-2.0 oslo.config>=4.0.0 # Apache-2.0 oslo.context>=2.14.0 # Apache-2.0 oslo.db>=4.21.1 # Apache-2.0 diff --git a/distributedcloud/tox.ini b/distributedcloud/tox.ini index e22da6da6..3790cd86f 100644 --- a/distributedcloud/tox.ini +++ b/distributedcloud/tox.ini @@ -51,6 +51,7 @@ setenv = CURRENT_CFG_FILE={toxinidir}/.current.cfg DATA_DIRECTORY={toxinidir}/dcmanager/tests/data SINGLE_REPO=True + OSLO_LOCK_PATH={toxinidir} commands = find {toxinidir} -not -path '{toxinidir}/.tox/*' -name '*.py[c|o]' -delete python setup_ddt_tests.py testr --slowest --testr-args='{posargs}'