diff --git a/distributedcloud/dcmanager/api/controllers/v1/subcloud_group.py b/distributedcloud/dcmanager/api/controllers/v1/subcloud_group.py
old mode 100755
new mode 100644
index bccaf9f0e..9a12f223f
--- a/distributedcloud/dcmanager/api/controllers/v1/subcloud_group.py
+++ b/distributedcloud/dcmanager/api/controllers/v1/subcloud_group.py
@@ -49,7 +49,7 @@ SUPPORTED_GROUP_APPLY_TYPES = [
 MAX_SUBCLOUD_GROUP_NAME_LEN = 255
 MAX_SUBCLOUD_GROUP_DESCRIPTION_LEN = 255
 MIN_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS = 1
-MAX_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS = 100
+MAX_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS = 500
 
 
 class SubcloudGroupsController(object):
diff --git a/distributedcloud/dcmanager/api/controllers/v1/sw_update_strategy.py b/distributedcloud/dcmanager/api/controllers/v1/sw_update_strategy.py
index 064bdcf0b..0e6598ab0 100755
--- a/distributedcloud/dcmanager/api/controllers/v1/sw_update_strategy.py
+++ b/distributedcloud/dcmanager/api/controllers/v1/sw_update_strategy.py
@@ -154,8 +154,7 @@ class SwUpdateStrategyController(object):
                 max_parallel_subclouds = int(max_parallel_subclouds_str)
             except ValueError:
                 pecan.abort(400, _('max-parallel-subclouds invalid'))
-            # TODO(Bart): Decide on a maximum
-            if max_parallel_subclouds < 1 or max_parallel_subclouds > 100:
+            if max_parallel_subclouds < 1 or max_parallel_subclouds > 500:
                 pecan.abort(400, _('max-parallel-subclouds invalid'))
 
         stop_on_failure = payload.get('stop-on-failure')
diff --git a/distributedcloud/dcmanager/orchestrator/orch_thread.py b/distributedcloud/dcmanager/orchestrator/orch_thread.py
index 801d288a7..472cb6577 100644
--- a/distributedcloud/dcmanager/orchestrator/orch_thread.py
+++ b/distributedcloud/dcmanager/orchestrator/orch_thread.py
@@ -79,7 +79,7 @@ class OrchThread(threading.Thread):
         self._stop = threading.Event()
         # Keeps track of greenthreads we create to do work.
         self.thread_group_manager = scheduler.ThreadGroupManager(
-            thread_pool_size=100)
+            thread_pool_size=500)
         # Track worker created for each subcloud.
         self.subcloud_workers = dict()
 
diff --git a/distributedcloud/dcmanager/orchestrator/patch_orch_thread.py b/distributedcloud/dcmanager/orchestrator/patch_orch_thread.py
index 9ec8ad77e..366ee2751 100644
--- a/distributedcloud/dcmanager/orchestrator/patch_orch_thread.py
+++ b/distributedcloud/dcmanager/orchestrator/patch_orch_thread.py
@@ -70,7 +70,7 @@ class PatchOrchThread(threading.Thread):
         self.audit_rpc_client = audit_rpc_client
         # Keeps track of greenthreads we create to do work.
         self.thread_group_manager = scheduler.ThreadGroupManager(
-            thread_pool_size=100)
+            thread_pool_size=500)
         # Track worker created for each subcloud.
         self.subcloud_workers = dict()
         # Used to store RegionOne patches.
diff --git a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_group.py b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_group.py
index 962b11830..c7b8c5707 100644
--- a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_group.py
+++ b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_subcloud_group.py
@@ -178,10 +178,10 @@ class TestSubcloudGroupPost(testroot.DCManagerApiTest,
 
     @mock.patch.object(rpc_client, 'ManagerClient')
     def test_create_with_bad_max_parallel_subclouds(self, mock_client):
-        # max_parallel_subclouds must be an integer between 1 and 100
+        # max_parallel_subclouds must be an integer between 1 and 500
         ndict = self.get_post_object()
         # All the entries in bad_values should be considered invalid
-        bad_values = [0, 101, -1, 'abc']
+        bad_values = [0, 501, -1, 'abc']
         for bad_value in bad_values:
             ndict['max_parallel_subclouds'] = bad_value
             response = self.app.post_json(self.get_api_prefix(),