Optimize dc orchestrator scheduling

Currently, DC orchestration divides the subclouds into stages, where
the stage size is defined by the max_parallel_subclouds parameter or
by the size set for the subcloud group. Once a stage starts being
orchestrated, the orchestrator waits for all subclouds in that stage
to finish before starting the next stage, so it ends up waiting on the
slowest subclouds. This change removes the division into stages and
instead starts orchestrating a new subcloud as soon as another one
finishes, improving overall orchestration speed.
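
The new behavior is effectively a sliding window of at most
max_parallel_subclouds subclouds in flight. The sketch below is
illustrative only: the helper names (start_orchestration, step.region)
and the simplified state strings are assumptions, and the real logic
lives in OrchThread.apply and process_update_step.

def schedule(strategy_steps, max_parallel_subclouds, subcloud_workers,
             start_orchestration, stop=False):
    """Keep up to max_parallel_subclouds subclouds being orchestrated.

    As soon as a subcloud finishes (complete/failed/aborted), its worker
    slot is released and the next INITIAL step starts immediately,
    instead of waiting for a whole stage to drain.
    """
    for step in strategy_steps:
        if step.state in ('complete', 'failed', 'aborted'):
            # Free the worker slot held by this subcloud, if any.
            subcloud_workers.pop(step.region, None)
        elif step.state == 'initial':
            # Start another subcloud only while a slot is free and no
            # stop-on-failure condition has been raised.
            if len(subcloud_workers) < max_parallel_subclouds and not stop:
                subcloud_workers[step.region] = start_orchestration(step)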

Test Plan:
PASS: Create and apply a strategy with a specific subcloud and verify
that the orchestration finishes successfully.
PASS: Create and apply a strategy with a group of subclouds and verify
that the orchestrator uses the max_parallel_subclouds of that group.
PASS: Create and apply a strategy with max_parallel_subclouds
parameter and verify that the orchestrator uses that parameter.
PASS: Create and apply a strategy with stop_on_failure parameter and
verify that the orchestrator doesn't add any subclouds after the
failure is identified.
PASS: Create and apply a kubernetes strategy and verify that the
orchestration finishes successfully.
PASS: Create and apply a rootca strategy and verify that the
orchestration finishes successfully.
PASS: Create and apply a firmware strategy and verify that the
orchestration finishes successfully.
PASS: Create and apply an upgrade strategy and verify that the
orchestration finishes successfully.
PASS: Create and apply a patch strategy with 100 subclouds in
parallel and verify that the orchestration finishes successfully.
PASS: Create and apply a strategy with a group that has more
subclouds than the max_parallel_subclouds value and verify that,
once a subcloud has been processed, another subcloud from the
"unprocessed" pool takes its place.
PASS: Create and apply a strategy without specifying the
--max_parallel_subclouds value, with eligible subclouds from
different groups, and verify that the orchestration finishes
successfully (how the effective value is resolved is sketched
after this test plan).
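
The group/CLI/default precedence exercised by the test cases above can
be summarized as follows. This is a simplified reconstruction of the
create_sw_update_strategy changes in this commit, and the value of 2
used for DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS is an assumption.

DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS = 2  # assumed default

def resolve_max_parallel(group_max_parallel, cli_max_parallel, apply_type):
    """Group value wins when a subcloud group is given, otherwise the
    CLI value; serial apply forces 1; unset falls back to the default."""
    max_parallel = (group_max_parallel if group_max_parallel is not None
                    else cli_max_parallel)
    if apply_type == 'serial':
        return 1
    if max_parallel is None:
        return DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS
    return max_parallel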

Story: 2010798
Task: 48417

Signed-off-by: Christopher Souza <Christopher.DeOliveiraSouza@windriver.com>
Change-Id: Ie8e8a62ce1314706fb359c1349ab09bc853d0e4a
Christopher Souza committed 2023-07-18 16:35:16 -03:00
parent 42d7851bcc
commit 342690142f
7 changed files with 177 additions and 173 deletions


@ -81,6 +81,11 @@ SW_UPDATE_STATE_DELETED = "deleted"
SW_UPDATE_ACTION_APPLY = "apply"
SW_UPDATE_ACTION_ABORT = "abort"
# Stage states
STAGE_SUBCLOUD_ORCHESTRATION_CREATED = 1
STAGE_SUBCLOUD_ORCHESTRATION_STARTED = 2
STAGE_SUBCLOUD_ORCHESTRATION_PROCESSED = 3
# Subcloud apply types
SUBCLOUD_APPLY_TYPE_PARALLEL = "parallel"
SUBCLOUD_APPLY_TYPE_SERIAL = "serial"


@ -161,6 +161,11 @@ def subcloud_get_all(context):
return IMPL.subcloud_get_all(context)
def subcloud_get_all_ordered_by_id(context):
"""Retrieve all subclouds ordered by id."""
return IMPL.subcloud_get_all_ordered_by_id(context)
def subcloud_get_all_with_status(context):
"""Retrieve all subclouds and sync statuses."""
return IMPL.subcloud_get_all_with_status(context)


@ -324,6 +324,13 @@ def subcloud_get_all(context):
all()
def subcloud_get_all_ordered_by_id(context):
return model_query(context, models.Subcloud). \
filter_by(deleted=0). \
order_by(models.Subcloud.id). \
all()
@require_context
def subcloud_get_all_with_status(context):
result = model_query(context, models.Subcloud, models.SubcloudStatus). \


@ -181,7 +181,7 @@ class OrchThread(threading.Thread):
return state_operator(
region_name=OrchThread.get_region_name(strategy_step))
def strategy_step_update(self, subcloud_id, state=None, details=None):
def strategy_step_update(self, subcloud_id, state=None, details=None, stage=None):
"""Update the strategy step in the DB
Sets the start and finished timestamp if necessary, based on state.
@ -197,12 +197,16 @@ class OrchThread(threading.Thread):
# Return the updated object, in case we need to use its updated values
return db_api.strategy_step_update(self.context,
subcloud_id,
stage=stage,
state=state,
details=details,
started_at=started_at,
finished_at=finished_at)
def _delete_subcloud_worker(self, region):
def _delete_subcloud_worker(self, region, subcloud_id):
db_api.strategy_step_update(self.context,
subcloud_id,
stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_PROCESSED)
if region in self.subcloud_workers:
# The orchestration for this subcloud has either
# completed/failed/aborted, remove it from the
@ -253,24 +257,25 @@ class OrchThread(threading.Thread):
LOG.debug("(%s) Applying update strategy" % self.update_type)
strategy_steps = db_api.strategy_step_get_all(self.context)
# Figure out which stage we are working on
current_stage = None
stop_after_stage = None
stop = False
failure_detected = False
abort_detected = False
for strategy_step in strategy_steps:
if strategy_step.state == consts.STRATEGY_STATE_COMPLETE:
# This step is complete
self._delete_subcloud_worker(strategy_step.subcloud.name)
self._delete_subcloud_worker(strategy_step.subcloud.name,
strategy_step.subcloud_id)
continue
elif strategy_step.state == consts.STRATEGY_STATE_ABORTED:
# This step was aborted
self._delete_subcloud_worker(strategy_step.subcloud.name)
self._delete_subcloud_worker(strategy_step.subcloud.name,
strategy_step.subcloud_id)
abort_detected = True
continue
elif strategy_step.state == consts.STRATEGY_STATE_FAILED:
failure_detected = True
self._delete_subcloud_worker(strategy_step.subcloud.name)
self._delete_subcloud_worker(strategy_step.subcloud.name,
strategy_step.subcloud_id)
# This step has failed and needs no further action
if strategy_step.subcloud_id is None:
# Strategy on SystemController failed. We are done.
@ -288,13 +293,10 @@ class OrchThread(threading.Thread):
return
elif sw_update_strategy.stop_on_failure:
# We have been told to stop on failures
stop_after_stage = strategy_step.stage
current_stage = strategy_step.stage
stop = True
break
continue
# We have found the first step that isn't complete or failed.
# This is the stage we are working on now.
current_stage = strategy_step.stage
break
else:
# The strategy application is complete
@ -328,22 +330,16 @@ class OrchThread(threading.Thread):
self.trigger_audit()
return
if stop_after_stage is not None:
if stop:
work_remaining = False
# We are going to stop after the steps in this stage have finished.
for strategy_step in strategy_steps:
if strategy_step.stage == stop_after_stage:
if strategy_step.state != consts.STRATEGY_STATE_COMPLETE \
and strategy_step.state != \
consts.STRATEGY_STATE_FAILED:
# There is more work to do in this stage
work_remaining = True
break
# We are going to stop after the steps that are in progress finish.
if len(self.subcloud_workers) > 0:
work_remaining = True
if not work_remaining:
# We have completed the stage that failed
LOG.info("(%s) Stopping strategy due to failure in stage %d"
% (self.update_type, stop_after_stage))
# We have completed the remaining steps
LOG.info("(%s) Stopping strategy due to failure"
% self.update_type)
with self.strategy_lock:
db_api.sw_update_strategy_update(
self.context,
@ -353,30 +349,31 @@ class OrchThread(threading.Thread):
self.trigger_audit()
return
LOG.debug("(%s) Working on stage %d"
% (self.update_type, current_stage))
for strategy_step in strategy_steps:
if strategy_step.stage == current_stage:
region = self.get_region_name(strategy_step)
if self.stopped():
LOG.info("(%s) Exiting because task is stopped"
% self.update_type)
self.subcloud_workers.clear()
return
if strategy_step.state == \
consts.STRATEGY_STATE_FAILED:
LOG.debug("(%s) Intermediate step is failed"
% self.update_type)
self._delete_subcloud_worker(region)
continue
elif strategy_step.state == \
consts.STRATEGY_STATE_COMPLETE:
LOG.debug("(%s) Intermediate step is complete"
% self.update_type)
self._delete_subcloud_worker(region)
continue
elif strategy_step.state == \
consts.STRATEGY_STATE_INITIAL:
region = self.get_region_name(strategy_step)
if self.stopped():
LOG.info("(%s) Exiting because task is stopped"
% self.update_type)
self.subcloud_workers.clear()
return
if strategy_step.state == \
consts.STRATEGY_STATE_FAILED:
LOG.debug("(%s) Intermediate step is failed"
% self.update_type)
self._delete_subcloud_worker(region,
strategy_step.subcloud_id)
continue
elif strategy_step.state == \
consts.STRATEGY_STATE_COMPLETE:
LOG.debug("(%s) Intermediate step is complete"
% self.update_type)
self._delete_subcloud_worker(region,
strategy_step.subcloud_id)
continue
elif strategy_step.state == \
consts.STRATEGY_STATE_INITIAL:
if sw_update_strategy.max_parallel_subclouds > len(self.subcloud_workers) \
and not stop:
# Don't start upgrading this subcloud if it has been
# unmanaged by the user. If orchestration was already
# started, it will be allowed to complete.
@ -396,15 +393,16 @@ class OrchThread(threading.Thread):
# Use the updated value for calling process_update_step
strategy_step = self.strategy_step_update(
strategy_step.subcloud_id,
stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_STARTED,
state=self.starting_state)
# Starting state should log an error if greenthread exists
self.process_update_step(region,
strategy_step,
log_error=True)
else:
self.process_update_step(region,
strategy_step,
log_error=False)
else:
self.process_update_step(region,
strategy_step,
log_error=False)
def abort(self, sw_update_strategy):
"""Abort an update strategy"""


@ -258,15 +258,6 @@ class SwUpdateManager(manager.Manager):
strategy_type = payload.get('type')
# if use_group_apply_type = True, we use the subcloud_apply_type
# specified for each subcloud group
# else we use the subcloud_apply_type specified through CLI
use_group_apply_type = False
# if use_group_max_parallel = True, we use the max_parallel_subclouds
# value specified for each subcloud group
# else we use the max_parallel_subclouds value specified through CLI
use_group_max_parallel = False
single_group = None
subcloud_group = payload.get('subcloud_group')
if subcloud_group:
@ -274,18 +265,12 @@ class SwUpdateManager(manager.Manager):
subcloud_group)
subcloud_apply_type = single_group.update_apply_type
max_parallel_subclouds = single_group.max_parallel_subclouds
use_group_apply_type = True
use_group_max_parallel = True
else:
subcloud_apply_type = payload.get('subcloud-apply-type')
max_parallel_subclouds_str = payload.get('max-parallel-subclouds')
if not subcloud_apply_type:
use_group_apply_type = True
if not max_parallel_subclouds_str:
max_parallel_subclouds = None
use_group_max_parallel = True
else:
max_parallel_subclouds = int(max_parallel_subclouds_str)
@ -518,7 +503,12 @@ class SwUpdateManager(manager.Manager):
# handle extra_args processing such as staging to the vault
self._process_extra_args_creation(strategy_type, extra_args)
current_stage_counter = 0
if consts.SUBCLOUD_APPLY_TYPE_SERIAL == subcloud_apply_type:
max_parallel_subclouds = 1
if max_parallel_subclouds is None:
max_parallel_subclouds = consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS
strategy_step_created = False
# Create the strategy
strategy = db_api.sw_update_strategy_create(
@ -534,78 +524,44 @@ class SwUpdateManager(manager.Manager):
# out of sync
# special cases:
# - kube rootca update: the 'force' option allows in-sync subclouds
current_stage_counter += 1
stage_size = 0
stage_updated = False
if single_group:
groups = [single_group]
subclouds_list = db_api.subcloud_get_for_group(context, single_group.id)
else:
# Fetch all subcloud groups
groups = db_api.subcloud_group_get_all(context)
# Fetch all subclouds
subclouds_list = db_api.subcloud_get_all_ordered_by_id(context)
for group in groups:
# Fetch subcloud list for each group
subclouds_list = db_api.subcloud_get_for_group(context, group.id)
if use_group_max_parallel:
max_parallel_subclouds = group.max_parallel_subclouds
if use_group_apply_type:
subcloud_apply_type = group.update_apply_type
for subcloud in subclouds_list:
stage_updated = False
if (cloud_name and subcloud.name != cloud_name or
subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED):
# We are not targeting for update this subcloud
for subcloud in subclouds_list:
if (cloud_name and subcloud.name != cloud_name or
subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED):
# We are not targeting for update this subcloud
continue
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
if strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
if not force:
continue
else:
continue
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
if strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
if not force:
continue
else:
continue
subcloud_status = db_api.subcloud_status_get_all(context,
subcloud.id)
for status in subcloud_status:
if self._validate_subcloud_status_sync(strategy_type,
status,
force,
subcloud.availability_status):
LOG.debug("Creating strategy_step for endpoint_type: %s, "
"sync_status: %s, subcloud: %s, id: %s",
status.endpoint_type, status.sync_status,
subcloud.name, subcloud.id)
db_api.strategy_step_create(
context,
subcloud.id,
stage=current_stage_counter,
state=consts.STRATEGY_STATE_INITIAL,
details='')
strategy_step_created = True
# We have added a subcloud to this stage
stage_size += 1
if consts.SUBCLOUD_APPLY_TYPE_SERIAL in subcloud_apply_type:
# For serial apply type always move to next stage
stage_updated = True
current_stage_counter += 1
elif stage_size >= max_parallel_subclouds:
# For parallel apply type, move to next stage if we have
# reached the maximum subclouds for this stage
stage_updated = True
current_stage_counter += 1
stage_size = 0
# Reset the stage_size before iterating through a new subcloud group
stage_size = 0
# current_stage_counter value is updated only when subcloud_apply_type is serial
# or the max_parallel_subclouds limit is reached. If the value is updated
# for either one of these reasons and it also happens to be the last
# iteration for this particular group, the following check will prevent
# the current_stage_counter value from being updated twice
if not stage_updated:
current_stage_counter += 1
subcloud_status = db_api.subcloud_status_get_all(context,
subcloud.id)
for status in subcloud_status:
if self._validate_subcloud_status_sync(strategy_type,
status,
force,
subcloud.availability_status):
LOG.debug("Creating strategy_step for endpoint_type: %s, "
"sync_status: %s, subcloud: %s, id: %s",
status.endpoint_type, status.sync_status,
subcloud.name, subcloud.id)
db_api.strategy_step_create(
context,
subcloud.id,
stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_CREATED,
state=consts.STRATEGY_STATE_INITIAL,
details='')
strategy_step_created = True
if strategy_step_created:
strategy_dict = db_api.sw_update_strategy_db_model_to_dict(


@ -5,6 +5,7 @@
#
import mock
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.common import exceptions as exception
@ -24,6 +25,29 @@ def non_threaded_start(some_function, some_arguments):
class TestFwOrchThread(TestSwUpdate):
@staticmethod
def create_subcloud(ctxt, name, group_id):
values = {
"name": name,
"description": "subcloud1 description",
"location": "subcloud1 location",
'software_version': "18.03",
"management_subnet": "192.168.101.0/24",
"management_gateway_ip": "192.168.101.1",
"management_start_ip": "192.168.101.3",
"management_end_ip": "192.168.101.4",
"systemcontroller_gateway_ip": "192.168.204.101",
'deploy_status': "not-deployed",
'error_description': 'No errors present',
'openstack_installed': False,
'group_id': group_id,
'data_install': 'data from install',
}
subcloud = db_api.subcloud_create(ctxt, **values)
state = dccommon_consts.MANAGEMENT_MANAGED
subcloud = db_api.subcloud_update(ctxt, subcloud.id,
management_state=state)
return subcloud
# Setting DEFAULT_STRATEGY_TYPE to firmware will setup the firmware
# orchestration worker, and will mock away the other orch threads
@ -47,6 +71,7 @@ class TestFwOrchThread(TestSwUpdate):
return fake_strategy.create_fake_strategy(
self.ctx,
consts.SW_UPDATE_TYPE_FIRMWARE,
max_parallel_subclouds=2,
state=state)
def test_delete_strategy_no_steps(self):
@ -66,6 +91,47 @@ class TestFwOrchThread(TestSwUpdate):
self.ctx,
consts.SW_UPDATE_TYPE_FIRMWARE)
@mock.patch.object(scheduler.ThreadGroupManager, 'start')
@mock.patch.object(OrchThread, 'perform_state_action')
def test_apply_strategy(self, mock_perform_state_action,
mock_start):
mock_start.side_effect = non_threaded_start
self.strategy = self.setup_strategy(
state=consts.SW_UPDATE_STATE_APPLYING)
subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1)
subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', 1)
subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 1)
self.setup_strategy_step(
subcloud2.id, consts.STRATEGY_STATE_INITIAL)
self.setup_strategy_step(
subcloud3.id, consts.STRATEGY_STATE_INITIAL)
self.setup_strategy_step(
subcloud4.id, consts.STRATEGY_STATE_INITIAL)
self.worker.apply(self.strategy)
steps = db_api.strategy_step_get_all(self.ctx)
# the orchestrator can orchestrate 2 subclouds at a time
self.assertEqual(steps[0].state, consts.STRATEGY_STATE_IMPORTING_FIRMWARE)
self.assertEqual(steps[1].state, consts.STRATEGY_STATE_IMPORTING_FIRMWARE)
self.assertEqual(steps[2].state, consts.STRATEGY_STATE_INITIAL)
# subcloud3 orchestration finished first
db_api.strategy_step_update(self.ctx,
subcloud3.id,
state=consts.STRATEGY_STATE_COMPLETE)
self.worker.apply(self.strategy)
steps = db_api.strategy_step_get_all(self.ctx)
# subcloud3 finished, thus subcloud4 should start
self.assertEqual(steps[0].state, consts.STRATEGY_STATE_IMPORTING_FIRMWARE)
self.assertEqual(steps[1].state, consts.STRATEGY_STATE_COMPLETE)
self.assertEqual(steps[2].state, consts.STRATEGY_STATE_IMPORTING_FIRMWARE)
@mock.patch.object(scheduler.ThreadGroupManager, 'start')
def test_delete_strategy_single_step_no_vim_strategy(self, mock_start):
# The 'strategy' needs to be in 'deleting'


@ -295,7 +295,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.ctxt, payload=data)
# Verify strategy was created as expected using group values
self.assertEqual(response['max-parallel-subclouds'], 2)
self.assertEqual(response['max-parallel-subclouds'], 1)
self.assertEqual(response['subcloud-apply-type'],
consts.SUBCLOUD_APPLY_TYPE_SERIAL)
self.assertEqual(response['type'],
@ -305,8 +305,6 @@ class TestSwUpdateManager(base.DCManagerTestCase):
strategy_steps = db_api.strategy_step_get_all(self.ctx)
self.assertEqual(strategy_steps[0]['state'],
consts.STRATEGY_STATE_INITIAL)
self.assertEqual(strategy_steps[0]['stage'],
1)
self.assertEqual(strategy_steps[0]['details'],
'')
self.assertEqual(strategy_steps[0]['subcloud_id'],
@ -343,11 +341,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
# Verify the strategy step list
subcloud_ids = [1, 2]
stage = [1, 1]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(prestage, 'initial_subcloud_validate')
@mock.patch.object(prestage, 'global_prestage_validate')
@ -391,11 +387,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
# Verify the strategy step list
subcloud_ids = [1, 2]
stage = [1, 1]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(prestage, 'initial_subcloud_validate')
@mock.patch.object(prestage, 'global_prestage_validate')
@ -458,11 +452,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
# Verify the strategy step list
subcloud_ids = [1, 2, 3, 4]
stage = [1, 1, 2, 2]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(prestage, 'initial_subcloud_validate')
@mock.patch.object(prestage, '_get_system_controller_upgrades')
@ -567,11 +559,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
# Verify the strategy step list
subcloud_ids = [1, 3, 5, 6, 7]
stage = [1, 1, 2, 3, 3]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
def test_create_sw_patching_subcloud_in_sync_out_of_sync(
@ -629,16 +619,11 @@ class TestSwUpdateManager(base.DCManagerTestCase):
# Verify the strategy step list
subcloud_ids = [1, 3]
# Both subclouds are added to the first stage (max-parallel-subclouds=2)
stage = [1, 1]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
subcloud_id_processed = []
stage_processed = []
for strategy_step in strategy_step_list:
subcloud_id_processed.append(strategy_step.subcloud_id)
stage_processed.append(strategy_step.stage)
self.assertEqual(subcloud_ids, subcloud_id_processed)
self.assertEqual(stage, stage_processed)
@mock.patch.object(cutils, 'get_systemcontroller_installed_loads')
@mock.patch.object(prestage, 'initial_subcloud_validate')
@ -702,15 +687,11 @@ class TestSwUpdateManager(base.DCManagerTestCase):
# Verify the strategy step list
subcloud_ids = [1, 3, 5, 6, 7]
stage = [1, 1, 2, 3, 3]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
subcloud_id_processed = []
stage_processed = []
for index, strategy_step in enumerate(strategy_step_list):
subcloud_id_processed.append(strategy_step.subcloud_id)
stage_processed.append(strategy_step.stage)
self.assertEqual(subcloud_ids, subcloud_id_processed)
self.assertEqual(stage, stage_processed)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
def test_create_sw_update_strategy_serial(
@ -760,17 +741,15 @@ class TestSwUpdateManager(base.DCManagerTestCase):
strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=data)
# Assert that values passed through CLI are used instead of group values
self.assertEqual(strategy_dict['max-parallel-subclouds'], 2)
self.assertEqual(strategy_dict['max-parallel-subclouds'], 1)
self.assertEqual(strategy_dict['subcloud-apply-type'],
consts.SUBCLOUD_APPLY_TYPE_SERIAL)
# Verify the strategy step list
subcloud_ids = [1, 3, 5, 6, 7]
stage = [1, 2, 3, 4, 5]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
def test_create_sw_update_strategy_using_group_apply_type(
@ -859,11 +838,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
# Verify the strategy step list
subcloud_ids = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]
stage = [1, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
def test_create_sw_update_strategy_using_group_max_parallel(
@ -949,15 +926,14 @@ class TestSwUpdateManager(base.DCManagerTestCase):
consts.SUBCLOUD_APPLY_TYPE_PARALLEL)
# Assert that group values are being used for subcloud_apply_type
self.assertEqual(strategy_dict['max-parallel-subclouds'], None)
self.assertEqual(strategy_dict['max-parallel-subclouds'],
consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS)
# Verify the strategy step list
subcloud_ids = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]
stage = [1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
def test_create_sw_update_strategy_using_all_group_values(
@ -1039,16 +1015,15 @@ class TestSwUpdateManager(base.DCManagerTestCase):
strategy_dict = um.create_sw_update_strategy(self.ctxt, payload=data)
# Assert that group values are being used
self.assertEqual(strategy_dict['max-parallel-subclouds'], None)
self.assertEqual(strategy_dict['max-parallel-subclouds'],
consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS)
self.assertEqual(strategy_dict['subcloud-apply-type'], None)
# Verify the strategy step list
subcloud_ids = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]
stage = [1, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
def test_create_sw_update_strategy_unknown_sync_status(
@ -1157,11 +1132,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
# Verify the strategy step list
subcloud_ids = [2, 3, 4]
stage = [1, 1, 1]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
def test_create_sw_update_strategy_with_force_option(
@ -1205,11 +1178,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.assertEqual(strategy_dict['type'], consts.SW_UPDATE_TYPE_UPGRADE)
subcloud_ids = [1, 2]
stage = [1, 1]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
def test_create_sw_update_strategy_without_force_option(
@ -1253,11 +1224,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.assertEqual(strategy_dict['type'], consts.SW_UPDATE_TYPE_UPGRADE)
subcloud_ids = [2]
stage = [1]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
def test_create_sw_update_strategy_not_in_sync_offline_subcloud_with_force_upgrade(
@ -1287,11 +1256,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
# Verify the strategy step list
subcloud_ids = [1]
stage = [1]
strategy_step_list = db_api.strategy_step_get_all(self.ctxt)
for index, strategy_step in enumerate(strategy_step_list):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
self.assertEqual(stage[index], strategy_step.stage)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
def test_create_sw_update_strategy_in_sync_offline_subcloud_with_force_upgrade(