Improve rehome_data semantic check and name update

This commit adds a new rehome_data semantic check when attempting to
update which peer group a subcloud is part of. If rehome_data
is not already present, the request payload must contain both the
bootstrap-values and bootstrap-address; otherwise, the request will
be aborted.

Additionally, this commit updates the rehome_data during the subcloud
rename operation, guaranteeing that the name is up-to-date.

Test Plan:
1. PASS - Attempt to add a subcloud with no rehome_data to a peer
          group under the following conditions and verify that it
          fails:
          - Without passing bootstrap-address and bootstrap-values
          - Passing only the bootstrap-address
          - Passing only the bootstrap-values
2. PASS - Add a subcloud with rehome_data to a peer group and verify
          that the operation succeeds regardless of the presence of
          bootstrap-address and bootstrap-values.
3. PASS - Rename a subcloud with rehome_data and verify that the
          rehome_data name field is updated to the new name.
4. PASS - Rename a subcloud without rehome_data and verify that the
          rename operation still works.
5. PASS - Migrate a renamed subcloud back and forth and verify that
          the migration completes successfully.

Closes-Bug: 2055883
Closes-Bug: 2056796

Change-Id: I4403dc50062db07a0de24e04139e3af8087c546f
Signed-off-by: Gustavo Herzmann <gustavo.herzmann@windriver.com>
This commit is contained in:
Gustavo Herzmann 2024-03-11 17:22:15 -03:00
parent 05e06bb9ec
commit 226e127fc0
4 changed files with 63 additions and 3 deletions

View File

@ -355,6 +355,9 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
_('Unable to sync the update to the peer site(s)'))
# Local update will continue if it's only partial
# failure.
# TODO(gherzmann): update the association sync status to
# out-of-date on partial failures when support for multiple
# associations is added (geo-redundancy phase 2)
LOG.error(f"Failed to sync the subcloud peer group "
f"{group.id} update on the peer site(s) "
f"{failed_peer_ids} with the values: "

View File

@ -708,11 +708,19 @@ class SubcloudsController(object):
"is prohibited."))
peer_group_id = 'none'
else:
if not (subcloud.rehome_data or (
payload.get('bootstrap_values') and
payload.get('bootstrap_address'))):
pecan.abort(400, _("Cannot update the subcloud "
"peer group: must provide both the "
"bootstrap-values and "
"bootstrap-address."))
if original_pgrp and original_pgrp.group_priority > 0 and \
str(subcloud.peer_group_id) != peer_group:
pecan.abort(400, _(
"Cannot update subcloud to a new peer group "
"if the original peer group has non-zero priority."))
"Cannot move subcloud to a new peer group if the "
"original peer group is not primary (non-zero "
"priority)."))
pgrp = utils.subcloud_peer_group_get_by_ref(context, peer_group)
if not pgrp:
pecan.abort(400, _('Invalid peer group'))

View File

@ -2719,6 +2719,15 @@ class SubcloudManager(manager.Manager):
self._rename_subcloud_ansible_files(curr_subcloud_name,
new_subcloud_name)
# Update the subcloud rehome_data with the new name
if subcloud.rehome_data:
rehome_data_dict = json.loads(subcloud.rehome_data)
if 'saved_payload' in rehome_data_dict:
rehome_data_dict['saved_payload']['name'] = new_subcloud_name
rehome_data = json.dumps(rehome_data_dict)
subcloud = db_api.subcloud_update(context, subcloud_id,
rehome_data=rehome_data)
return subcloud
def get_subcloud_name_by_region_name(self,

View File

@ -1765,7 +1765,10 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
deploy_status=consts.DEPLOY_STATE_DONE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
prestage_status=consts.PRESTAGE_STATE_COMPLETE
prestage_status=consts.PRESTAGE_STATE_COMPLETE,
rehome_data="{\"saved_payload\": "
"{\"system_mode\": \"simplex\","
"\"bootstrap-address\": \"192.168.100.100\"}}"
)
mock_is_leader_on_local_site.return_value = True
data = {"peer_group": peer_group.id}
@ -1780,6 +1783,43 @@ class TestSubcloudAPIOther(testroot.DCManagerApiTest):
self.mock_rpc_client().update_association_sync_status. \
assert_called_once()
@mock.patch.object(subclouds.SubcloudsController, "_get_patch_data")
@mock.patch.object(cutils, 'is_leader_on_local_site')
@mock.patch.object(cutils, 'subcloud_peer_group_get_by_ref')
def test_add_subcloud_to_peer_group_without_rehome_data(
    self, mock_get_peer_group, mock_is_leader_on_local_site,
    mock_get_patch_data
):
    """Reject adding a subcloud with no rehome_data to a peer group.

    A subcloud that has no stored rehome_data can only join a peer
    group if the request supplies both bootstrap-values and
    bootstrap-address; here neither is supplied, so the PATCH must
    fail with HTTP 400 and no RPC update may be issued.
    """
    # Target peer group returned by the mocked lookup helper.
    group = test_system_peer_manager.TestSystemPeerManager. \
        create_subcloud_peer_group_static(
            self.ctx, peer_group_name='SubcloudPeerGroup1')
    mock_get_peer_group.return_value = group

    # Managed, online subcloud with no rehome_data set.
    subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
    subcloud = db_api.subcloud_update(
        self.ctx,
        subcloud.id,
        availability_status=dccommon_consts.AVAILABILITY_ONLINE,
        deploy_status=consts.DEPLOY_STATE_DONE,
        management_state=dccommon_consts.MANAGEMENT_MANAGED,
        prestage_status=consts.PRESTAGE_STATE_COMPLETE
    )
    mock_is_leader_on_local_site.return_value = True

    # Payload names only the peer group — no bootstrap-values and
    # no bootstrap-address, which is the condition under test.
    payload = {"peer_group": group.id}
    self.mock_rpc_client().update_subcloud.return_value = True
    mock_get_patch_data.return_value = payload

    # The API must abort the request with a 400 error.
    six.assertRaisesRegex(
        self,
        webtest.app.AppError,
        "400 *",
        self.app.patch_json,
        FAKE_URL + "/" + str(subcloud.id),
        headers=FAKE_HEADERS,
        params=payload,
    )

    # Nothing should have been propagated over RPC.
    self.mock_rpc_client().update_subcloud.assert_not_called()
    self.mock_rpc_client().update_association_sync_status. \
        assert_not_called()
@mock.patch.object(subclouds.SubcloudsController, "_get_patch_data")
@mock.patch.object(cutils, 'is_leader_on_local_site')
@mock.patch.object(cutils, 'subcloud_peer_group_get_by_ref')