Update dcmanager audit to use usm API

In this commit, a new subcloud_status endpoint type called software
was created. It is used when the usm switch is enabled to audit
patches and loads through the usm API. When the usm switch is
disabled, the patch and load subcloud_status entries are audited
instead, using the patching and sysinv APIs. The validations for
creating an upgrade orchestration strategy were also changed: when
the usm switch is enabled, the software subcloud_status is checked
instead of the load status.
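
For illustration only, a minimal sketch of the endpoint selection this
switch drives. The use_usm option name follows the diff below; the
standalone option registration and the endpoint string values are
assumptions made for the sketch, not the module's real definitions.

# Sketch: which endpoint type the upgrade-strategy validation checks,
# depending on the usm switch. The registration of use_usm here is
# hypothetical; in dcmanager the option is defined elsewhere.
from oslo_config import cfg

cfg.CONF.register_opts([cfg.BoolOpt('use_usm', default=False)])

ENDPOINT_TYPE_SOFTWARE = 'usm'   # assumed value, for illustration
ENDPOINT_TYPE_LOAD = 'load'      # assumed value, for illustration

def upgrade_sync_endpoint():
    # With USM enabled the software status is consulted; otherwise the
    # legacy load status is used, as described above.
    if cfg.CONF.use_usm:
        return ENDPOINT_TYPE_SOFTWARE
    return ENDPOINT_TYPE_LOAD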

Test Plan:
PASS: Turn the usm switch on, add a subcloud, and verify that
the subcloud has the software subcloud_status.
PASS: Turn the usm switch off and verify that the patch and load
statuses are audited.
PASS: Turn the usm switch on and verify that the software status
is audited.
PASS: Turn the usm switch on, upload a patch to the subcloud and verify
that the software status goes to out-of-sync.
PASS: Turn the usm switch on, apply a patch to the system controller and verify
that the software status goes to out-of-sync.
PASS: Turn the usm switch on, with the software status out-of-sync,
create an upgrade-strategy and verify that it is created successfully.
PASS: Turn the usm switch on, restart the audit and verify that the usm
endpoint was added to an existing subcloud.

Story: 2010676
Task: 48784

Signed-off-by: Christopher Souza <Christopher.DeOliveiraSouza@windriver.com>
Change-Id: If2d14c15a6ff4e38b004b24700d3443a2e86d2c2
Christopher Souza 2023-09-18 07:57:15 -03:00 committed by Christopher de Oliveira Souza
parent 77300bb9d3
commit f1b5aad38a
14 changed files with 324 additions and 71 deletions

View File

@ -105,6 +105,16 @@ ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_KUBERNETES,
ENDPOINT_TYPE_KUBE_ROOTCA]
ENDPOINT_TYPES_LIST_USM = [ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_PATCHING,
ENDPOINT_TYPE_IDENTITY,
ENDPOINT_TYPE_LOAD,
ENDPOINT_TYPE_DC_CERT,
ENDPOINT_TYPE_FIRMWARE,
ENDPOINT_TYPE_KUBERNETES,
ENDPOINT_TYPE_KUBE_ROOTCA,
ENDPOINT_TYPE_SOFTWARE]
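
As the commit message indicates, the new list should differ from the
default one only by the trailing software endpoint type; a quick
illustrative check (not part of the change itself):

# Illustrative check: the USM list should be the default endpoint list
# plus ENDPOINT_TYPE_SOFTWARE.
assert set(ENDPOINT_TYPES_LIST_USM) - set(ENDPOINT_TYPES_LIST) == \
    {ENDPOINT_TYPE_SOFTWARE}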
# All endpoint audit requests
ENDPOINT_AUDIT_REQUESTS = {
ENDPOINT_TYPE_FIRMWARE: 'firmware_audit_requested',

View File

@ -22,6 +22,8 @@ from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import patching_v1
from dccommon.drivers.openstack.patching_v1 import PatchingClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack import software_v1
from dccommon.drivers.openstack.software_v1 import SoftwareClient
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcmanager.common import utils
@ -53,6 +55,27 @@ class PatchAuditData(object):
return cls(**values)
class SoftwareAuditData(object):
def __init__(self, releases, deployed_release_ids,
committed_release_ids):
self.releases = releases
self.deployed_release_ids = deployed_release_ids
self.committed_release_ids = committed_release_ids
def to_dict(self):
return {
'releases': self.releases,
'deployed_release_ids': self.deployed_release_ids,
'committed_release_ids': self.committed_release_ids,
}
@classmethod
def from_dict(cls, values):
if values is None:
return None
return cls(**values)
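
Because the audit data crosses the RPC boundary as a plain dict, the
to_dict()/from_dict() pair above round-trips it. A small usage sketch
with made-up release values:

# Round-trip sketch for SoftwareAuditData (values are illustrative).
releases = {'starlingx-9.0.0': {'state': 'deployed'}}
audit_data = SoftwareAuditData(releases,
                               deployed_release_ids=['starlingx-9.0.0'],
                               committed_release_ids=[])
wire = audit_data.to_dict()                  # what travels over RPC
restored = SoftwareAuditData.from_dict(wire)
assert restored.deployed_release_ids == ['starlingx-9.0.0']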
class PatchAudit(object):
"""Manages tasks related to patch audits."""
@ -81,6 +104,45 @@ class PatchAudit(object):
sysinv_client.region_name)
return upgrades
def get_software_regionone_audit_data(self):
"""Query RegionOne to determine what releases should be deployed
to the system as well as the current software version
:return: A new SoftwareAuditData object
"""
try:
m_os_ks_client = OpenStackDriver(
region_name=dccommon_consts.DEFAULT_REGION_NAME,
region_clients=None).keystone_client
software_endpoint = m_os_ks_client.endpoint_cache.get_endpoint(
dccommon_consts.ENDPOINT_TYPE_SOFTWARE)
software_client = SoftwareClient(dccommon_consts.DEFAULT_REGION_NAME,
m_os_ks_client.session,
endpoint=software_endpoint)
except Exception:
LOG.exception('Failure initializing OS Client, skip software audit.')
return None
# First query RegionOne to determine what releases should be deployed
# to the system.
regionone_releases = software_client.query()
LOG.debug("regionone_releases: %s" % regionone_releases)
# Build lists of releases that should be deployed or committed in all
# subclouds, based on their state in RegionOne.
deployed_release_ids = list()
committed_release_ids = list()
for release_id in regionone_releases.keys():
if regionone_releases[release_id]['state'] == \
software_v1.DEPLOYED:
deployed_release_ids.append(release_id)
elif regionone_releases[release_id]['state'] == \
software_v1.COMMITTED:
committed_release_ids.append(release_id)
LOG.debug("RegionOne deployed_release_ids: %s" % deployed_release_ids)
LOG.debug("RegionOne committed_release_ids: %s" % committed_release_ids)
return SoftwareAuditData(regionone_releases, deployed_release_ids,
committed_release_ids)
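
The loop above implies that software_client.query() returns a dict keyed
by release id with a 'state' field. An illustrative result and the
resulting partition; the state strings are assumptions standing in for
software_v1.DEPLOYED and software_v1.COMMITTED:

# Illustrative RegionOne query() result and its partition by state.
regionone_releases = {
    'starlingx-9.0.0': {'state': 'deployed'},
    'starlingx-9.0.1': {'state': 'committed'},
}
deployed_release_ids = [rid for rid, rel in regionone_releases.items()
                        if rel['state'] == 'deployed']
committed_release_ids = [rid for rid, rel in regionone_releases.items()
                         if rel['state'] == 'committed']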
def get_regionone_audit_data(self):
"""Query RegionOne to determine what patches should be applied
@ -133,7 +195,102 @@ class PatchAudit(object):
return PatchAuditData(regionone_patches, applied_patch_ids,
committed_patch_ids, regionone_software_version)
def subcloud_patch_audit(self, subcloud_name, subcloud_region, audit_data, do_load_audit):
def subcloud_audit(self, subcloud_name, subcloud_region, audit_data, software_audit_data,
do_load_audit):
if software_audit_data:
self.subcloud_software_audit(subcloud_name, subcloud_region, software_audit_data)
else:
self.subcloud_patch_audit(subcloud_name, subcloud_region, audit_data,
do_load_audit)
def subcloud_software_audit(self, subcloud_name, subcloud_region, audit_data):
LOG.info('Triggered software audit for: %s.' % subcloud_name)
try:
sc_os_client = OpenStackDriver(region_name=subcloud_region,
region_clients=None).keystone_client
session = sc_os_client.session
software_endpoint = sc_os_client.endpoint_cache. \
get_endpoint(dccommon_consts.ENDPOINT_TYPE_SOFTWARE)
software_client = SoftwareClient(
subcloud_region, session,
endpoint=software_endpoint)
except (keystone_exceptions.EndpointNotFound,
keystone_exceptions.ConnectFailure,
keystone_exceptions.ConnectTimeout,
IndexError):
LOG.exception("Endpoint for online subcloud %s not found, skip "
"software audit." % subcloud_name)
return
# Retrieve all the releases that are present in this subcloud.
try:
subcloud_releases = software_client.query()
LOG.debug("Releases for subcloud %s: %s" %
(subcloud_name, subcloud_releases))
except Exception:
LOG.warn('Cannot retrieve releases for subcloud: %s, skip software '
'audit' % subcloud_name)
return
out_of_sync = False
# audit_data will be a dict due to passing through RPC so objectify it
audit_data = SoftwareAuditData.from_dict(audit_data)
# Check that all releases in this subcloud are in the correct
# state, based on the state of the release in RegionOne.
for release_id in subcloud_releases.keys():
if subcloud_releases[release_id]['state'] == \
software_v1.DEPLOYED:
if release_id not in audit_data.deployed_release_ids:
if release_id not in audit_data.committed_release_ids:
LOG.debug("Release %s should not be deployed in %s" %
(release_id, subcloud_name))
else:
LOG.debug("Release %s should be committed in %s" %
(release_id, subcloud_name))
out_of_sync = True
elif subcloud_releases[release_id]['state'] == \
software_v1.COMMITTED:
if (release_id not in audit_data.committed_release_ids and
release_id not in audit_data.deployed_release_ids):
LOG.warn("Release %s should not be committed in %s" %
(release_id, subcloud_name))
out_of_sync = True
else:
# In steady state, all releases should either be deployed
# or committed in each subcloud. Releases in other
# states mean a sync is required.
out_of_sync = True
# Check that all deployed or committed releases in RegionOne are
# present in the subcloud.
for release_id in audit_data.deployed_release_ids:
if release_id not in subcloud_releases:
LOG.debug("Release %s missing from %s" %
(release_id, subcloud_name))
out_of_sync = True
for release_id in audit_data.committed_release_ids:
if release_id not in subcloud_releases:
LOG.debug("Release %s missing from %s" %
(release_id, subcloud_name))
out_of_sync = True
if out_of_sync:
self._update_subcloud_sync_status(
subcloud_name,
subcloud_region, dccommon_consts.ENDPOINT_TYPE_SOFTWARE,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
else:
self._update_subcloud_sync_status(
subcloud_name,
subcloud_region, dccommon_consts.ENDPOINT_TYPE_SOFTWARE,
dccommon_consts.SYNC_STATUS_IN_SYNC)
LOG.info('Software audit completed for: %s.' % subcloud_name)
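
Read as a whole, the audit above boils down to a single in-sync
predicate. A hedged sketch of that reading follows; the flattened diff
loses indentation, so the exact nesting is inferred, and the state
strings stand in for the software_v1 constants:

# Sketch of the sync decision applied by subcloud_software_audit.
DEPLOYED = 'deployed'     # assumed value
COMMITTED = 'committed'   # assumed value

def software_in_sync(subcloud_releases, deployed_ids, committed_ids):
    for release_id, release in subcloud_releases.items():
        state = release['state']
        if state == DEPLOYED:
            if release_id not in deployed_ids:
                return False   # deployed here but not deployed in RegionOne
        elif state == COMMITTED:
            if (release_id not in committed_ids and
                    release_id not in deployed_ids):
                return False   # committed here but unknown to RegionOne
        else:
            return False       # any other state requires a sync
    # Every deployed or committed RegionOne release must exist here too.
    return all(rid in subcloud_releases
               for rid in deployed_ids + committed_ids)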
def subcloud_patch_audit(self, subcloud_name, subcloud_region, audit_data,
do_load_audit):
LOG.info('Triggered patch audit for: %s.' % subcloud_name)
try:
sc_os_client = OpenStackDriver(region_name=subcloud_region,

View File

@ -129,7 +129,8 @@ class ManagerAuditWorkerClient(object):
firmware_audit_data=None,
kubernetes_audit_data=None,
do_openstack_audit=False,
kube_rootca_update_data=None):
kube_rootca_update_data=None,
software_audit_data=None):
"""Tell audit-worker to perform audit on the subclouds with these
subcloud IDs.
@ -141,7 +142,8 @@ class ManagerAuditWorkerClient(object):
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
do_openstack_audit=do_openstack_audit,
kube_rootca_update_audit_data=kube_rootca_update_data))
kube_rootca_update_audit_data=kube_rootca_update_data,
software_audit_data=software_audit_data))
def update_subcloud_endpoints(self, ctxt, subcloud_name, endpoints):
"""Update endpoints of services for a subcloud region"""

View File

@ -230,7 +230,8 @@ class DCManagerAuditWorkerService(service.Service):
firmware_audit_data,
kubernetes_audit_data,
do_openstack_audit,
kube_rootca_update_audit_data):
kube_rootca_update_audit_data,
software_audit_data):
"""Used to trigger audits of the specified subcloud(s)"""
self.subcloud_audit_worker_manager.audit_subclouds(
context,
@ -239,7 +240,8 @@ class DCManagerAuditWorkerService(service.Service):
firmware_audit_data,
kubernetes_audit_data,
do_openstack_audit,
kube_rootca_update_audit_data)
kube_rootca_update_audit_data,
software_audit_data)
@request_context
def update_subcloud_endpoints(self, context, subcloud_name, endpoints):

View File

@ -99,33 +99,41 @@ class SubcloudAuditManager(manager.Manager):
def _add_missing_endpoints(self):
# Update this flag file based on the most recent new endpoint
file_path = os.path.join(CONFIG_PATH,
'.kube_rootca_update_endpoint_added')
# If file exists on the controller, all the endpoints have been
# added to DB since last time an endpoint was added
if not os.path.isfile(file_path):
# Ensures all endpoints exist for all subclouds
# If the endpoint doesn't exist, an entry will be made
# in endpoint_status table
for subcloud in db_api.subcloud_get_all(self.context):
subcloud_statuses = \
db_api.subcloud_status_get_all(self.context,
subcloud.id)
# Use set difference to find missing endpoints
endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST)
subcloud_set = set()
for subcloud_status in subcloud_statuses:
subcloud_set.add(subcloud_status.endpoint_type)
file_path_list = []
file_path_list.append(os.path.join(CONFIG_PATH,
'.kube_rootca_update_endpoint_added'))
if cfg.CONF.use_usm:
file_path_list.append(os.path.join(CONFIG_PATH,
'.usm_endpoint_added'))
for file_path in file_path_list:
# If file exists on the controller, all the endpoints have been
# added to DB since last time an endpoint was added
if not os.path.isfile(file_path):
# Ensures all endpoints exist for all subclouds
# If the endpoint doesn't exist, an entry will be made
# in endpoint_status table
for subcloud in db_api.subcloud_get_all(self.context):
subcloud_statuses = \
db_api.subcloud_status_get_all(self.context,
subcloud.id)
# Use set difference to find missing endpoints
if cfg.CONF.use_usm:
endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST_USM)
else:
endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST)
subcloud_set = set()
for subcloud_status in subcloud_statuses:
subcloud_set.add(subcloud_status.endpoint_type)
missing_endpoints = list(endpoint_type_set - subcloud_set)
missing_endpoints = list(endpoint_type_set - subcloud_set)
for endpoint in missing_endpoints:
db_api.subcloud_status_create(self.context,
subcloud.id,
endpoint)
# Add a flag on a replicated filesystem to avoid re-running
# the DB checks for missing subcloud endpoints
open(file_path, 'w').close()
for endpoint in missing_endpoints:
db_api.subcloud_status_create(self.context,
subcloud.id,
endpoint)
# Add a flag on a replicated filesystem to avoid re-running
# the DB checks for missing subcloud endpoints
open(file_path, 'w').close()
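
The missing-endpoint detection above is a plain set difference between
the expected endpoint types and the ones already recorded for the
subcloud. A small worked example with abridged, illustrative names:

# Worked example of the set difference used above (names abridged).
endpoint_type_set = {'platform', 'patching', 'load', 'usm'}  # expected
subcloud_set = {'platform', 'patching', 'load'}              # already in DB
missing_endpoints = list(endpoint_type_set - subcloud_set)   # ['usm']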
@classmethod
def trigger_firmware_audit(cls, context):
@ -304,12 +312,17 @@ class SubcloudAuditManager(manager.Manager):
audit_kube_rootca_updates):
"""Return the patch / firmware / kubernetes audit data as needed."""
patch_audit_data = None
software_audit_data = None
firmware_audit_data = None
kubernetes_audit_data = None
kube_rootca_update_audit_data = None
if audit_patch:
# Query RegionOne patches and software version
patch_audit_data = self.patch_audit.get_regionone_audit_data()
if cfg.CONF.use_usm:
# Query RegionOne releases
software_audit_data = self.patch_audit.get_software_regionone_audit_data()
else:
# Query RegionOne patches and software version
patch_audit_data = self.patch_audit.get_regionone_audit_data()
if audit_firmware:
# Query RegionOne firmware
firmware_audit_data = self.firmware_audit.get_regionone_audit_data()
@ -321,7 +334,8 @@ class SubcloudAuditManager(manager.Manager):
kube_rootca_update_audit_data = \
self.kube_rootca_update_audit.get_regionone_audit_data()
return (patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data)
kubernetes_audit_data, kube_rootca_update_audit_data,
software_audit_data)
def _periodic_subcloud_audit_loop(self):
"""Audit availability of subclouds loop."""
@ -422,7 +436,8 @@ class SubcloudAuditManager(manager.Manager):
% (audit_patch, audit_firmware,
audit_kubernetes, audit_kube_rootca_update))
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
kubernetes_audit_data, kube_rootca_update_audit_data,
software_audit_data) = \
self._get_audit_data(audit_patch,
audit_firmware,
audit_kubernetes,
@ -449,7 +464,8 @@ class SubcloudAuditManager(manager.Manager):
firmware_audit_data,
kubernetes_audit_data,
do_openstack_audit,
kube_rootca_update_audit_data)
kube_rootca_update_audit_data,
software_audit_data)
LOG.debug('Sent subcloud audit request message for subclouds: %s' % subcloud_ids)
subcloud_ids = []
if len(subcloud_ids) > 0:
@ -461,7 +477,8 @@ class SubcloudAuditManager(manager.Manager):
firmware_audit_data,
kubernetes_audit_data,
do_openstack_audit,
kube_rootca_update_audit_data)
kube_rootca_update_audit_data,
software_audit_data)
LOG.debug('Sent final subcloud audit request message for subclouds: %s' % subcloud_ids)
else:
LOG.debug('Done sending audit request messages.')

View File

@ -86,7 +86,8 @@ class SubcloudAuditWorkerManager(manager.Manager):
firmware_audit_data,
kubernetes_audit_data,
do_openstack_audit,
kube_rootca_update_audit_data):
kube_rootca_update_audit_data,
software_audit_data):
"""Run audits of the specified subcloud(s)"""
LOG.debug('PID: %s, subclouds to audit: %s, do_openstack_audit: %s' %
@ -162,6 +163,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
software_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
@ -304,6 +306,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
software_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
@ -321,6 +324,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
software_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
@ -352,6 +356,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
software_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
@ -489,12 +494,13 @@ class SubcloudAuditWorkerManager(manager.Manager):
failmsg = "Audit failure subcloud: %s, endpoint: %s"
# If we have patch audit data, audit the subcloud
if do_patch_audit and patch_audit_data:
if do_patch_audit and (patch_audit_data or software_audit_data):
try:
self.patch_audit.subcloud_patch_audit(subcloud_name,
subcloud_region,
patch_audit_data,
do_load_audit)
self.patch_audit.subcloud_audit(subcloud_name,
subcloud_region,
patch_audit_data,
software_audit_data,
do_load_audit)
audits_done.append('patch')
if do_load_audit:
audits_done.append('load')

View File

@ -24,6 +24,7 @@ import sqlalchemy
import sys
import threading
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.exception import DBDuplicateEntry
from oslo_db.sqlalchemy import enginefacade
@ -543,7 +544,11 @@ def subcloud_status_create(context, subcloud_id, endpoint_type):
@require_admin_context
def subcloud_status_create_all(context, subcloud_id):
with write_session() as session:
for endpoint_type in dccommon_consts.ENDPOINT_TYPES_LIST:
if cfg.CONF.use_usm:
endpoint_type_list = dccommon_consts.ENDPOINT_TYPES_LIST_USM
else:
endpoint_type_list = dccommon_consts.ENDPOINT_TYPES_LIST
for endpoint_type in endpoint_type_list:
subcloud_status_ref = models.SubcloudStatus()
subcloud_status_ref.subcloud_id = subcloud_id
subcloud_status_ref.endpoint_type = endpoint_type

View File

@ -138,11 +138,21 @@ class SwUpdateManager(manager.Manager):
elif strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
# force option only has an effect in offline case for upgrade
if force and (availability_status != dccommon_consts.AVAILABILITY_ONLINE):
if cfg.CONF.use_usm:
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_SOFTWARE and
subcloud_status.sync_status !=
dccommon_consts.SYNC_STATUS_IN_SYNC)
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_LOAD and
subcloud_status.sync_status !=
dccommon_consts.SYNC_STATUS_IN_SYNC)
else:
if cfg.CONF.use_usm:
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_SOFTWARE and
subcloud_status.sync_status ==
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_LOAD and
subcloud_status.sync_status ==
@ -332,8 +342,12 @@ class SwUpdateManager(manager.Manager):
if strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
# Make sure subcloud requires upgrade
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_LOAD)
if cfg.CONF.use_usm:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_SOFTWARE)
else:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_LOAD)
if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
@ -453,6 +467,15 @@ class SwUpdateManager(manager.Manager):
if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
if not force:
continue
elif cfg.CONF.use_usm:
if (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_SOFTWARE and
subcloud_status.sync_status ==
dccommon_consts.SYNC_STATUS_UNKNOWN):
raise exceptions.BadRequest(
resource='strategy',
msg='Software sync status is unknown for one or more '
'subclouds')
elif (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_LOAD and
subcloud_status.sync_status ==

View File

@ -133,7 +133,8 @@ class DCManagerStateService(service.Service):
# If the patching sync status is being set to unknown, trigger the
# patching audit so it can update the sync status ASAP.
if endpoint_type == dccommon_consts.ENDPOINT_TYPE_PATCHING and \
if (endpoint_type == dccommon_consts.ENDPOINT_TYPE_PATCHING
or endpoint_type == dccommon_consts.ENDPOINT_TYPE_SOFTWARE) and \
sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
self.audit_rpc_client.trigger_patch_audit(context)

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -425,8 +425,8 @@ class TestFirmwareAudit(base.DCManagerTestCase):
return dict_results
def get_fw_audit_data(self, am):
patch_audit_data, firmware_audit_data, kubernetes_audit_data, kube_root = \
am._get_audit_data(True, True, True, True)
patch_audit_data, firmware_audit_data, kubernetes_audit_data, kube_root,\
software_audit_data = am._get_audit_data(True, True, True, True)
# Convert to dict like what would happen calling via RPC
firmware_audit_data = self._rpc_convert(firmware_audit_data)

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -141,8 +141,8 @@ class TestKubernetesAudit(base.DCManagerTestCase):
return dict_results
def get_kube_audit_data(self, am):
patch_audit_data, firmware_audit_data, kubernetes_audit_data, kube_rootca = \
am._get_audit_data(True, True, True, True)
patch_audit_data, firmware_audit_data, kubernetes_audit_data, kube_rootca, \
software_audit_data = am._get_audit_data(True, True, True, True)
# Convert to dict like what would happen calling via RPC
kubernetes_audit_data = self._rpc_convert(kubernetes_audit_data)
return kubernetes_audit_data

View File

@ -269,7 +269,8 @@ class TestPatchAudit(base.DCManagerTestCase):
def get_patch_audit_data(self, am):
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_data) = \
kubernetes_audit_data, kube_rootca_data,
software_audit_data) = \
am._get_audit_data(True, True, True, True)
# Convert to dict like what would happen calling via RPC
patch_audit_data = patch_audit_data.to_dict()

View File

@ -62,7 +62,7 @@ class ManagerRpcAuditAPITestCase(base.DCManagerTestCase):
# Without fanout the target is the same
rpcapi.audit_subclouds(
self.context, ['subcloud1', 'subcloud2'],
True, False, True, True, False)
True, False, True, True, False, False)
exp_msg2 = {'method': 'audit_subclouds',
'args': {'subcloud_ids': ['subcloud1', 'subcloud2'],
@ -70,7 +70,8 @@ class ManagerRpcAuditAPITestCase(base.DCManagerTestCase):
'firmware_audit_data': False,
'kubernetes_audit_data': True,
'do_openstack_audit': True,
'kube_rootca_update_audit_data': False},
'kube_rootca_update_audit_data': False,
'software_audit_data': False},
'version': '1.0'}
transport._send.assert_called_with(rpcapi._client.target,

View File

@ -58,6 +58,7 @@ class FakeAlarmAggregation(object):
class FakePatchAudit(object):
def __init__(self):
self.subcloud_audit = mock.MagicMock()
self.subcloud_patch_audit = mock.MagicMock()
self.get_regionone_audit_data = mock.MagicMock()
@ -409,7 +410,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_kubernetes_audit = True
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
kubernetes_audit_data, kube_rootca_update_audit_data,
software_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit,
@ -424,6 +426,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
software_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
@ -449,8 +452,9 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
subcloud.name, self.fake_openstack_client.fm_client)
# Verify patch audit is called
self.fake_patch_audit.subcloud_patch_audit.assert_called_with(
subcloud.name, subcloud.region_name, patch_audit_data, do_load_audit)
self.fake_patch_audit.subcloud_audit.assert_called_with(
subcloud.name, subcloud.region_name, patch_audit_data,
software_audit_data, do_load_audit)
# Verify firmware audit is called
self.fake_firmware_audit.subcloud_firmware_audit.assert_called_with(
@ -486,7 +490,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_kubernetes_audit = True
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
kubernetes_audit_data, kube_rootca_update_audit_data,
software_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit,
@ -501,6 +506,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
software_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
@ -553,7 +559,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_kubernetes_audit = True
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
kubernetes_audit_data, kube_rootca_update_audit_data,
software_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit,
@ -568,6 +575,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
software_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
@ -621,6 +629,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data=None,
kubernetes_audit_data=None,
kube_rootca_update_audit_data=None,
software_audit_data=None,
do_patch_audit=False,
do_load_audit=False,
do_firmware_audit=False,
@ -664,6 +673,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data=None,
kubernetes_audit_data=None,
kube_rootca_update_audit_data=None,
software_audit_data=None,
do_patch_audit=False,
do_load_audit=False,
do_firmware_audit=False,
@ -727,7 +737,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_kubernetes_audit = True
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
kubernetes_audit_data, kube_rootca_update_audit_data,
software_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit,
@ -740,6 +751,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
kube_rootca_update_audit_data=kube_rootca_update_audit_data,
software_audit_data=software_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
@ -767,6 +779,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
kube_rootca_update_audit_data=kube_rootca_update_audit_data,
software_audit_data=software_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
@ -787,8 +800,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
subcloud.name, self.fake_openstack_client.fm_client)
# Verify patch audit is called only once
self.fake_patch_audit.subcloud_patch_audit.assert_called_once_with(
subcloud.name, subcloud.region_name, mock.ANY, True)
self.fake_patch_audit.subcloud_audit.assert_called_once_with(
subcloud.name, subcloud.region_name, mock.ANY, mock.ANY, True)
# Verify firmware audit is only called once
self.fake_firmware_audit.subcloud_firmware_audit.assert_called_once_with(
@ -825,7 +838,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_kubernetes_audit = True
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
kubernetes_audit_data, kube_rootca_update_audit_data,
software_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit,
@ -838,6 +852,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
kube_rootca_update_audit_data=kube_rootca_update_audit_data,
software_audit_data=software_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
@ -897,7 +912,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data=True,
kubernetes_audit_data=True,
do_openstack_audit=False,
kube_rootca_update_audit_data=True)
kube_rootca_update_audit_data=True,
software_audit_data=False)
# Verify if audit was not skipped
mock_subcloud_audits_end_audit.assert_not_called()
@ -929,7 +945,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_kubernetes_audit = True
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
kubernetes_audit_data, kube_rootca_update_audit_data,
software_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit,
@ -942,6 +959,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
kube_rootca_update_audit_data=kube_rootca_update_audit_data,
software_audit_data=software_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
@ -980,7 +998,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data=True,
kubernetes_audit_data=True,
do_openstack_audit=False,
kube_rootca_update_audit_data=True)
kube_rootca_update_audit_data=True,
software_audit_data=False)
# Verify if audit was skipped
mock_subcloud_audits_end_audit.assert_called_once()
@ -1011,7 +1030,9 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_kubernetes_audit = True
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
kubernetes_audit_data, kube_rootca_update_audit_data,
software_audit_data
) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit,
@ -1024,6 +1045,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
kube_rootca_update_audit_data=kube_rootca_update_audit_data,
software_audit_data=software_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
@ -1097,6 +1119,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
None, # firmware_audit_data
None, # kubernetes_audit_data
None, # kube_rootca_update_audit_data
None, # software_audit_data
False, # do_patch_audit
False, # do_load_audit
False, # do_firmware_audit
@ -1160,6 +1183,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
None, # firmware_audit_data
None, # kubernetes_audit_data
None, # kube_roota_update_audit_data
None, # software_audit_data
False, # do_patch_audit
False, # do_load_audit,
False, # do_firmware_audit
@ -1222,6 +1246,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
None, # firmware_audit_data
None, # kubernetes_audit_data
None, # kube_rootca_update_audit_data
None, # software_audit_data
False, # do_patch_audit
False, # do_load_audit
False, # do_firmware_audit
@ -1278,7 +1303,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_kubernetes_audit = False
do_kube_rootca_audit = False
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
kubernetes_audit_data, kube_rootca_update_audit_data,
software_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit,
@ -1304,6 +1330,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
software_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
@ -1311,8 +1338,9 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_kube_rootca_audit)
# Verify patch audit is called
self.fake_patch_audit.subcloud_patch_audit.assert_called_with(
subcloud.name, subcloud.region_name, patch_audit_data, do_load_audit)
self.fake_patch_audit.subcloud_audit.assert_called_with(
subcloud.name, subcloud.region_name, patch_audit_data,
software_audit_data, do_load_audit)
# Verify the _update_subcloud_audit_fail_count is not called
with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \