Remove the use_usm flag

With the creation of the new CLI [1] command software-deploy-strategy,
the use_usm flag is no longer needed.

The following commits will be reverted after this is merged:
- https://review.opendev.org/c/starlingx/stx-puppet/+/890326
- https://review.opendev.org/c/starlingx/stx-puppet/+/891343

Test Plan:
PASS: Execute the software orchestration successfully.
- Use the new CLI [1] to execute software orchestration
- Verify create, show, apply, abort, and delete for new strategy type: software
- Software, load, and patching audits working properly.
PASS: Execute the upgrade orchestration
- Verify create, show, apply, abort, and delete for type: upgrade

1: https://review.opendev.org/c/starlingx/distcloud-client/+/908539

Story: 2010676
Task: 49556

Change-Id: I38c3868e054cf22dd986016ae5226aaf0e70dd67
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
This commit is contained in:
Hugo Brito 2024-02-08 22:43:28 -03:00
parent 9e42f10470
commit 591726bea6
21 changed files with 187 additions and 304 deletions

View File

@ -1,4 +1,4 @@
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -103,17 +103,8 @@ ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_DC_CERT,
ENDPOINT_TYPE_FIRMWARE,
ENDPOINT_TYPE_KUBERNETES,
ENDPOINT_TYPE_KUBE_ROOTCA]
ENDPOINT_TYPES_LIST_USM = [ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_PATCHING,
ENDPOINT_TYPE_IDENTITY,
ENDPOINT_TYPE_LOAD,
ENDPOINT_TYPE_DC_CERT,
ENDPOINT_TYPE_FIRMWARE,
ENDPOINT_TYPE_KUBERNETES,
ENDPOINT_TYPE_KUBE_ROOTCA,
ENDPOINT_TYPE_SOFTWARE]
ENDPOINT_TYPE_KUBE_ROOTCA,
ENDPOINT_TYPE_SOFTWARE]
# All endpoint audit requests
ENDPOINT_AUDIT_REQUESTS = {

View File

@ -1,5 +1,5 @@
# Copyright (c) 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -43,7 +43,8 @@ SUPPORTED_STRATEGY_TYPES = [
consts.SW_UPDATE_TYPE_KUBERNETES,
consts.SW_UPDATE_TYPE_PATCH,
consts.SW_UPDATE_TYPE_PRESTAGE,
consts.SW_UPDATE_TYPE_UPGRADE
consts.SW_UPDATE_TYPE_UPGRADE,
consts.SW_UPDATE_TYPE_SOFTWARE,
]
# some strategies allow force for all subclouds

View File

@ -98,14 +98,10 @@ class SubcloudAuditManager(manager.Manager):
def _add_missing_endpoints(self):
# Update this flag file based on the most recent new endpoint
file_path_list = []
file_path_list.append(os.path.join(
CONFIG_PATH, '.kube_rootca_update_endpoint_added')
)
if cfg.CONF.use_usm:
file_path_list.append(os.path.join(
CONFIG_PATH, '.usm_endpoint_added')
)
file_path_list = [
os.path.join(CONFIG_PATH, '.kube_rootca_update_endpoint_added'),
os.path.join(CONFIG_PATH, '.usm_endpoint_added')
]
for file_path in file_path_list:
# If file exists on the controller, all the endpoints have been
# added to DB since last time an endpoint was added
@ -114,16 +110,10 @@ class SubcloudAuditManager(manager.Manager):
# If the endpoint doesn't exist, an entry will be made
# in endpoint_status table
for subcloud in db_api.subcloud_get_all(self.context):
subcloud_statuses = \
db_api.subcloud_status_get_all(self.context,
subcloud.id)
subcloud_statuses = db_api.subcloud_status_get_all(
self.context, subcloud.id)
# Use set difference to find missing endpoints
if cfg.CONF.use_usm:
endpoint_type_set = set(
dccommon_consts.ENDPOINT_TYPES_LIST_USM
)
else:
endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST)
endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST)
subcloud_set = set()
for subcloud_status in subcloud_statuses:
subcloud_set.add(subcloud_status.endpoint_type)
@ -320,14 +310,17 @@ class SubcloudAuditManager(manager.Manager):
firmware_audit_data = None
kubernetes_audit_data = None
kube_rootca_update_audit_data = None
# TODO(nicodemos): After the integration with VIM the patch audit and patch
# orchestration will be removed from the dcmanager. The audit_patch will
# be substituted by the software_audit. The software_audit will be
# responsible for the patch and load audit.
if audit_patch:
if cfg.CONF.use_usm:
# Query RegionOne releases
software_audit_data = \
self.patch_audit.get_software_regionone_audit_data()
else:
# Query RegionOne patches and software version
patch_audit_data = self.patch_audit.get_regionone_audit_data()
# Query RegionOne releases
software_audit_data = (
self.patch_audit.get_software_regionone_audit_data())
# Query RegionOne patches and software version
patch_audit_data = self.patch_audit.get_regionone_audit_data()
if audit_firmware:
# Query RegionOne firmware
firmware_audit_data = self.firmware_audit.get_regionone_audit_data()

View File

@ -1,5 +1,5 @@
# Copyright 2016 Ericsson AB
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -167,11 +167,6 @@ common_opts = [
'1:enabled via rvmc_debug_level, 2:globally enabled')
]
usm_opts = [
cfg.BoolOpt('use_usm', default=False,
help='parameter to enable usm api')
]
scheduler_opt_group = cfg.OptGroup(name='scheduler',
title='Scheduler options for periodic job')
keystone_opt_group = cfg.OptGroup(name='keystone_authtoken',
@ -194,7 +189,6 @@ def list_opts():
yield pecan_group.name, pecan_opts
yield None, global_opts
yield None, common_opts
yield None, usm_opts
def register_options():

View File

@ -27,7 +27,6 @@ TOPIC_DC_MANAGER_ORCHESTRATOR = "dcmanager-orchestrator"
CERTS_VAULT_DIR = "/opt/dc-vault/certs"
LOADS_VAULT_DIR = "/opt/dc-vault/loads"
PATCH_VAULT_DIR = "/opt/dc-vault/patches"
RELEASE_VAULT_DIR = "/opt/dc-vault/software"
BOOTSTRAP_VALUES = 'bootstrap_values'
BOOTSTRAP_ADDRESS = 'bootstrap-address'

View File

@ -22,30 +22,25 @@ import json
import os
import pwd
import re
import resource as sys_resource
import string
import subprocess
import uuid
import resource as sys_resource
import xml.etree.ElementTree as ElementTree
import yaml
import pecan
from keystoneauth1 import exceptions as keystone_exceptions
import netaddr
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import base64
import pecan
import six.moves
import tsconfig.tsconfig as tsc
import yaml
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack import software_v1
from dccommon.drivers.openstack.software_v1 import SoftwareClient
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon.drivers.openstack import vim
from dccommon import exceptions as dccommon_exceptions
@ -423,10 +418,7 @@ def get_vault_load_files(target_version):
in 'iso' or 'sig'.
: param target_version: The software version to search under the vault
"""
if cfg.CONF.use_usm:
vault_dir = "{}/{}/".format(consts.RELEASE_VAULT_DIR, target_version)
else:
vault_dir = "{}/{}/".format(consts.LOADS_VAULT_DIR, target_version)
vault_dir = "{}/{}/".format(consts.LOADS_VAULT_DIR, target_version)
matching_iso = None
matching_sig = None
@ -1066,37 +1058,12 @@ def get_systemcontroller_installed_loads():
dccommon_consts.SYSTEM_CONTROLLER_NAME)
raise
ks_client = os_client.keystone_client
if cfg.CONF.use_usm:
software_client = SoftwareClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME,
ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('usm'))
releases = software_client.query()
return get_loads_for_prestage_usm(releases)
else:
sysinv_client = SysinvClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'))
sysinv_client = SysinvClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'))
loads = sysinv_client.get_loads()
return get_loads_for_prestage(loads)
def get_loads_for_prestage_usm(releases):
"""Filter the loads that can be prestaged.
Return their software versions with the XX.XX format (e.g. 24.03).
"""
valid_states = [
software_v1.AVAILABLE,
software_v1.DEPLOYED,
software_v1.UNAVAILABLE,
software_v1.COMMITTED
]
return [".".join(releases[release]['sw_version'].split('.', 2)[:2])
for release in releases
if (releases[release]['state'] in valid_states and
releases[release]['sw_version'].endswith('.0'))]
loads = sysinv_client.get_loads()
return get_loads_for_prestage(loads)
def get_certificate_from_secret(secret_name, secret_ns):

View File

@ -23,7 +23,6 @@ import datetime
import sys
import threading
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.exception import DBDuplicateEntry
from oslo_db.sqlalchemy import enginefacade
@ -551,11 +550,7 @@ def subcloud_status_create(context, subcloud_id, endpoint_type):
@require_admin_context
def subcloud_status_create_all(context, subcloud_id):
with write_session() as session:
if cfg.CONF.use_usm:
endpoint_type_list = dccommon_consts.ENDPOINT_TYPES_LIST_USM
else:
endpoint_type_list = dccommon_consts.ENDPOINT_TYPES_LIST
for endpoint_type in endpoint_type_list:
for endpoint_type in dccommon_consts.ENDPOINT_TYPES_LIST:
subcloud_status_ref = models.SubcloudStatus()
subcloud_status_ref.subcloud_id = subcloud_id
subcloud_status_ref.endpoint_type = endpoint_type

View File

@ -19,7 +19,6 @@ import os
import shutil
import threading
from oslo_config import cfg
from oslo_log import log as logging
from tsconfig.tsconfig import SW_VERSION
@ -58,58 +57,56 @@ class SwUpdateManager(manager.Manager):
# Used to notify dcmanager-audit
self.audit_rpc_client = dcmanager_audit_rpc_client.ManagerAuditClient()
# Define which API will be used
self.use_usm = cfg.CONF.use_usm
# todo(abailey): refactor/decouple orch threads into a list
# Start worker threads
if self.use_usm:
# - software orchestration thread
self.software_orch_thread = SoftwareOrchThread(self.strategy_lock,
self.audit_rpc_client)
self.software_orch_thread.start()
else:
# - patch orchestration thread
self.patch_orch_thread = PatchOrchThread(self.strategy_lock,
self.audit_rpc_client)
self.patch_orch_thread.start()
# - sw upgrade orchestration thread
self.sw_upgrade_orch_thread = SwUpgradeOrchThread(self.strategy_lock,
self.audit_rpc_client)
self.sw_upgrade_orch_thread.start()
# - fw update orchestration thread
self.fw_update_orch_thread = FwUpdateOrchThread(self.strategy_lock,
self.audit_rpc_client)
# - software orchestration thread
self.software_orch_thread = SoftwareOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.software_orch_thread.start()
# - patch orchestration thread
self.patch_orch_thread = PatchOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.patch_orch_thread.start()
# - sw upgrade orchestration thread
self.sw_upgrade_orch_thread = SwUpgradeOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.sw_upgrade_orch_thread.start()
# - fw update orchestration thread
self.fw_update_orch_thread = FwUpdateOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.fw_update_orch_thread.start()
# - kube upgrade orchestration thread
self.kube_upgrade_orch_thread = \
KubeUpgradeOrchThread(self.strategy_lock, self.audit_rpc_client)
self.kube_upgrade_orch_thread = KubeUpgradeOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.kube_upgrade_orch_thread.start()
# - kube rootca update orchestration thread
self.kube_rootca_update_orch_thread = \
KubeRootcaUpdateOrchThread(self.strategy_lock,
self.audit_rpc_client)
self.kube_rootca_update_orch_thread = KubeRootcaUpdateOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.kube_rootca_update_orch_thread.start()
self.prestage_orch_thread = PrestageOrchThread(self.strategy_lock,
self.audit_rpc_client)
# - prestage orchestration thread
self.prestage_orch_thread = PrestageOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.prestage_orch_thread.start()
def stop(self):
# Stop (and join) the worker threads
if self.use_usm:
# - software orchestration thread
self.software_orch_thread.stop()
self.software_orch_thread.join()
else:
# - patch orchestration thread
self.patch_orch_thread.stop()
self.patch_orch_thread.join()
# - sw upgrade orchestration thread
self.sw_upgrade_orch_thread.stop()
self.sw_upgrade_orch_thread.join()
# - software orchestration thread
self.software_orch_thread.stop()
self.software_orch_thread.join()
# - patch orchestration thread
self.patch_orch_thread.stop()
self.patch_orch_thread.join()
# - sw upgrade orchestration thread
self.sw_upgrade_orch_thread.stop()
self.sw_upgrade_orch_thread.join()
# - fw update orchestration thread
self.fw_update_orch_thread.stop()
self.fw_update_orch_thread.join()
@ -137,28 +134,27 @@ class SwUpdateManager(manager.Manager):
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
# force option only has an effect in offline case for upgrade
if force and (
availability_status != dccommon_consts.AVAILABILITY_ONLINE
):
if cfg.CONF.use_usm:
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_SOFTWARE and
subcloud_status.sync_status !=
dccommon_consts.SYNC_STATUS_IN_SYNC)
if force and availability_status != dccommon_consts.AVAILABILITY_ONLINE:
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_LOAD and
subcloud_status.sync_status !=
dccommon_consts.SYNC_STATUS_IN_SYNC)
else:
if cfg.CONF.use_usm:
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_SOFTWARE and
subcloud_status.sync_status ==
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_LOAD and
subcloud_status.sync_status ==
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_SOFTWARE:
if force and availability_status != dccommon_consts.AVAILABILITY_ONLINE:
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_SOFTWARE and
subcloud_status.sync_status !=
dccommon_consts.SYNC_STATUS_IN_SYNC)
else:
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_SOFTWARE and
subcloud_status.sync_status ==
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_FIRMWARE:
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_FIRMWARE and
@ -192,12 +188,8 @@ class SwUpdateManager(manager.Manager):
# For prestage we reuse the ENDPOINT_TYPE_LOAD.
# We just need to key off a unique endpoint,
# so that the strategy is created only once.
if cfg.CONF.use_usm:
return (subcloud_status.endpoint_type
== dccommon_consts.ENDPOINT_TYPE_SOFTWARE)
else:
return (subcloud_status.endpoint_type
== dccommon_consts.ENDPOINT_TYPE_LOAD)
return (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_LOAD)
# Unimplemented strategy_type status check. Log an error
LOG.error("_validate_subcloud_status_sync for %s not implemented" %
strategy_type)
@ -348,17 +340,21 @@ class SwUpdateManager(manager.Manager):
if strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
# Make sure subcloud requires upgrade
if cfg.CONF.use_usm:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_SOFTWARE)
else:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_LOAD)
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_LOAD)
if subcloud_status.sync_status == \
dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require upgrade' % cloud_name)
elif strategy_type == consts.SW_UPDATE_TYPE_SOFTWARE:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_SOFTWARE)
if subcloud_status.sync_status == \
dccommon_consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg=f'Subcloud {cloud_name} does not require deploy')
elif strategy_type == consts.SW_UPDATE_TYPE_FIRMWARE:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id, dccommon_consts.ENDPOINT_TYPE_FIRMWARE)
@ -479,15 +475,6 @@ class SwUpdateManager(manager.Manager):
dccommon_consts.AVAILABILITY_ONLINE:
if not force:
continue
elif cfg.CONF.use_usm:
if (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_SOFTWARE and
subcloud_status.sync_status ==
dccommon_consts.SYNC_STATUS_UNKNOWN):
raise exceptions.BadRequest(
resource='strategy',
msg='Software sync status is unknown for one or more '
'subclouds')
elif (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_LOAD and
subcloud_status.sync_status ==
@ -496,6 +483,19 @@ class SwUpdateManager(manager.Manager):
resource='strategy',
msg='Upgrade sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_SOFTWARE:
if subcloud.availability_status != \
dccommon_consts.AVAILABILITY_ONLINE:
if not force:
continue
if (subcloud_status.endpoint_type ==
dccommon_consts.ENDPOINT_TYPE_SOFTWARE and
subcloud_status.sync_status ==
dccommon_consts.SYNC_STATUS_UNKNOWN):
raise exceptions.BadRequest(
resource='strategy',
msg='Software sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_PATCH:
if subcloud.availability_status != \
dccommon_consts.AVAILABILITY_ONLINE:

View File

@ -413,6 +413,10 @@ class TestFirmwareAudit(base.DCManagerTestCase):
self.mock_audit_worker_api.return_value = self.fake_audit_worker_api
self.addCleanup(p.stop)
p = mock.patch.object(patch_audit, 'SoftwareClient')
self.mock_patch_audit_sc = p.start()
self.addCleanup(p.stop)
def _rpc_convert(self, object_list):
# Convert to dict like what would happen calling via RPC
dict_results = []

View File

@ -110,6 +110,10 @@ class TestKubernetesAudit(base.DCManagerTestCase):
self.mock_patch_audit_pc.return_value = mock.MagicMock()
self.addCleanup(p.stop)
p = mock.patch.object(patch_audit, 'SoftwareClient')
self.mock_patch_audit_sc = p.start()
self.addCleanup(p.stop)
p = mock.patch.object(firmware_audit, 'OpenStackDriver')
self.mock_firmware_audit_driver = p.start()
self.mock_firmware_audit_driver.return_value = mock.MagicMock()

View File

@ -245,7 +245,7 @@ class FakeSysinvClientOneLoadUpgradeInProgress(object):
class TestPatchAudit(base.DCManagerTestCase):
def setUp(self):
super(TestPatchAudit, self).setUp()
super().setUp()
self.ctxt = utils.dummy_context()
# Mock the DCManager subcloud state API
@ -263,6 +263,11 @@ class TestPatchAudit(base.DCManagerTestCase):
self.mock_audit_worker_api.return_value = self.fake_audit_worker_api
self.addCleanup(p.stop)
# Mock the Software Client
p = mock.patch.object(patch_audit, 'SoftwareClient')
self.mock_patch_audit_sc = p.start()
self.addCleanup(p.stop)
def get_patch_audit_data(self, am):
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_data,

View File

@ -37,6 +37,7 @@ class FakePatchAudit(object):
def __init__(self):
self.get_regionone_audit_data = mock.MagicMock()
self.get_software_regionone_audit_data = mock.MagicMock()
class FakeFirmwareAudit(object):

View File

@ -63,6 +63,7 @@ class FakePatchAudit(object):
self.subcloud_audit = mock.MagicMock()
self.subcloud_patch_audit = mock.MagicMock()
self.get_regionone_audit_data = mock.MagicMock()
self.get_software_regionone_audit_data = mock.MagicMock()
class FakeFirmwareAudit(object):

View File

@ -5,8 +5,6 @@
#
import mock
from oslo_config import cfg
from dcmanager.common import consts
from dcmanager.orchestrator.states.software.finish_strategy import \
FinishStrategyState
@ -35,10 +33,6 @@ SUBCLOUD_RELEASES = {"DC.1": {"sw_version": "20.12",
class TestFinishStrategyState(TestSoftwareOrchestrator):
def setUp(self):
p = mock.patch.object(cfg.CONF, 'use_usm')
self.mock_use_usm = p.start()
self.mock_use_usm.return_value = True
self.addCleanup(p.stop)
super().setUp()
self.on_success_state = consts.STRATEGY_STATE_COMPLETE

View File

@ -201,6 +201,14 @@ class TestSwUpdateManager(base.DCManagerTestCase):
)
self.addCleanup(p.stop)
self.fake_software_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager, "SoftwareOrchThread")
self.fake_software_orch_thread = p.start()
self.fake_software_orch_thread.return_value = (
self.fake_software_orch_thread
)
self.addCleanup(p.stop)
self.fake_fw_update_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager, "FwUpdateOrchThread")
self.mock_fw_update_orch_thread = p.start()

View File

@ -110,12 +110,10 @@ class Acceptor(Router):
def add_patch_routes(self, app, conf, mapper):
api_controller = PatchAPIController(app, conf)
if cfg.CONF.use_usm:
for key, value in proxy_consts.SOFTWARE_PATH_MAP.items():
self._add_resource(mapper, api_controller, value, key, CONF.type)
else:
for key, value in proxy_consts.PATCH_PATH_MAP.items():
self._add_resource(mapper, api_controller, value, key, CONF.type)
for key, value in proxy_consts.SOFTWARE_PATH_MAP.items():
self._add_resource(mapper, api_controller, value, key, CONF.type)
for key, value in proxy_consts.PATCH_PATH_MAP.items():
self._add_resource(mapper, api_controller, value, key, CONF.type)
def add_identity_routes(self, app, conf, mapper):
api_controller = IdentityAPIController(app, conf)

View File

@ -405,7 +405,7 @@ class SysinvAPIController(APIController):
# load-import is stored in dc-vault and on /scratch temporary
# folder to be processed by sysinv
if self._is_load_import(request.path) and not cfg.CONF.use_usm:
if self._is_load_import(request.path):
req_body = self._store_load_to_vault(req)
params_dict = request.POST
try:
@ -455,14 +455,13 @@ class SysinvAPIController(APIController):
operation_type = proxy_utils.get_operation_type(environ)
if self.get_status_code(response) in self.OK_STATUS_CODE:
if resource_type == consts.RESOURCE_TYPE_SYSINV_LOAD:
if not cfg.CONF.use_usm:
if operation_type == consts.OPERATION_TYPE_POST:
new_load = json.loads(response.body)
self._save_load_to_vault(new_load['software_version'])
else:
sw_version = \
json.loads(response.body)['software_version']
self._remove_load_from_vault(sw_version)
if operation_type == consts.OPERATION_TYPE_POST:
new_load = json.loads(response.body)
self._save_load_to_vault(new_load['software_version'])
else:
sw_version = \
json.loads(response.body)['software_version']
self._remove_load_from_vault(sw_version)
elif resource_type == consts.RESOURCE_TYPE_SYSINV_DEVICE_IMAGE:
notify = True
if operation_type == consts.OPERATION_TYPE_POST:

View File

@ -40,28 +40,19 @@ from dcorch.common import context
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
if cfg.CONF.use_usm:
patch_opts = [
cfg.StrOpt('patch_vault',
default='/opt/dc-vault/software/',
help='file system for software storage on SystemController'),
]
else:
patch_opts = [
cfg.StrOpt('patch_vault',
default='/opt/dc-vault/patches/',
help='file system for patch storage on SystemController'),
]
patch_opts = [
cfg.StrOpt('patch_vault',
default='/opt/dc-vault/patches/',
help='file system for patch storage on SystemController'),
]
CONF.register_opts(patch_opts, CONF.type)
class PatchAPIController(Middleware):
if cfg.CONF.use_usm:
ENDPOINT_TYPE = dccommon_consts.ENDPOINT_TYPE_SOFTWARE
else:
ENDPOINT_TYPE = dccommon_consts.ENDPOINT_TYPE_PATCHING
ENDPOINT_TYPE = dccommon_consts.ENDPOINT_TYPE_PATCHING
USM_ENDPOINT_TYPE = dccommon_consts.ENDPOINT_TYPE_SOFTWARE
OK_STATUS_CODE = [
webob.exc.HTTPOk.code,
@ -75,22 +66,15 @@ class PatchAPIController(Middleware):
self.ctxt = context.get_admin_context()
self._default_dispatcher = APIDispatcher(app)
self.dcmanager_state_rpc_client = dcmanager_rpc_client.SubcloudStateClient()
if cfg.CONF.use_usm:
self.response_hander_map = {
proxy_consts.SOFTWARE_ACTION_UPLOAD: self.patch_upload_usm_req,
proxy_consts.SOFTWARE_ACTION_UPLOAD_DIR: self.patch_upload_dir_req,
proxy_consts.SOFTWARE_ACTION_DELETE: self.patch_delete_req,
proxy_consts.SOFTWARE_ACTION_COMMIT_PATCH: self.notify,
}
else:
self.response_hander_map = {
proxy_consts.PATCH_ACTION_UPLOAD: self.patch_upload_req,
proxy_consts.PATCH_ACTION_UPLOAD_DIR: self.patch_upload_dir_req,
proxy_consts.PATCH_ACTION_DELETE: self.patch_delete_req,
proxy_consts.PATCH_ACTION_APPLY: self.notify,
proxy_consts.PATCH_ACTION_COMMIT: self.notify,
proxy_consts.PATCH_ACTION_REMOVE: self.notify,
}
self.response_hander_map = {
proxy_consts.PATCH_ACTION_UPLOAD: self.patch_upload_req,
proxy_consts.PATCH_ACTION_UPLOAD_DIR: self.patch_upload_dir_req,
proxy_consts.PATCH_ACTION_DELETE: self.patch_delete_req,
proxy_consts.PATCH_ACTION_APPLY: self.notify,
proxy_consts.PATCH_ACTION_COMMIT: self.notify,
proxy_consts.PATCH_ACTION_REMOVE: self.notify,
proxy_consts.SOFTWARE_ACTION_COMMIT_PATCH: self.notify_usm,
}
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
@ -115,8 +99,6 @@ class PatchAPIController(Middleware):
def copy_patch_to_version_vault(self, patch):
try:
sw_version = get_release_from_patch(patch)
if cfg.CONF.use_usm:
sw_version = ".".join(sw_version.split('.', 2)[:2])
except Exception:
msg = "Unable to fetch release version from patch"
LOG.error(msg)
@ -143,10 +125,10 @@ class PatchAPIController(Middleware):
os.remove(fn)
return
except OSError:
msg = ("Unable to remove patch file (%s) from the central"
"storage." % fn)
msg = (f"Unable to remove patch file {fn} from the central "
"storage.")
raise webob.exc.HTTPUnprocessableEntity(explanation=msg)
LOG.info("Patch (%s) was not found in (%s)", patch, vault)
LOG.info(f"Patch {patch} was not found in {vault}")
def store_patch_file(self, filename, fileno):
# the following copy method is taken from from api/controllers/root.py
@ -189,31 +171,6 @@ class PatchAPIController(Middleware):
proxy_utils.cleanup(request.environ)
return response
# TODO(cdeolive): update to handle upload and delete of loads
# when USM upgrade API is ready. Rename the iso and sig with
# the metadata ID
def patch_upload_usm_req(self, request, response):
# stores release in the release storage
file_items = []
for key in request.POST.keys():
file_items.append(request.POST[key])
try:
for file_item in file_items:
self.store_patch_file(file_item.filename, file_item.file.fileno())
except Exception:
LOG.exception("Failed to store the release to vault")
# return a warning and prompt the user to try again
if hasattr(response, 'text'):
from builtins import str as text
data = json.loads(response.text)
if 'warning' in data:
msg = _('The release file could not be stored in the vault, '
'please upload the release again!')
data['warning'] += msg
response.text = text(json.dumps(data))
proxy_utils.cleanup(request.environ)
return response
def patch_upload_dir_req(self, request, response):
files = []
for key, path in request.GET.items():
@ -229,21 +186,25 @@ class PatchAPIController(Middleware):
def notify(self, request, response):
# Send a RPC to dcmanager
LOG.info("Send RPC to dcmanager to set patching sync status to "
"unknown")
LOG.info("Send RPC to dcmanager to set patching sync status to unknown")
self.dcmanager_state_rpc_client.update_subcloud_endpoint_status(
self.ctxt,
endpoint_type=self.ENDPOINT_TYPE,
sync_status=dccommon_consts.SYNC_STATUS_UNKNOWN)
return response
def notify_usm(self, request, response):
# Send a RPC to dcmanager
LOG.info("Send RPC to dcmanager to set software sync status to unknown")
self.dcmanager_state_rpc_client.update_subcloud_endpoint_status(
self.ctxt,
endpoint_type=self.USM_ENDPOINT_TYPE,
sync_status=dccommon_consts.SYNC_STATUS_UNKNOWN)
return response
def patch_delete_req(self, request, response):
if cfg.CONF.use_usm:
patch_ids = proxy_utils.get_routing_match_value(request.environ,
'release_id')
else:
patch_ids = proxy_utils.get_routing_match_value(request.environ,
'patch_id')
patch_ids = proxy_utils.get_routing_match_value(request.environ,
'patch_id')
LOG.info("Deleting patches: %s", patch_ids)
patch_list = os.path.normpath(patch_ids).split(os.path.sep)
for patch_file in patch_list:

36
distributedcloud/dcorch/api/proxy/common/constants.py Executable file → Normal file
View File

@ -1,4 +1,4 @@
# Copyright 2017-2023 Wind River
# Copyright 2017-2024 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -184,53 +184,32 @@ NEUTRON_PATH_MAP = {
# Software
SOFTWARE_ACTION_QUERY = 'query'
SOFTWARE_ACTION_DELETE = 'delete'
SOFTWARE_ACTION_UPLOAD = 'upload'
SOFTWARE_ACTION_QUERY_DEPENDENCIES = 'query_dependencies'
SOFTWARE_ACTION_COMMIT_PATCH = 'commit-patch'
SOFTWARE_ACTION_UPLOAD_DIR = 'upload_dir'
SOFTWARE_ACTION_SHOW = 'show'
SOFTWARE_UPLOAD_PATHS = [
'/v1/upload',
'/software/upload',
]
SOFTWARE_UPLOAD_DIR_PATHS = [
'/v1/upload_dir',
'/software/upload_dir',
]
SOFTWARE_QUERY_PATHS = [
'/v1/query',
'/software/query',
]
SOFTWARE_DELETE_PATHS = [
'/v1/delete/{release_id}',
'/software/delete/{release_id:.*?}',
'/v1/software/query',
]
SOFTWARE_SHOW_PATHS = [
'/v1/show/{release_id}',
'/software/show/{release_id:.*?}',
'/v1/software/show/{release_id:.*?}',
]
SOFTWARE_COMMIT_PATCH_PATHS = [
'/software/commit_dry_run/{release_id:.*?}',
'/software/commit_patch/{release_id:.*?}',
'/v1/software/commit_dry_run/{release_id:.*?}',
'/v1/software/commit_patch/{release_id:.*?}',
]
SOFTWARE_QUERY_DEPENDENCIES_PATHS = [
'/software/query_dependencies/{release_id:.*?}',
'/v1/software/query_dependencies/{release_id:.*?}',
]
SOFTWARE_PATH_MAP = {
SOFTWARE_ACTION_UPLOAD: SOFTWARE_UPLOAD_PATHS,
SOFTWARE_ACTION_UPLOAD_DIR: SOFTWARE_UPLOAD_DIR_PATHS,
SOFTWARE_ACTION_QUERY: SOFTWARE_QUERY_PATHS,
SOFTWARE_ACTION_DELETE: SOFTWARE_DELETE_PATHS,
SOFTWARE_ACTION_SHOW: SOFTWARE_SHOW_PATHS,
SOFTWARE_ACTION_COMMIT_PATCH: SOFTWARE_COMMIT_PATCH_PATHS,
SOFTWARE_ACTION_QUERY_DEPENDENCIES: SOFTWARE_QUERY_DEPENDENCIES_PATHS
@ -403,9 +382,6 @@ ROUTE_METHOD_MAP = {
SOFTWARE_ACTION_SHOW: ['GET'],
SOFTWARE_ACTION_QUERY_DEPENDENCIES: ['GET'],
SOFTWARE_ACTION_COMMIT_PATCH: ['POST'],
SOFTWARE_ACTION_DELETE: ['POST'],
SOFTWARE_ACTION_UPLOAD: ['POST'],
SOFTWARE_ACTION_UPLOAD_DIR: ['POST']
},
dccommon_consts.ENDPOINT_TYPE_IDENTITY: {
consts.RESOURCE_TYPE_IDENTITY_USERS:

View File

@ -1,4 +1,4 @@
# Copyright 2017-2023 Wind River
# Copyright 2017-2024 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -42,11 +42,10 @@ def get_host_port_options(cfg):
return cfg.platform.bind_host, cfg.platform.bind_port
elif cfg.type == consts.ENDPOINT_TYPE_NETWORK:
return cfg.network.bind_host, cfg.network.bind_port
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_SOFTWARE:
return cfg.usm.bind_host, cfg.usm.bind_port
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_PATCHING:
if cfg.use_usm:
return cfg.usm.bind_host, cfg.usm.bind_port
else:
return cfg.patching.bind_host, cfg.patching.bind_port
return cfg.patching.bind_host, cfg.patching.bind_port
elif cfg.type == consts.ENDPOINT_TYPE_VOLUME:
return cfg.volume.bind_host, cfg.volume.bind_port
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_IDENTITY:
@ -63,11 +62,10 @@ def get_remote_host_port_options(cfg):
return cfg.platform.remote_host, cfg.platform.remote_port
elif cfg.type == consts.ENDPOINT_TYPE_NETWORK:
return cfg.network.remote_host, cfg.network.remote_port
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_SOFTWARE:
return cfg.usm.remote_host, cfg.usm.remote_port
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_PATCHING:
if cfg.use_usm:
return cfg.usm.remote_host, cfg.usm.remote_port
else:
return cfg.patching.remote_host, cfg.patching.remote_port
return cfg.patching.remote_host, cfg.patching.remote_port
elif cfg.type == consts.ENDPOINT_TYPE_VOLUME:
return cfg.volume.remote_host, cfg.volume.remote_port
elif cfg.type == dccommon_consts.ENDPOINT_TYPE_IDENTITY:

View File

@ -214,11 +214,6 @@ fernet_opts = [
help='Hours between running fernet key rotation tasks.')
]
usm_opts = [
cfg.BoolOpt('use_usm', default=False,
help='parameter to enable USM API')
]
usm_proxy_opts = [
cfg.StrOpt('bind_host', default="0.0.0.0",
help='IP address for API proxy to listen for incoming connections'),
@ -270,7 +265,6 @@ def list_opts():
yield usm_group.name, usm_proxy_opts
yield None, global_opts
yield None, common_opts
yield None, usm_opts
def register_options():