diff --git a/sysinv/sysinv/sysinv/scripts/sysinv-conductor b/sysinv/sysinv/sysinv/scripts/sysinv-conductor index 9ff6c0d06f..f6f39c8583 100755 --- a/sysinv/sysinv/sysinv/scripts/sysinv-conductor +++ b/sysinv/sysinv/sysinv/scripts/sysinv-conductor @@ -184,7 +184,7 @@ sysinv_conductor_monitor () { fi # A workaround for monitoring the owner of /home/sysadmin/.kube - # This should be removed as soon as Helm v3 and Armada containerization work is complete. + # This should be removed as soon as Helm v3 containerization work is complete. if [ -d /home/sysadmin/.kube -a "$(stat -c %U /home/sysadmin/.kube)" != "sysadmin" ]; then chown -R sysadmin:sys_protected /home/sysadmin/.kube ocf_log info "Fixing /home/sysadmin/.kube ownership" @@ -215,11 +215,6 @@ sysinv_conductor_start () { fi fi - # Remove any existing application containers in case they were not shut - # down cleanly. sysinv-conductor will start these on demand with first use. - ocf_log info "${proc} Cleaning up any stale application containers before start. (sysinv-conductor)" - sysinv_remove_application_containers - if [ ${OCF_RESKEY_dbg} = "true" ] ; then RUN_OPT_DEBUG="--debug" else @@ -251,48 +246,6 @@ sysinv_conductor_start () { return ${rc} } -sysinv_remove_application_containers() { - local containers='armada_service' - local rc - - # The entry point for this is when the conductor has been confirmed to be - # stopped. Now cleanup any dependent service containers. This will be done - # here until we re-factor the management of (i.e. catch SIGKILL and cleanup) - # or the retirement of (i.e. move armada to a pod) these dependencies - - # On a non K8S configuration docker status will be EXIT_NOTIMPLEMENTED - systemctl status docker 2>&1 >> /dev/null - rc=$? - if [ $rc -eq 3 ]; then - ocf_log info "${proc} Docker is not running, skipping container actions. (sysinv-conductor)" - return - fi - - # Shutdown containers with DRBD dependencies that would prevent a swact. - for c in $containers; do - local id - - # does the container exist - id=$(docker container ls -qf name=${c} 2>/dev/null) - if [ ! -n "$id" ]; then - ocf_log info "${proc} Container $c is not started, skipping stop action. (sysinv-conductor)" - else - # Graceful shutdown (default is 10 sec, then kill) - ocf_log info "${proc} About to stop container $c... (sysinv-conductor)" - docker stop $c 2>&1 >> /dev/null - fi - - id=$(docker container ls -aqf name=${c} 2>/dev/null) - if [ ! -n "$id" ]; then - ocf_log info "${proc} Container $c is not present, skipping remove action. (sysinv-conductor)" - else - # Cleanup the container. Use force just in case. - ocf_log info "${proc} About to remove container $c... (sysinv-conductor)" - docker rm -f $c 2>&1 >> /dev/null - fi - done -} - sysinv_conductor_confirm_stop() { local my_bin local my_processes @@ -322,8 +275,6 @@ sysinv_conductor_stop () { ocf_log info "${proc} Sysinv Conductor (sysinv-conductor) already stopped" sysinv_conductor_confirm_stop - sysinv_remove_application_containers - return ${OCF_SUCCESS} fi @@ -364,8 +315,6 @@ sysinv_conductor_stop () { fi sysinv_conductor_confirm_stop - sysinv_remove_application_containers - ocf_log info "${proc} Sysinv Conductor (sysinv-conductor) stopped." 
     rm -f $OCF_RESKEY_pid
diff --git a/sysinv/sysinv/sysinv/setup.cfg b/sysinv/sysinv/sysinv/setup.cfg
index 8e8b6829ba..1b8864a051 100644
--- a/sysinv/sysinv/sysinv/setup.cfg
+++ b/sysinv/sysinv/sysinv/setup.cfg
@@ -72,9 +72,6 @@ systemconfig.puppet_plugins =
     042_sssd = sysinv.puppet.sssd:SssdPuppet
     099_service_parameter = sysinv.puppet.service_parameter:ServiceParamPuppet
 
-systemconfig.armada.manifest_ops =
-    generic = sysinv.helm.manifest_generic:GenericArmadaManifestOperator
-
 systemconfig.fluxcd.kustomize_ops =
     generic = sysinv.helm.kustomize_generic:GenericFluxCDKustomizeOperator
 
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_app.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_app.py
index 4b5df2b16b..bfae384184 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_app.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_app.py
@@ -202,7 +202,7 @@ class KubeAppController(rest.RestController):
     @cutils.synchronized(LOCK_NAME)
     @wsme_pecan.wsexpose(KubeApp, body=types.apidict)
     def post(self, body):
-        """Uploading an application to be deployed by Armada"""
+        """Upload an application to be deployed"""
         tarfile_path = body.get('tarfile')
         tarfile_binary = body.get('binary_data', '')
         name = body.get('name', '')
@@ -491,12 +491,13 @@
         applied_app.progress = None
         applied_app.save()
 
+        # TODO: revise the comment below regarding Armada
         # If the version has ever applied before(inactive app found),
-        # use armada rollback to apply application later, otherwise,
-        # use armada apply.
-        # On the AIO-SX, always use armada apply even it was applied
+        # use ----- rollback to apply application later, otherwise,
+        # use ----- apply.
+        # On the AIO-SX, always use ----- apply even it was applied
         # before, issue on AIO-SX(replicas is 1) to leverage rollback,
-        # armada/helm rollback --wait does not wait for pods to be
+        # -----/helm rollback --wait does not wait for pods to be
         # ready before it returns.
         # related to helm issue,
         # https://github.com/helm/helm/issues/4210
@@ -697,19 +698,14 @@ class KubeAppHelper(object):
     def _find_manifest(self, app_path, app_name):
         """ Find the required application manifest elements
 
-        Check for an Armada manifest or a FluxCD manifest directory
+        Check for a FluxCD manifest directory
         """
         try:
             # Check for the presence of a FluxCD manifest directory
             mfile = self._find_fluxcd_manifest(app_path, app_name)
         except exception.SysinvException as fluxcd_e:
-            try:
-                # Check for the presence of an Armada manifest
-                mfile = self._find_armada_manifest(app_path)
-            except exception.SysinvException as armada_e:
-                raise exception.SysinvException(_(
-                    "Application-upload rejected: {} and {} ".format(
-                        fluxcd_e, armada_e)))
+            raise exception.SysinvException(_(
+                "Application-upload rejected: {}".format(fluxcd_e)))
 
         return mfile
 
     def _find_fluxcd_manifest(self, app_path, app_name):
@@ -720,26 +716,6 @@ class KubeAppHelper(object):
         raise exception.SysinvException(_(
             "FluxCD manifest structure is not present"))
 
-    def _find_armada_manifest(self, app_path):
-        # It is expected that there is only one manifest file
-        # per application and the file exists at top level of
-        # the application path.
- mfiles = cutils.find_armada_manifest_file(app_path) - - if mfiles is None: - raise exception.SysinvException(_( - "Armada manifest file is corrupted.")) - - if mfiles: - if len(mfiles) == 1: - return mfiles[0] - else: - raise exception.SysinvException(_( - "tar file contains more than one Armada manifest file.")) - - raise exception.SysinvException(_( - "Armada manifest file/directory is missing")) - def _verify_metadata_file(self, app_path, app_name, app_version, upgrade_from_release=None): try: diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_upgrade.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_upgrade.py index 178ac48d5e..c18dbd384d 100755 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_upgrade.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_upgrade.py @@ -256,8 +256,6 @@ class KubeUpgradeController(rest.RestController): apps = pecan.request.dbapi.kube_app_get_all() self._check_installed_apps_compatibility(apps, to_version) - # TODO: check that tiller/armada support new k8s version - # The system must be healthy success, output = pecan.request.rpcapi.get_system_health( pecan.request.context, diff --git a/sysinv/sysinv/sysinv/sysinv/cmd/helm.py b/sysinv/sysinv/sysinv/sysinv/cmd/helm.py index 15ef72d906..c21d0a2e7b 100644 --- a/sysinv/sysinv/sysinv/sysinv/cmd/helm.py +++ b/sysinv/sysinv/sysinv/sysinv/cmd/helm.py @@ -47,42 +47,11 @@ def create_fluxcd_app_overrides_action(path, app_name=None, namespace=None): app_operator.activate_app_plugins(db_app) helm_operator.generate_helm_application_overrides( path, app_name, mode=None, cnamespace=namespace, - armada_format=False, chart_info=None, combined=False, - is_fluxcd_app=True) + chart_info=None, combined=False) app_operator.deactivate_app_plugins(db_app) else: helm_operator.generate_helm_application_overrides( - path, app_name, mode=None, cnamespace=namespace, - is_fluxcd_app=True) - - -def create_armada_app_overrides_action(path, app_name=None, namespace=None): - dbapi = api.get_instance() - - try: - db_app = dbapi.kube_app_get(app_name) - except exception.KubeAppNotFound: - LOG.info("Application %s not found" % app_name) - return - - helm_operator = helm.HelmOperator(dbapi=dbapi) - app_operator = kube_app.AppOperator(dbapi, helm_operator, {}) - - if not app_operator.app_has_system_plugins(db_app): - LOG.info("Overrides generation for application %s is " - "not supported via this command." 
% app_name) - else: - if db_app.status == constants.APP_UPLOAD_SUCCESS: - app_operator.activate_app_plugins(db_app) - helm_operator.generate_helm_application_overrides( - path, app_name, mode=None, cnamespace=namespace, - armada_format=False, chart_info=None, combined=False, - is_fluxcd_app=False) - app_operator.deactivate_app_plugins(db_app) - else: - helm_operator.generate_helm_application_overrides( - path, app_name, mode=None, cnamespace=namespace, - is_fluxcd_app=False) + path, app_name, mode=None, cnamespace=namespace) def add_action_parsers(subparsers): @@ -92,12 +61,6 @@ def add_action_parsers(subparsers): parser.add_argument('app_name', nargs='?') parser.add_argument('namespace', nargs='?') - parser = subparsers.add_parser('create-armada-app-overrides') - parser.set_defaults(func=create_armada_app_overrides_action) - parser.add_argument('path', nargs='?') - parser.add_argument('app_name', nargs='?') - parser.add_argument('namespace', nargs='?') - CONF.register_cli_opt( cfg.SubCommandOpt('action', @@ -119,12 +82,3 @@ def main(): CONF.action.func(CONF.action.path, CONF.action.app_name, CONF.action.namespace) - elif CONF.action.name == 'create-armada-app-overrides': - if not CONF.action.path: - LOG.error("A path is required to save overrides") - elif not CONF.action.app_name: - LOG.error("Armada application name is required") - else: - CONF.action.func(CONF.action.path, - CONF.action.app_name, - CONF.action.namespace) diff --git a/sysinv/sysinv/sysinv/sysinv/common/constants.py b/sysinv/sysinv/sysinv/sysinv/common/constants.py index 46e3696374..8e0f51aac6 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/constants.py +++ b/sysinv/sysinv/sysinv/sysinv/common/constants.py @@ -1747,9 +1747,6 @@ APP_METADATA_FILE = 'metadata.yaml' APP_PENDING_REAPPLY_FLAG = os.path.join( tsc.HELM_OVERRIDES_PATH, ".app_reapply") -# Armada -APP_SYNCED_ARMADA_DATA_PATH = os.path.join(tsc.PLATFORM_PATH, 'armada', tsc.SW_VERSION) - # FluxCD APP_FLUXCD_MANIFEST_DIR = 'fluxcd-manifests' APP_FLUXCD_DATA_PATH = os.path.join(tsc.PLATFORM_PATH, 'fluxcd', tsc.SW_VERSION) @@ -1822,12 +1819,10 @@ APP_LIFECYCLE_TYPE_SEMANTIC_CHECK = 'check' APP_LIFECYCLE_TYPE_OPERATION = 'operation' APP_LIFECYCLE_TYPE_RBD = 'rbd' APP_LIFECYCLE_TYPE_RESOURCE = 'resource' -# armada manifest +# fluxcd manifest # outside the function that has the retry decorator APP_LIFECYCLE_TYPE_MANIFEST = 'manifest' # inside the function that has a retry decorator -APP_LIFECYCLE_TYPE_ARMADA_REQUEST = 'armada-request' -# same as armada APP_LIFECYCLE_TYPE_FLUXCD_REQUEST = 'fluxcd-request' APP_LIFECYCLE_MODE_MANUAL = 'manual' @@ -1975,9 +1970,6 @@ APP_PROGRESS_RECOVER_CHARTS = 'recovering helm charts' APP_PROGRESS_UPDATE_FAILED_SKIP_RECOVERY = "Application {} update from " \ "version {} to version {} failed and recovery skipped " \ "because skip_recovery was requested." -APP_PROGRESS_UPDATE_FAILED_ARMADA_TO_FLUXCD = "Application {} update from " \ - "version {} to version {} failed and recovery skipped " \ - "because recovering between Armada and FluxCD is not allowed" APP_PROGRESS_REMOVE_FAILED_WARNING = "Application remove failed. Status forced to '{}'. " \ "Use native helm commands to clean up application helm releases." 
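With APP_SYNCED_ARMADA_DATA_PATH removed, every synced application artifact now resolves against the single FluxCD data path retained above, which the utils.py hunk below also repoints get_app_supported_kube_version() to. A minimal, self-contained sketch of that layout convention follows; the values standing in for tsc.PLATFORM_PATH and tsc.SW_VERSION, and the helper synced_metadata_fqpn, are illustrative assumptions rather than project code.

```python
# Sketch of the FluxCD synced-path convention (assumed values, hypothetical helper).
import os

PLATFORM_PATH = '/opt/platform'   # assumption: stand-in for tsc.PLATFORM_PATH
SW_VERSION = '22.12'              # assumption: stand-in for tsc.SW_VERSION
APP_METADATA_FILE = 'metadata.yaml'
APP_FLUXCD_DATA_PATH = os.path.join(PLATFORM_PATH, 'fluxcd', SW_VERSION)


def generate_synced_fluxcd_dir(app_name, app_version):
    # Top-level DRBD-synced directory for one application version.
    return os.path.join(APP_FLUXCD_DATA_PATH, app_name, app_version)


def synced_metadata_fqpn(app_name, app_version):
    # Hypothetical helper: where the synced metadata.yaml for a version lands.
    return os.path.join(generate_synced_fluxcd_dir(app_name, app_version),
                        APP_METADATA_FILE)


print(synced_metadata_fqpn('oidc-auth-apps', '1.0-0'))
# -> /opt/platform/fluxcd/22.12/oidc-auth-apps/1.0-0/metadata.yaml
```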
diff --git a/sysinv/sysinv/sysinv/sysinv/common/utils.py b/sysinv/sysinv/sysinv/sysinv/common/utils.py index d6d829d2be..aa6727702b 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/utils.py +++ b/sysinv/sysinv/sysinv/sysinv/common/utils.py @@ -2557,37 +2557,6 @@ def find_metadata_file(path, metadata_file, upgrade_from_release=None): return app_name, app_version, patches -def find_armada_manifest_file(path): - """ Find all Armada manifest files in a given directory. """ - def _is_armada_manifest(yaml_file): - with io.open(yaml_file, 'r', encoding='utf-8') as f: - docs = yaml.load_all(f) - for doc in docs: - try: - if "armada/Manifest" in doc['schema']: - manifest_name = doc['metadata']['name'] - return manifest_name, yaml_file - except KeyError: - # Could be some other yaml files - pass - return None, None - - mfiles = [] - for file in os.listdir(path): - if file.endswith('.yaml'): - yaml_file = os.path.join(path, file) - try: - mname, mfile = _is_armada_manifest(yaml_file) - if mfile: - mfiles.append((mname, mfile)) - except Exception as e: - # Included yaml file is corrupted - LOG.exception(e) - return None - - return mfiles - - def find_fluxcd_manifests_directory(path, name): """For FluxCD apps we expect to have one top-level manifest directory that contains the name of constants.APP_FLUXCD_MANIFEST_DIR. Validate that it @@ -2759,25 +2728,6 @@ def is_aio_duplex_system(dbapi): system.system_mode == constants.SYSTEM_MODE_DUPLEX_DIRECT)) -def generate_synced_armada_dir(app_name, app_version): - """ Armada application: Top level directory. """ - return os.path.join(constants.APP_SYNCED_ARMADA_DATA_PATH, app_name, app_version) - - -def generate_synced_armada_manifest_fqpn(app_name, app_version, manifest_filename): - """ Armada application: Armada manifest file. """ - return os.path.join( - constants.APP_SYNCED_ARMADA_DATA_PATH, app_name, app_version, - app_name + '-' + manifest_filename) - - -def generate_synced_metadata_fqpn(app_name, app_version): - """ Armada application: Application metadata file. """ - return os.path.join( - constants.APP_SYNCED_ARMADA_DATA_PATH, app_name, app_version, - 'metadata.yaml') - - def generate_synced_fluxcd_dir(app_name, app_version): """ FluxCD application: Top level directory. 
""" return os.path.join(constants.APP_FLUXCD_DATA_PATH, app_name, app_version) @@ -2847,7 +2797,7 @@ def get_app_supported_kube_version(app_name, app_version): """Get the application supported k8s version from the synced application metadata file""" app_metadata_path = os.path.join( - constants.APP_SYNCED_ARMADA_DATA_PATH, app_name, + constants.APP_FLUXCD_DATA_PATH, app_name, app_version, constants.APP_METADATA_FILE) kube_min_version = None diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py b/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py index f539e956d4..466e5ed6c2 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py @@ -35,7 +35,6 @@ import zipfile from collections import namedtuple from distutils.util import strtobool from eventlet import greenpool -from eventlet import greenthread from eventlet import queue from eventlet import Timeout from fm_api import constants as fm_constants @@ -62,11 +61,6 @@ LOG = logging.getLogger(__name__) # Constants APPLY_SEARCH_PATTERN = 'Processing Chart,' -ARMADA_NAMESPACE = 'armada' -ARMADA_APPLICATION = 'armada' -ARMADA_CONTAINER_NAME = 'armada-api' -ARMADA_MANIFEST_APPLY_SUCCESS_MSG = 'Done applying manifest' -ARMADA_RELEASE_ROLLBACK_FAILURE_MSG = 'Error while rolling back tiller release' CONTAINER_ABNORMAL_EXIT_CODE = 137 DELETE_SEARCH_PATTERN = 'Deleting release|no release to delete' ROLLBACK_SEARCH_PATTERN = 'Helm rollback of release' @@ -77,16 +71,6 @@ DOWNLOAD_WAIT_BEFORE_RETRY = 15 TARFILE_DOWNLOAD_CONNECTION_TIMEOUT = 60 TARFILE_TRANSFER_CHUNK_SIZE = 1024 * 512 -ARMADA_LOG_MAX = 10 -ARMADA_HOST_LOG_LOCATION = '/var/log/armada' -ARMADA_CONTAINER_LOG_LOCATION = '/logs' -ARMADA_CONTAINER_TMP = '/tmp' -ARMADA_LOCK_GROUP = 'armada.process' -ARMADA_LOCK_VERSION = 'v1' -ARMADA_LOCK_NAMESPACE = 'kube-system' -ARMADA_LOCK_PLURAL = 'locks' -ARMADA_LOCK_NAME = 'lock' - LOCK_NAME_APP_REAPPLY = 'app_reapply' LOCK_NAME_PROCESS_APP_METADATA = 'process_app_metadata' @@ -94,22 +78,11 @@ STX_APP_PLUGIN_PATH = '/var/stx_app/plugins' # Helper functions -def generate_armada_service_manifest_fqpn(app_name, app_version, manifest_filename): - return os.path.join('/manifests', app_name, app_version, - app_name + '-' + manifest_filename) - - def generate_install_manifest_fqpn(app_name, app_version, manifest_filename): return os.path.join(constants.APP_INSTALL_PATH, app_name, app_version, manifest_filename) -def generate_synced_images_fqpn(app_name, app_version): - return os.path.join( - constants.APP_SYNCED_ARMADA_DATA_PATH, app_name, app_version, - app_name + '-images.yaml') - - def generate_synced_helm_overrides_dir(app_name, app_version): return os.path.join(common.HELM_OVERRIDES_PATH, app_name, app_version) @@ -168,7 +141,6 @@ class AppOperator(object): self._utils = kube_app.KubeAppHelper(self._dbapi) self._image = AppImageParser() self._lock = threading.Lock() - self._armada = ArmadaHelper(self._kube) self._fluxcd = FluxCDHelper(self._dbapi, self._kube) # Load apps metadata @@ -208,12 +180,6 @@ class AppOperator(object): else: continue - # Delete the Armada locks that might have been acquired previously - # for a fresh start. This guarantees that a re-apply, re-update or - # a re-remove attempt following a status reset will not fail due - # to a lock related issue. 
- self._armada.clear_armada_locks() - def _raise_app_alarm(self, app_name, app_action, alarm_id, severity, reason_text, alarm_type, repair_action, service_affecting): @@ -303,24 +269,17 @@ class AppOperator(object): shutil.rmtree(os.path.dirname( app.sync_overrides_dir)) - if os.path.exists(app.sync_armada_mfile_dir): - shutil.rmtree(app.sync_armada_mfile_dir) - if app_dir: - shutil.rmtree(os.path.dirname( - app.sync_armada_mfile_dir)) - if os.path.exists(app.inst_path): shutil.rmtree(app.inst_path) if app_dir: shutil.rmtree(os.path.dirname( app.inst_path)) - if app.is_fluxcd_app: - if os.path.exists(app.sync_fluxcd_manifest_dir): - shutil.rmtree(app.sync_fluxcd_manifest_dir) - if app_dir: - shutil.rmtree(os.path.dirname( - app.sync_fluxcd_manifest_dir)) + if os.path.exists(app.sync_fluxcd_manifest_dir): + shutil.rmtree(app.sync_fluxcd_manifest_dir) + if app_dir: + shutil.rmtree(os.path.dirname( + app.sync_fluxcd_manifest_dir)) except OSError as e: LOG.error(e) @@ -499,21 +458,12 @@ class AppOperator(object): orig_uid, orig_gid = get_app_install_root_path_ownership() try: + # One time set up of fluxcd manifest path for the system + if not os.path.isdir(constants.APP_FLUXCD_DATA_PATH): + os.makedirs(constants.APP_FLUXCD_DATA_PATH) - if app.is_fluxcd_app: - # One time set up of fluxcd manifest path for the system - if not os.path.isdir(constants.APP_FLUXCD_DATA_PATH): - os.makedirs(constants.APP_FLUXCD_DATA_PATH) - - if not os.path.isdir(app.sync_fluxcd_manifest_dir): - os.makedirs(app.sync_fluxcd_manifest_dir) - else: - # One time set up of base armada manifest path for the system - if not os.path.isdir(constants.APP_SYNCED_ARMADA_DATA_PATH): - os.makedirs(constants.APP_SYNCED_ARMADA_DATA_PATH) - - if not os.path.isdir(app.sync_armada_mfile_dir): - os.makedirs(app.sync_armada_mfile_dir) + if not os.path.isdir(app.sync_fluxcd_manifest_dir): + os.makedirs(app.sync_fluxcd_manifest_dir) if not os.path.isdir(app.inst_path): create_app_path(app.inst_path) @@ -568,14 +518,9 @@ class AppOperator(object): (ie..registry.local:9001/docker.io/mariadb:10.2.13) """ - if app.is_fluxcd_app: - return self._get_image_tags_by_charts_fluxcd(app.sync_imgfile, + return self._get_image_tags_by_charts_fluxcd(app.sync_imgfile, app.sync_fluxcd_manifest, app.sync_overrides_dir) - else: - return self._get_image_tags_by_charts_armada(app.sync_imgfile, - app.sync_armada_mfile, - app.sync_overrides_dir) def _get_image_tags_by_charts_fluxcd(self, app_images_file, manifest, overrides_dir): app_imgs = [] @@ -691,100 +636,6 @@ class AppOperator(object): return list(set(app_imgs)) - def _get_image_tags_by_charts_armada(self, app_images_file, app_manifest_file, overrides_dir): - app_imgs = [] - images_file = None - manifest_update_required = False - - if os.path.exists(app_images_file): - with io.open(app_images_file, 'r', encoding='utf-8') as f: - images_file = yaml.safe_load(f) - - if os.path.exists(app_manifest_file): - with io.open(app_manifest_file, 'r', encoding='utf-8') as f: - # The RoundTripLoader removes the superfluous quotes by default, - # resulting the dumped out charts not readable in Armada. - # Set preserve_quotes=True to preserve all the quotes. 
- charts = list(yaml.load_all( - f, Loader=yaml.RoundTripLoader, preserve_quotes=True)) - - for chart in charts: - if "armada/Chart/" in chart['schema']: - chart_data = chart['data'] - chart_name = chart_data['chart_name'] - chart_namespace = chart_data['namespace'] - - # Get the image tags by chart from the images file - helm_chart_imgs = {} - if images_file and chart_name in images_file: - helm_chart_imgs = images_file[chart_name] - - # Get the image tags from the chart overrides file - overrides = chart_namespace + '-' + chart_name + '.yaml' - app_overrides_file = os.path.join(overrides_dir, overrides) - overrides_file = {} - if os.path.exists(app_overrides_file): - with io.open(app_overrides_file, 'r', encoding='utf-8') as f: - overrides_file = yaml.safe_load(f) - - override_imgs = self._image.find_images_in_dict( - overrides_file.get('data', {}).get('values', {})) - override_imgs_copy = copy.deepcopy(override_imgs) - - # Get the image tags from the armada manifest file - armada_chart_imgs = self._image.find_images_in_dict( - chart_data.get('values', {})) - armada_chart_imgs_copy = copy.deepcopy(armada_chart_imgs) - armada_chart_imgs = self._image.merge_dict(helm_chart_imgs, armada_chart_imgs) - - # Update image tags with local registry prefix - override_imgs = self._image.update_images_with_local_registry(override_imgs) - armada_chart_imgs = self._image.update_images_with_local_registry(armada_chart_imgs) - - # Generate a list of required images by chart - download_imgs = copy.deepcopy(armada_chart_imgs) - download_imgs = self._image.merge_dict(download_imgs, override_imgs) - download_imgs_list = self._image.generate_download_images_list(download_imgs, []) - app_imgs.extend(download_imgs_list) - - # Update chart override file if needed - if override_imgs != override_imgs_copy: - with open(app_overrides_file, 'w') as f: - try: - overrides_file['data']['values'] = self._image.merge_dict( - overrides_file['data']['values'], override_imgs) - yaml.safe_dump(overrides_file, f, default_flow_style=False) - LOG.info("Overrides file %s updated with new image tags" % - app_overrides_file) - except (TypeError, KeyError): - LOG.error("Overrides file %s fails to update" % - app_overrides_file) - - # Update armada chart if needed - if armada_chart_imgs != armada_chart_imgs_copy: - # This is to convert a empty orderedDict to dict - if 'values' in chart_data: - if not chart_data['values']: - chart_data['values'] = {} - - chart_data['values'] = self._image.merge_dict( - chart_data.get('values', {}), armada_chart_imgs) - manifest_update_required = True - - # Update manifest file if needed - if manifest_update_required: - with open(app_manifest_file, 'w') as f: - try: - yaml.dump_all(charts, f, Dumper=yaml.RoundTripDumper, - explicit_start=True, default_flow_style=False) - LOG.info("Manifest file %s updated with new image tags" % - app_manifest_file) - except Exception as e: - LOG.error("Manifest file %s fails to update with " - "new image tags: %s" % (app_manifest_file, e)) - - return list(set(app_imgs)) - def _register_embedded_images(self, app): """ TODO(tngo): When we're ready to support air-gap scenario and private @@ -804,7 +655,7 @@ class AppOperator(object): def _save_images_list(self, app): # Extract the list of images from the charts and overrides where - # applicable. Save the list to the same location as the armada manifest + # applicable. Save the list to the same location as the fluxcd manifest # so it can be sync'ed. 
app.charts = self._get_list_of_charts(app) @@ -812,8 +663,7 @@ class AppOperator(object): LOG.info("Generating application overrides to discover required images.") self._helm.generate_helm_application_overrides( app.sync_overrides_dir, app.name, mode=None, cnamespace=None, - armada_format=True, chart_info=app.charts, combined=True, - is_fluxcd_app=app.is_fluxcd_app) + chart_info=app.charts, combined=True) self._plugins.deactivate_plugins(app) self._save_images_list_by_charts(app) @@ -841,7 +691,7 @@ class AppOperator(object): chart_name = os.path.join(app.inst_charts_dir, chart.name) if not os.path.exists(chart_name): - # If the helm chart name is not the same as the armada + # If the helm chart name is not the same as the fluxcd # chart name in the manifest, try using the source # to find the chart directory. try: @@ -1236,10 +1086,7 @@ class AppOperator(object): raise def _get_list_of_charts(self, app): - if app.is_fluxcd_app: - return self._get_list_of_charts_fluxcd(app.sync_fluxcd_manifest) - else: - return self._get_list_of_charts_armada(app.sync_armada_mfile) + return self._get_list_of_charts_fluxcd(app.sync_fluxcd_manifest) def _get_list_of_charts_fluxcd(self, manifest): """Get the charts information from the manifest directory @@ -1312,151 +1159,8 @@ class AppOperator(object): charts.append(chart_obj) return charts - def _get_list_of_charts_armada(self, manifest_file): - """Get the charts information from the manifest file - - The following chart data for each chart in the manifest file - are extracted and stored into a namedtuple Chart object: - - metadata_name - - chart_name - - namespace - - location - - release - - pre-delete job labels - - The method returns a list of namedtuple charts which following - the install order in the manifest chart_groups. 
- - :param manifest_file: the manifest file of the application - :return: a list of namedtuple charts - """ - charts = [] - release_prefix = "" - chart_group = {} - chart_groups = [] - armada_charts = {} - - with io.open(manifest_file, 'r', encoding='utf-8') as f: - docs = yaml.safe_load_all(f) - for doc in docs: - # iterative docs in the manifest file to get required - # chart information - try: - if "armada/Manifest/" in doc['schema']: - release_prefix = doc['data']['release_prefix'] - chart_groups = doc['data']['chart_groups'] - - elif "armada/ChartGroup/" in doc['schema']: - chart_group.update( - {doc['metadata']['name']: { - 'chart_group': doc['data']['chart_group'], - 'sequenced': doc.get('data').get('sequenced', False)}}) - - elif "armada/Chart/" in doc['schema']: - labels = [] - delete_resource = \ - doc['data'].get('upgrade', {}).get('pre', {}).get('delete', []) - for resource in delete_resource: - if resource.get('type') == 'job': - label = '' - for k, v in resource['labels'].items(): - label = k + '=' + v + ',' + label - labels.append(label[:-1]) - - armada_charts.update( - {doc['metadata']['name']: { - 'chart_name': doc['data']['chart_name'], - 'namespace': doc['data']['namespace'], - 'location': doc['data']['source']['location'], - 'release': doc['data']['release'], - 'labels': labels}}) - LOG.debug("Manifest: Chart: {} Namespace: {} " - "Location: {} Release: {}".format( - doc['data']['chart_name'], - doc['data']['namespace'], - doc['data']['source']['location'], - doc['data']['release'])) - except KeyError: - pass - - # Push Chart to the list that following the order - # in the chart_groups(install list) - for c_group in chart_groups: - for chart in chart_group[c_group]['chart_group']: - charts.append(Chart( - metadata_name=chart, - name=armada_charts[chart]['chart_name'], - namespace=armada_charts[chart]['namespace'], - location=armada_charts[chart]['location'], - release=armada_charts[chart]['release'], - labels=armada_charts[chart]['labels'], - sequenced=chart_group[c_group]['sequenced'])) - del armada_charts[chart] - del chart_group[c_group] - - # Push Chart to the list that are not referenced - # in the chart_groups (install list) - if chart_group: - for c_group in chart_group: - for chart in chart_group[c_group]['chart_group']: - charts.append(Chart( - metadata_name=chart, - name=armada_charts[chart]['chart_name'], - namespace=armada_charts[chart]['namespace'], - location=armada_charts[chart]['location'], - release=armada_charts[chart]['release'], - labels=armada_charts[chart]['labels'], - sequenced=chart_group[c_group]['sequenced'])) - del armada_charts[chart] - - if armada_charts: - for chart in armada_charts: - charts.append(Chart( - metadata_name=chart, - name=armada_charts[chart]['chart_name'], - namespace=armada_charts[chart]['namespace'], - location=armada_charts[chart]['location'], - release=armada_charts[chart]['release'], - labels=armada_charts[chart]['labels'], - sequenced=False)) - - # Update each Chart in the list if there has release prefix - # for each release - if release_prefix: - for i, chart in enumerate(charts): - charts[i] = chart._replace( - release=release_prefix + "-" + chart.release) - - return charts - - def _get_overrides_files(self, app, mode): - if app.is_fluxcd_app: - return self._get_overrides_files_fluxcd(app.sync_overrides_dir, - app.charts), [] - else: - return self._get_overrides_files_armada(app.sync_overrides_dir, - app.charts, - app.name, - mode) - - def _get_overrides_files_fluxcd(self, overrides_dir, charts): - return 
self._get_overrides_from_charts(overrides_dir, charts) - - def _get_overrides_files_armada(self, overrides_dir, charts, app_name, mode): - """Returns list of override files or None, used in - application-install and application-delete.""" - - helm_overrides = \ - self._get_overrides_from_charts(overrides_dir, charts) - - if not helm_overrides: - return None - - # Get the armada manifest overrides files - manifest_op = self._helm.get_armada_manifest_operator(app_name) - armada_overrides = manifest_op.load_summary(overrides_dir) - - return (helm_overrides, armada_overrides) + def _get_overrides_files(self, app): + return self._get_overrides_from_charts(app.sync_overrides_dir, app.charts) def _get_overrides_from_charts(self, overrides_dir, charts): missing_helm_overrides = [] @@ -1494,25 +1198,6 @@ class AppOperator(object): if os.path.basename(helm_file) == override_file: shutil.copy(helm_file, chart_system_overrides_path) - def _generate_armada_overrides_str(self, app_name, app_version, - helm_files, armada_files): - overrides_str = "" - if helm_files: - overrides_str += " ".join([ - ' --values {0}/overrides/{1}/{2}/{3}'.format( - ARMADA_CONTAINER_TMP, - app_name, app_version, os.path.basename(i)) - for i in helm_files - ]) - if armada_files: - overrides_str += " ".join([ - ' --values {0}/manifests/{1}/{2}/{3}'.format( - ARMADA_CONTAINER_TMP, - app_name, app_version, os.path.basename(i)) - for i in armada_files - ]) - return overrides_str - def _remove_chart_overrides(self, overrides_dir, app): charts = self._get_list_of_charts(app) for chart in charts: @@ -1706,12 +1391,8 @@ class AppOperator(object): "Chart %s from version %s" % (to_app.name, to_app.version, chart.name, from_app.version)) - def _make_app_request(self, app, request, overrides_str=None): - if app.is_fluxcd_app: - return self._make_fluxcd_operation_with_monitor(app, request) - - else: - return self._make_armada_request_with_monitor(app, request, overrides_str) + def _make_app_request(self, app, request): + return self._make_fluxcd_operation_with_monitor(app, request) @retry(retry_on_exception=lambda x: isinstance(x, exception.ApplicationApplyFailure), stop_max_attempt_number=5, wait_fixed=30 * 1000) @@ -1980,175 +1661,6 @@ class AppOperator(object): self.app_lifecycle_actions(None, None, app._kube_app, lifecycle_hook_info) return rc - @retry(retry_on_exception=lambda x: isinstance(x, exception.ApplicationApplyFailure), - stop_max_attempt_number=5, wait_fixed=30 * 1000) - def _make_armada_request_with_monitor(self, app, request, overrides_str=None): - """Initiate armada request with monitoring - - This method delegates the armada request to docker helper and starts - a monitoring thread to persist status and progress along the way. - - :param app: application data object - :param request: type of request (apply or delete) - :param overrides_str: list of overrides in string format to be applied - """ - - def _get_armada_log_stats(pattern, logfile): - """ - TODO(tngo): In the absence of an Armada API that provides the current - status of an apply/delete manifest operation, the progress is derived - from specific log entries extracted from the execution logs. This - inner method is to be replaced with an official API call when - it becomes available. 
- """ - if pattern == ROLLBACK_SEARCH_PATTERN: - print_chart = '{print $10}' - else: - print_chart = '{print $NF}' - - p1 = subprocess.Popen(['grep', pattern, logfile], - stdout=subprocess.PIPE) - p2 = subprocess.Popen(['awk', print_chart], stdin=p1.stdout, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) - p1.stdout.close() - result, err = p2.communicate() - if result: - # Scrape information from command output, example 'validate' log: - # 2020-03-26 09:47:58.594 1105 INFO armada.cli [-] Successfully validated:\ - # ('/tmp/manifests/oidc-auth-apps/1.0-0/oidc-auth-apps-manifest.yaml',) - - # Strip out ANSI color code that might be in the text stream - r = re.compile("\x1b\[[0-9;]*m") - result = r.sub('', result).replace(',', '') - matches = result.split() - num_chart_processed = len(matches) - last_chart_processed = matches[num_chart_processed - 1] - if '=' in last_chart_processed: - last_chart_processed = last_chart_processed.split('=')[1] - return last_chart_processed, num_chart_processed - - return None, None - - def _check_progress(monitor_flag, app, pattern, logfile): - """ Progress monitoring task, to be run in a separate thread """ - LOG.info("Starting progress monitoring thread for app %s" % app.name) - - try: - adjust = self._get_metadata_value(app, - constants.APP_METADATA_APPLY_PROGRESS_ADJUST, - constants.APP_METADATA_APPLY_PROGRESS_ADJUST_DEFAULT_VALUE) - with Timeout(INSTALLATION_TIMEOUT, - exception.KubeAppProgressMonitorTimeout()): - - charts_count = len(app.charts) - while True: - try: - monitor_flag.get_nowait() - LOG.debug("Received monitor stop signal for %s" % app.name) - monitor_flag.task_done() - break - except queue.Empty: - last, num = _get_armada_log_stats(pattern, logfile) - if last: - if charts_count == 0: - percent = 100 - else: - tadjust = 0 - if app.system_app: - tadjust = adjust - if tadjust >= charts_count: - LOG.error("Application metadata key '{}'" - "has an invalid value {} (too few charts)". - format(constants.APP_METADATA_APPLY_PROGRESS_ADJUST, - adjust)) - tadjust = 0 - - percent = round((float(num) / # pylint: disable=W1619 - (charts_count - tadjust)) * 100) - - progress_str = "processing chart: {}, overall completion: {}%".\ - format(last, percent) - - if app.progress != progress_str: - LOG.info("%s" % progress_str) - self._update_app_status(app, new_progress=progress_str) - greenthread.sleep(1) - except Exception as e: - # timeout or subprocess error - LOG.exception(e) - finally: - LOG.info("Exiting progress monitoring thread for app %s" % app.name) - - def _cleanup_armada_log(location, app_name, request): - """Cleanup the oldest armada log if reach the maximum""" - list_of_logs = [os.path.join(location, f) for f in os.listdir(location) - if re.match(r'{}-{}.*.log'.format(app_name, request), f)] - - try: - if len(list_of_logs) > ARMADA_LOG_MAX: - oldest_logfile = min(list_of_logs, key=os.path.getctime) - os.remove(oldest_logfile) - except OSError: - pass - - # Body of the outer method - - # On N(stx.6.0) to N+2(stx.8.0) upgrades we need to keep the original - # 'delete' operation that Armada recognizes. - # The operation was renamed (intention was for Flux) for Armada by - # mistake: https://review.opendev.org/c/starlingx/config/+/866200/ - if request == constants.APP_REMOVE_OP: - request = constants.APP_DELETE_OP - - # This check is for cases where an abort is issued while - # this function waits between retries. 
In such cases, it - # should just return False - if AppOperator.is_app_aborted(app.name): - return False - - # TODO(dvoicule): Maybe pass a hook from outside to this function - # need to change perform_app_recover/rollback/update to support this. - # All the other hooks store the operation of the app itself (apply, - # remove, delete, upload, update) yet this hook stores the armada - # operation in the operation field. This is inconsistent behavior and - # should be changed the moment a hook from outside is passed here. - lifecycle_hook_info = LifecycleHookInfo() - lifecycle_hook_info.operation = request - lifecycle_hook_info.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE - lifecycle_hook_info.lifecycle_type = constants.APP_LIFECYCLE_TYPE_ARMADA_REQUEST - self.app_lifecycle_actions(None, None, app._kube_app, lifecycle_hook_info) - - mqueue = queue.Queue() - rc = True - logname = time.strftime(app.name + '-' + request + '_%Y-%m-%d-%H-%M-%S.log') - logfile = ARMADA_HOST_LOG_LOCATION + '/' + logname - - if request == constants.APP_APPLY_OP: - pattern = APPLY_SEARCH_PATTERN - elif request == constants.APP_DELETE_OP: - pattern = DELETE_SEARCH_PATTERN - else: - pattern = ROLLBACK_SEARCH_PATTERN - - monitor = greenthread.spawn_after(1, _check_progress, mqueue, app, - pattern, logfile) - rc = self._armada.make_armada_request(request, app.armada_service_mfile, - overrides_str, app.releases, logfile) - - _cleanup_armada_log(ARMADA_HOST_LOG_LOCATION, app.name, request) - mqueue.put('done') - monitor.kill() - - # Here a manifest retry can be performed by throwing ApplicationApplyFailure - lifecycle_hook_info.relative_timing = constants.APP_LIFECYCLE_TIMING_POST - lifecycle_hook_info.lifecycle_type = constants.APP_LIFECYCLE_TYPE_ARMADA_REQUEST - lifecycle_hook_info[LifecycleConstants.EXTRA][LifecycleConstants.RETURN_CODE] = rc - self.app_lifecycle_actions(None, None, app._kube_app, lifecycle_hook_info) - - return rc - def _record_auto_update_failed_versions(self, from_app, to_app): """Record the new application version in the old application metadata when the new application fails to be updated""" @@ -2166,12 +1678,12 @@ class AppOperator(object): with self._lock: from_app.update_app_metadata(new_metadata) - def _perform_app_recover(self, old_app, new_app, armada_process_required=True): + def _perform_app_recover(self, old_app, new_app, fluxcd_process_required=True): """Perform application recover This recover method is triggered when application update failed, it cleans up the files/data for the new application and recover helm charts for the - old application. If the armada process is required, armada apply is invoked + old application. If the fluxcd process is required, fluxcd apply is invoked to recover the application releases for the old version. 
         The app status will be populated to "apply-failed" if recover fails so that
@@ -2179,7 +1691,7 @@
         :param old_app: the application object that application recovering to
         :param new_app: the application object that application recovering from
-        :param armada_process_required: boolean, whether armada operation is needed
+        :param fluxcd_process_required: boolean, whether a FluxCD operation is needed
         """
 
         def _activate_old_app_plugins(old_app):
@@ -2215,20 +1727,13 @@
             self._upload_helm_charts(old_app)
 
             rc = True
-            if armada_process_required:
-                overrides_str = ''
+            if fluxcd_process_required:
                 old_app.charts = self._get_list_of_charts(old_app)
 
-                if old_app.system_app:
-                    (helm_files, armada_files) = self._get_overrides_files(
-                        old_app, mode=None)
-                    overrides_str = self._generate_armada_overrides_str(
-                        old_app.name, old_app.version, helm_files, armada_files)
-
-                # Ensure that the old app plugins are enabled prior to armada process.
+                # Ensure that the old app plugins are enabled prior to the FluxCD process.
                 _activate_old_app_plugins(old_app)
 
-                if self._make_app_request(old_app, constants.APP_APPLY_OP, overrides_str):
+                if self._make_app_request(old_app, constants.APP_APPLY_OP):
                     old_app_charts = [c.release for c in old_app.charts]
                     deployed_releases = helm_utils.retrieve_helm_releases()
                     for new_chart in new_app.charts:
@@ -2236,7 +1741,7 @@
                             new_chart.release in deployed_releases):
                             # Cleanup the releases in the new application version
                             # but are not in the old application version
-                            helm_utils.delete_helm_release(new_chart.release)
+                            helm_utils.delete_helm_v3_release(new_chart.release)
                 else:
                     rc = False
@@ -2280,9 +1785,9 @@
     def _perform_app_rollback(self, from_app, to_app):
         """Perform application rollback request
 
-        This method invokes Armada to rollback the application releases to
+        This method invokes FluxCD to roll back the application releases to
         previous installed versions. The jobs for the current installed
-        releases require to be cleaned up before starting armada rollback.
+        releases need to be cleaned up before starting the FluxCD rollback.
 
         :param from_app: application object that application updating from
         :param to_app: application object that application updating to
@@ -2341,9 +1846,9 @@
                      % (to_app.name, to_app.version))
            return True
        except exception.KubeAppAbort:
-            # If the update operation is aborted before Armada request is made,
+            # If the update operation is aborted before the FluxCD request is made,
            # we don't want to return False which would trigger the recovery
-            # routine with an Armada request.
+            # routine with a FluxCD request.
            raise
        except Exception as e:
            # unexpected KubeAppNotFound, KubeAppInactiveNotFound, KeyError
@@ -2404,8 +1909,8 @@
            app.downloaded_tarfile = True
 
        # Full extraction of application tarball at /scratch/apps.
-        # Manifest file is placed under /opt/platform/armada
-        # which is managed by drbd-sync and visible to Armada.
+        # Manifest file is placed under /opt/platform/fluxcd
+        # which is managed by drbd-sync and visible to FluxCD.
self._update_app_status( app, new_progress=constants.APP_PROGRESS_EXTRACT_TARFILE) @@ -2413,16 +1918,10 @@ class AppOperator(object): self._extract_tarfile(app) self._plugins.install_plugins(app) - if app.is_fluxcd_app: - manifest_sync_path = app.sync_fluxcd_manifest - manifest_sync_dir_path = app.sync_fluxcd_manifest_dir - validate_manifest = manifest_sync_path - validate_function = self._fluxcd.make_fluxcd_operation - else: - manifest_sync_path = app.sync_armada_mfile - manifest_sync_dir_path = app.sync_armada_mfile_dir - validate_manifest = app.armada_service_mfile - validate_function = self._armada.make_armada_request + manifest_sync_path = app.sync_fluxcd_manifest + manifest_sync_dir_path = app.sync_fluxcd_manifest_dir + validate_manifest = manifest_sync_path + validate_function = self._fluxcd.make_fluxcd_operation # Copy the manifest and metadata file to the drbd if os.path.isdir(app.inst_mfile): @@ -2451,8 +1950,7 @@ class AppOperator(object): with self._lock: self._upload_helm_charts(app) - # System overrides will be generated here. Plugins must be activated - # prior to scraping chart/system/armada overrides for images + # System overrides will be generated here. self._save_images_list(app) if images: @@ -2955,7 +2453,7 @@ class AppOperator(object): """Process application install request This method processes node labels per configuration and invokes - Armada to apply the application manifest. + fluxcd to apply the application manifest. For OpenStack app (system app), the method generates combined overrides (a merge between system and user overrides if available) @@ -3005,7 +2503,6 @@ class AppOperator(object): LOG.info("Application %s (%s) apply started." % (app.name, app.version)) - overrides_str = '' ready = True try: app.charts = self._get_list_of_charts(app) @@ -3035,19 +2532,13 @@ class AppOperator(object): LOG.info("Generating application overrides...") self._helm.generate_helm_application_overrides( app.sync_overrides_dir, app.name, mode, cnamespace=None, - armada_format=True, chart_info=app.charts, combined=True, - is_fluxcd_app=app.is_fluxcd_app) + chart_info=app.charts, combined=True) - overrides_str = None - (helm_files, armada_files) = self._get_overrides_files(app, mode) - if helm_files or armada_files: + helm_files = self._get_overrides_files(app) + if helm_files: LOG.info("Application overrides generated.") - if app.is_fluxcd_app: - # put the helm_overrides in the chart's system-overrides.yaml - self._write_fluxcd_overrides(app.charts, helm_files) - else: - overrides_str = self._generate_armada_overrides_str( - app.name, app.version, helm_files, armada_files) + # put the helm_overrides in the chart's system-overrides.yaml + self._write_fluxcd_overrides(app.charts, helm_files) self._update_app_status( app, new_progress=constants.APP_PROGRESS_DOWNLOAD_IMAGES) @@ -3096,7 +2587,7 @@ class AppOperator(object): if caller == constants.RECOVER_VIA_REMOVAL: return True - if self._make_app_request(app, constants.APP_APPLY_OP, overrides_str): + if self._make_app_request(app, constants.APP_APPLY_OP): self._update_app_releases_version(app.name) self._update_app_status(app, constants.APP_APPLY_SUCCESS, @@ -3152,7 +2643,7 @@ class AppOperator(object): """Process application update request This method leverages the existing application upload workflow to - validate/upload the new application tarfile, then invokes Armada + validate/upload the new application tarfile, then invokes fluxcd apply or rollback to update application from an applied version to the new version. 
If any failure happens during updating, the recover action will be triggered to recover the application to @@ -3194,7 +2685,6 @@ class AppOperator(object): LOG.info("Start updating Application %s from version %s to version %s ..." % (to_app.name, from_app.version, to_app.version)) - armada_to_fluxcd = from_app.is_fluxcd_app != to_app.is_fluxcd_app try: # Upload new app tarball. The upload will enable the new plugins to # generate overrides for images. Disable the plugins for the current @@ -3224,13 +2714,13 @@ class AppOperator(object): "".format(to_app.name, constants.APP_UPDATE_OP, str(e))) # lifecycle hooks not used in perform_app_recover return self._perform_app_recover(from_app, to_app, - armada_process_required=False) + fluxcd_process_required=False) except Exception as e: LOG.error("App {} operation {} semantic check error: {}" "".format(to_app.name, constants.APP_UPDATE_OP, str(e))) # lifecycle hooks not used in perform_app_recover return self._perform_app_recover(from_app, to_app, - armada_process_required=False) + fluxcd_process_required=False) self.load_application_metadata_from_file(to_rpc_app) @@ -3303,15 +2793,6 @@ class AppOperator(object): to_app.name, to_app.version, skip_recovery) do_recovery = False - # Here the app operation failed (do_recovery is True) - # but apps belong to differente helm versions. - if armada_to_fluxcd and do_recovery: - LOG.info("Application %s (%s) uses FluxCD (Helm3) and cannot" - " rollback to Application %s (%s) that uses Armada (Helm2)" - ", recovery skipped.", - to_app.name, to_app.version, from_app.name, from_app.version) - do_recovery = False - # If recovery is requested stop the flow of execution here if do_recovery: LOG.error("Application %s update from version %s to version " @@ -3333,7 +2814,7 @@ class AppOperator(object): from_chart.release in deployed_releases): # Cleanup the releases in the old application version # but are not in the new application version - helm_utils.delete_helm_release(from_chart.release) + helm_utils.delete_helm_v3_release(from_chart.release) LOG.info("Helm release %s for Application %s (%s) deleted" % (from_chart.release, from_app.name, from_app.version)) @@ -3352,17 +2833,11 @@ class AppOperator(object): # The initial operation for to_app failed # This is reached here only when skip_recovery is requested - # Or when updating between Helm versions (Armada <-> FluxCD) # Need to inform the user else: - message = \ - constants.APP_PROGRESS_UPDATE_FAILED_SKIP_RECOVERY.format( - to_app.name, from_app.version, to_app.version) \ - if skip_recovery else \ - constants.APP_PROGRESS_UPDATE_FAILED_ARMADA_TO_FLUXCD.format( - to_app.name, from_app.version, to_app.version) - self._update_app_status( - to_app, constants.APP_APPLY_FAILURE, message) + message = constants.APP_PROGRESS_UPDATE_FAILED_SKIP_RECOVERY.format( + to_app.name, from_app.version, to_app.version) + self._update_app_status(to_app, constants.APP_APPLY_FAILURE, message) LOG.info(message) except (exception.IncompatibleKubeVersion, @@ -3370,15 +2845,15 @@ class AppOperator(object): exception.KubeAppApplyFailure, exception.KubeAppAbort) as e: # Error occurs during app uploading or applying but before - # armada apply process... + # apply process... 
             # ie.images download/k8s resource creation failure
-            # Start recovering without trigger armada process
+            # Start recovering without triggering the FluxCD process
             LOG.exception(e)
             # lifecycle hooks not used in perform_app_recover
             return self._perform_app_recover(from_app, to_app,
-                                             armada_process_required=False)
+                                             fluxcd_process_required=False)
         except Exception as e:
-            # Application update successfully(armada apply/rollback)
+            # Application updated successfully (FluxCD apply/rollback)
             # Error occurs during cleanup old app
             # ie. delete app files failure, patch controller failure,
             # helm release delete failure
@@ -3397,7 +2872,7 @@
     def perform_app_remove(self, rpc_app, lifecycle_hook_info_app_remove, force=False):
         """Process application remove request
 
-        This method invokes Armada to delete the application manifest.
+        This method invokes FluxCD to delete the application manifest.
         For system app, it also cleans up old test pods.
 
         :param rpc_app: application object in the RPC request
@@ -3462,7 +2937,7 @@
                 helm_utils.delete_helm_v3_release(helm_release_dict['spec']['releaseName'],
                                                   namespace=namespace)
         if self._make_app_request(app, constants.APP_REMOVE_OP):
-            # After armada delete, the data for the releases are purged from
+            # After the FluxCD delete, the data for the releases are purged from
             # tiller/etcd, the releases info for the active app stored in sysinv
             # db should be set back to 0 and the inactive apps require to be
             # destroyed too.
@@ -3537,9 +3012,7 @@
         database and sets the abort flag if the apply/update/remove
         operation is still in progress. The corresponding app processing
         thread will check the flag and abort the operation in the very
-        next opportunity. The method also stops the Armada service and
-        clears locks in case the app processing thread has made a
-        request to Armada.
+        next opportunity.
 
         :param rpc_app: application object in the RPC request
         :param lifecycle_hook_info_app_abort: LifecycleHookInfo object
@@ -3556,16 +3029,7 @@
             # Turn on the abort flag so the processing thread that is
             # in progress can bail out in the next opportunity.
             self._set_abort_flag(app.name)
-            if not app.is_fluxcd_app:
-                # Stop the Armada request in case it has reached this far and
-                # remove locks.
-                # TODO(jgauld): Need to correct lock mechanism, something is no
-                # longer working for application aborts. The lock lingers around,
-                # and only automatically get cleaned up after a long period.
-                # Subsequent reapply fails since it we cannot get lock.
-                with self._lock:
-                    self._armada.stop_armada_request()
-                    self._armada.clear_armada_locks()
+
         else:
             # Either the previous operation has completed or already failed
             LOG.info("Abort request ignored.
The previous operation for app %s " @@ -3650,57 +3114,27 @@ class AppOperator(object): self.sync_plugins_dir = generate_synced_app_plugins_dir( self._kube_app.get('name'), self._kube_app.get('app_version')) - self.sync_armada_mfile_dir = cutils.generate_synced_armada_dir( - self._kube_app.get('name'), - self._kube_app.get('app_version')) self.sync_fluxcd_manifest_dir = cutils.generate_synced_fluxcd_dir( self._kube_app.get('name'), self._kube_app.get('app_version')) # Files: DRBD synced between controllers - self.sync_armada_mfile = cutils.generate_synced_armada_manifest_fqpn( - self._kube_app.get('name'), - self._kube_app.get('app_version'), - self._kube_app.get('manifest_file')) self.sync_fluxcd_manifest = cutils.generate_synced_fluxcd_manifests_fqpn( self._kube_app.get('name'), self._kube_app.get('app_version')) - self.sync_armada_imgfile = generate_synced_images_fqpn( + self.sync_imgfile = generate_synced_fluxcd_images_fqpn( self._kube_app.get('name'), self._kube_app.get('app_version')) - self.sync_fluxcd_imgfile = generate_synced_fluxcd_images_fqpn( - self._kube_app.get('name'), - self._kube_app.get('app_version')) - self.sync_imgfile = self.sync_fluxcd_imgfile \ - if self.is_fluxcd_app else \ - self.sync_armada_imgfile - self.sync_armada_metadata_file = cutils.generate_synced_metadata_fqpn( + self.sync_metadata_file = cutils.generate_synced_fluxcd_metadata_fqpn( self._kube_app.get('name'), self._kube_app.get('app_version')) - self.sync_fluxcd_metadata_file = cutils.generate_synced_fluxcd_metadata_fqpn( - self._kube_app.get('name'), - self._kube_app.get('app_version')) - self.sync_metadata_file = self.sync_fluxcd_metadata_file \ - if self.is_fluxcd_app else \ - self.sync_armada_metadata_file - - # Files: FQPN formatted for the Armada pod - self.armada_service_mfile = generate_armada_service_manifest_fqpn( - self._kube_app.get('name'), - self._kube_app.get('app_version'), - self._kube_app.get('manifest_file')) self.patch_dependencies = [] self.charts = [] self.releases = [] - @property - def is_fluxcd_app(self): - return self._kube_app.get('manifest_name').endswith( - constants.APP_FLUXCD_MANIFEST_DIR) - @property def system_app(self): if (os.path.exists(self.sync_plugins_dir) and @@ -3763,34 +3197,18 @@ class AppOperator(object): self._kube_app.manifest_file = new_mfile self.inst_mfile = generate_install_manifest_fqpn( self.name, self.version, new_mfile) - if self.is_fluxcd_app: - self.sync_fluxcd_manifest = cutils.generate_synced_fluxcd_manifests_fqpn( - self.name, - self.version) - else: - self.armada_service_mfile = generate_armada_service_manifest_fqpn( - self.name, self.version, new_mfile) - self.sync_armada_mfile = cutils.generate_synced_armada_manifest_fqpn( - self.name, self.version, new_mfile) + self.sync_fluxcd_manifest = cutils.generate_synced_fluxcd_manifests_fqpn( + self.name, self.version) def regenerate_application_info(self, new_name, new_version, new_patch_dependencies): self._kube_app.name = new_name self._kube_app.app_version = new_version - if self.is_fluxcd_app: - new_fluxcd_dir = cutils.generate_synced_fluxcd_dir( - self.name, self.version) - shutil.move(self.sync_fluxcd_manifest_dir, new_fluxcd_dir) - shutil.rmtree(os.path.dirname(self.sync_fluxcd_manifest_dir)) - self.sync_fluxcd_manifest_dir = new_fluxcd_dir - new_sync_imgfile = generate_synced_fluxcd_images_fqpn(self.name, self.version) - else: - new_armada_dir = cutils.generate_synced_armada_dir( - self.name, self.version) - shutil.move(self.sync_armada_mfile_dir, new_armada_dir) - 
shutil.rmtree(os.path.dirname(self.sync_armada_mfile_dir)) - self.sync_armada_mfile_dir = new_armada_dir - new_sync_imgfile = generate_synced_images_fqpn(self.name, self.version) + new_fluxcd_dir = cutils.generate_synced_fluxcd_dir(self.name, self.version) + shutil.move(self.sync_fluxcd_manifest_dir, new_fluxcd_dir) + shutil.rmtree(os.path.dirname(self.sync_fluxcd_manifest_dir)) + self.sync_fluxcd_manifest_dir = new_fluxcd_dir + new_sync_imgfile = generate_synced_fluxcd_images_fqpn(self.name, self.version) new_path = os.path.join( constants.APP_INSTALL_PATH, self.name, self.version) @@ -4016,453 +3434,6 @@ class DockerHelper(object): return img_tag, rc -class ArmadaHelper(object): - """ Armada class to encapsulate Armada related operations """ - - def __init__(self, kube): - self._kube = kube - self._lock = threading.Lock() - - self.overrides_dir = common.HELM_OVERRIDES_PATH - self.manifests_dir = constants.APP_SYNCED_ARMADA_DATA_PATH - self.logs_dir = ARMADA_HOST_LOG_LOCATION - - # Generate kubectl wrapped bash command that can run in - # a specific container of a namespaced pod. - def wrap_kubectl_bash(self, name, namespace, exec_command, - container=None): - kcmd = ['kubectl', '--kubeconfig', kubernetes.KUBERNETES_ADMIN_CONF, - 'exec', '-n', namespace, name] - if container is not None: - kcmd.extend(['--container', container]) - kcmd.extend(['--', '/bin/bash', '-c', exec_command]) - return kcmd - - # Wrapper for kubectl exec to run bash commands in a specific container - # of a namespaced pod. - # Returns command stdout and stderr, and stderr if kubectl command fails. - # This should be replaced with the core kubernetes client API - # connect_get_namespaced_pod_exec when that can be made to work properly - # with error handling, separate stdout, stderr, timeout, poll and flush - # of output streams, and wait for command completion. - def kube_exec_container_bash(self, name, namespace, exec_command, - container=None): - kcmd = self.wrap_kubectl_bash(name, namespace, exec_command, - container=container) - stdout, stderr = cutils.trycmd(*kcmd, discard_warnings=True, - run_as_root=False) - return stdout, stderr - - # Wrapper for kubectl cp to a container. One of 'src' and 'dest' must - # be a remote file specification. - # Returns command stdout and stderr, and stderr if kubectl command fails. - # Limitation: kubectl cp command does not return an error when - # the source file does not exist. - # https://github.com/kubernetes/kubernetes/issues/78879 - def kube_cp_container(self, namespace, src, dest, container=None): - kcmd = ['kubectl', '--kubeconfig', kubernetes.KUBERNETES_ADMIN_CONF, - 'cp', '-n', namespace, src, dest] - if container is not None: - kcmd.extend(['--container', container]) - stdout, stderr = cutils.trycmd(*kcmd, discard_warnings=True, - run_as_root=False) - return stdout, stderr - - def copy_manifests_and_overrides_to_armada(self, armada_pod, mfile): - # NOTE: The armada pod may run on either controller. - # We do not want to mount host directories since DRBD - # /opt/platform is only visible on active controller. - # As a workaround, we can copy the required files into - # the armada container. - - # Derive manifests and overrides directories for both - # source source and destination paths. We use well-known - # directories and a filename given the following format. 
- # /manifests/oidc-auth-apps/1.0-0/oidc-auth-apps-manifest-del.yaml - manifests_dest = '{}/{}'.format(ARMADA_CONTAINER_TMP, 'manifests') - overrides_dest = '{}/{}'.format(ARMADA_CONTAINER_TMP, 'overrides') - app_name = mfile.split('/', 3)[2] - - # Create manifests and overrides directories in container - cmd = 'mkdir -v -p {}; mkdir -v -p {}'.\ - format(manifests_dest, overrides_dest) - stdout, stderr = self.kube_exec_container_bash( - armada_pod, ARMADA_NAMESPACE, cmd, container=ARMADA_CONTAINER_NAME) - if stderr: - LOG.error("Failed to create manifests and overrides, error: %s", - stderr) - return False - - # Copy manifests and overrides directories to container - # NOTE: kubectl cp command does not return an error when - # the source file does not exist. - # https://github.com/kubernetes/kubernetes/issues/78879 - src_dest_dirs = \ - [('{}/{}'.format(self.manifests_dir, app_name), - '{}:{}'.format(armada_pod, manifests_dest)), - ('{}/{}'.format(self.overrides_dir, app_name), - '{}:{}'.format(armada_pod, overrides_dest))] - for src_dir, dest_dir in src_dest_dirs: - # If there are no overrides it's not a fatal error. - if (src_dir.startswith(self.overrides_dir) and - not os.path.exists(src_dir)): - LOG.info("%s doesn't exist, skipping it." % src_dir) - continue - LOG.info("Copy %s to %s ." % (src_dir, dest_dir)) - stdout, stderr = self.kube_cp_container( - ARMADA_NAMESPACE, src_dir, dest_dir, - container=ARMADA_CONTAINER_NAME) - if stderr: - LOG.error("Failed to copy %s to %s, error: %s", - src_dir, dest_dir, stderr) - return False - return True - - def check_pod_ready_probe(self, pod): - """Pod is of the form returned by self._kube.kube_get_pods_by_selector. - Returns true if last probe shows the container is in 'Ready' state. - """ - conditions = list([x for x in pod.status.conditions if x.type == 'Ready']) - if not conditions: - return False - return conditions[0].status == 'True' - - def _prefer_select_one_running_ready_pod(self, pods): - """Find one running and ready pod. - Return found if one, otherwise first pod. - """ - for pod in pods: - if pod.status.phase == 'Running' and \ - pod.metadata.deletion_timestamp is None and \ - self.check_pod_ready_probe(pod): - return pod - return pods[0] - - def clear_armada_locks(self): - lock_name = "{}.{}.{}".format(ARMADA_LOCK_PLURAL, - ARMADA_LOCK_GROUP, - ARMADA_LOCK_NAME) - try: - self._kube.delete_custom_resource(ARMADA_LOCK_GROUP, - ARMADA_LOCK_VERSION, - ARMADA_LOCK_NAMESPACE, - ARMADA_LOCK_PLURAL, - lock_name) - except Exception: - # Best effort delete - LOG.warning("Failed to clear Armada locks.") - pass - - def _start_armada_service(self): - """Armada pod is managed by Kubernetes / Helm. - This routine checks and waits for armada to be providing service. - """ - - self.overrides_dir = common.HELM_OVERRIDES_PATH - self.manifests_dir = constants.APP_SYNCED_ARMADA_DATA_PATH - - try: - # Create the armada log folder if it does not exists - if not os.path.exists(ARMADA_HOST_LOG_LOCATION): - os.mkdir(ARMADA_HOST_LOG_LOCATION) - os.chmod(ARMADA_HOST_LOG_LOCATION, 0o755) - os.chown(ARMADA_HOST_LOG_LOCATION, 1000, - grp.getgrnam("sys_protected").gr_gid) - if not os.path.exists(common.HELM_OVERRIDES_PATH): - os.makedirs(common.HELM_OVERRIDES_PATH, 0o755) - except OSError as oe: - LOG.error("Unable to create armada log folder : %s" % oe) - return False - - # Wait for armada to be ready for cmd execution. 
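The readiness wait that follows is a plain poll-and-decrement loop: with TIMEOUT_START_VALUE=30, TIMEOUT_DELTA=5 and TIMEOUT_SLEEP=5 it makes roughly six attempts over about half a minute. A minimal standalone sketch of the same pattern, for reference only (the helper name is illustrative, not part of the patch):

import time

def wait_until(predicate, timeout=30, delta=5, sleep=5):
    # Poll predicate() until it succeeds or the time budget runs out,
    # mirroring the TIMEOUT_* loop in the removed _start_armada_service().
    while timeout > 0:
        try:
            if predicate():
                return True
        except Exception:
            pass  # the removed loop logs the error and simply retries
        time.sleep(sleep)
        timeout -= delta
    return False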
- # NOTE: make_armada_requests() also has retry mechanism - TIMEOUT_DELTA = 5 - TIMEOUT_SLEEP = 5 - TIMEOUT_START_VALUE = 30 - - timeout = TIMEOUT_START_VALUE - while timeout > 0: - try: - pods = self._kube.kube_get_pods_by_selector( - ARMADA_NAMESPACE, - "application=%s" % ARMADA_APPLICATION, "") - if not pods: - raise RuntimeError('armada pod not found') - pod = self._prefer_select_one_running_ready_pod(pods) - - if pod and pod.status.phase != 'Running': - # Delete the pod, it should restart if it can - if not self._kube.kube_delete_pod(pod.metadata.name, - ARMADA_NAMESPACE, grace_periods_seconds=0): - LOG.warning("Pod %s/%s deletion unsuccessful...", - ARMADA_NAMESPACE, pod.metadata.name) - - if pod and pod.status.phase == 'Running' and \ - self.check_pod_ready_probe(pod): - # Test that we can copy files into armada-api container - src = '/etc/build.info' - dest_dir = '{}:{}'.format(pod.metadata.name, '/tmp') - stdout, stderr = self.kube_cp_container( - ARMADA_NAMESPACE, src, dest_dir, - container=ARMADA_CONTAINER_NAME) - if stderr: - LOG.error("Failed to copy %s to %s, error: %s", - src, dest_dir, stderr) - raise RuntimeError('armada pod not ready') - break - - except Exception as e: - LOG.info("Could not get Armada service : %s " % e) - - time.sleep(TIMEOUT_SLEEP) - timeout -= TIMEOUT_DELTA - - if timeout <= 0: - LOG.error("Failed to get Armada service after {seconds} seconds.". - format(seconds=TIMEOUT_START_VALUE)) - return False - - # We don't need to loop through the code that checks the pod's status - # again. Once the previous loop exits with pod 'Running' we can test - # the connectivity to the tiller postgres backend: - timeout = TIMEOUT_START_VALUE - while timeout > 0: - try: - _ = helm_utils.retrieve_helm_v2_releases() - break - except exception.HelmTillerFailure: - LOG.warn("Could not query Helm/Tiller releases") - time.sleep(TIMEOUT_SLEEP) - timeout -= TIMEOUT_DELTA - continue - except Exception as ex: - LOG.error("Unhandled exception : {error}".format(error=str(ex))) - return False - - if timeout <= 0: - LOG.error("Failed to query Helm/Tiller for {seconds} seconds.". - format(seconds=TIMEOUT_START_VALUE)) - return False - - return True - - def stop_armada_request(self): - """A simple way to cancel an on-going manifest apply/rollback/delete - request. This logic will be revisited in the future. 
- """ - - try: - pods = self._kube.kube_get_pods_by_selector( - ARMADA_NAMESPACE, "application=%s" % ARMADA_APPLICATION, "") - if not pods: - raise RuntimeError('armada pod not found') - for pod in pods: - if pod.status.phase == 'Running': - # Delete the pod, it should restart if it can - LOG.info("Stopping Armada service %s.", pod.metadata.name) - if not self._kube.kube_delete_pod(pod.metadata.name, - ARMADA_NAMESPACE, - grace_periods_seconds=0): - LOG.warning("Pod %s/%s deletion unsuccessful.", - ARMADA_NAMESPACE, pod.metadata.name) - except Exception as e: - LOG.error("Failed to stop Armada service : %s " % e) - - def make_armada_request(self, request, manifest_file='', overrides_str='', - app_releases=None, logfile=None): - - if logfile is None: - # Infer app name from the manifest file - # e.g., /tmp/manifests/oidc-auth-apps/1.0-0/oidc-auth-apps-manifest.yaml - app_name = manifest_file.split('/', 3)[2] - logname = time.strftime(app_name + '-' + request + '_%Y-%m-%d-%H-%M-%S.log') - logfile = ARMADA_HOST_LOG_LOCATION + '/' + logname - - if app_releases is None: - app_releases = [] - - rc = True - - # Configure additional armada options (e.g., such as --tiller-host), - # currently none are required. - tiller_host = " " - - LOG.debug('make_armada_request: request=%s, ' - 'manifest_file=%s, overrides_str=%s, ' - 'app_releases=%r, logfile=%r', - request, manifest_file, overrides_str, - app_releases, logfile) - try: - # Ensure armada service is ready. - with self._lock: - ret = self._start_armada_service() - - if ret: - # The armada pod name may change, get it each time - pods = self._kube.kube_get_pods_by_selector( - ARMADA_NAMESPACE, "application=%s" % ARMADA_APPLICATION, - "status.phase=Running") - if not pods: - raise RuntimeError('armada pod not found') - armada_pod = self._prefer_select_one_running_ready_pod(pods).metadata.name - if not self.copy_manifests_and_overrides_to_armada(armada_pod, manifest_file): - raise RuntimeError('could not access armada pod') - - if request == 'validate': - cmd = ''.join(['armada validate ', - ARMADA_CONTAINER_TMP, - manifest_file]) - LOG.info("Armada %s command: '%s'", request, cmd) - kcmd = self.wrap_kubectl_bash( - armada_pod, ARMADA_NAMESPACE, cmd, - container=ARMADA_CONTAINER_NAME) - p = subprocess.Popen(kcmd, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - with p.stdout, open(logfile, 'w') as log: - while p.poll() is None: - line = p.stdout.readline() - if line != b"": - log.write(line) - log.flush() - if p.returncode != 0: - rc = False - LOG.error("Failed to validate application manifest %s " - "with exit code %s. See %s for details." % - (manifest_file, p.returncode, logfile)) - else: - LOG.info("Manifest file %s was successfully validated." % - manifest_file) - - elif request == constants.APP_APPLY_OP: - cmd = ''.join(['armada apply --debug ', - '--enable-chart-cleanup ', - ARMADA_CONTAINER_TMP, - manifest_file, - overrides_str, - tiller_host]) - LOG.info("Armada %s command: '%s'", request, cmd) - kcmd = self.wrap_kubectl_bash( - armada_pod, ARMADA_NAMESPACE, cmd, - container=ARMADA_CONTAINER_NAME) - p = subprocess.Popen(kcmd, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - with p.stdout, open(logfile, 'w') as log: - while p.poll() is None: - line = p.stdout.readline() - if line != b"": - LOG.debug('%s: %s', request, line) - log.write(line) - log.flush() - if p.returncode != 0: - rc = False - LOG.error("Failed to apply application manifest %s " - "with exit code %s. 
See %s for details." % - (manifest_file, p.returncode, logfile)) - if p.returncode == CONTAINER_ABNORMAL_EXIT_CODE: - self.clear_armada_locks() - else: - LOG.info("Application manifest %s was successfully " - "applied/re-applied." % manifest_file) - - elif request == constants.APP_ROLLBACK_OP: - for app_release in app_releases: - release = app_release.get('release') - version = app_release.get('version') - sequenced = app_release.get('sequenced') - - if sequenced: - cmd = ''.join(['armada rollback --debug ', - '--wait --timeout 1800 ', - '--release ' + release + ' ', - '--version ' + str(version), - tiller_host]) - else: - cmd = ''.join(['armada rollback --debug ', - '--release ' + release + ' ', - '--version ' + str(version), - tiller_host]) - - LOG.info("Armada %s command: '%s'", request, cmd) - kcmd = self.wrap_kubectl_bash( - armada_pod, ARMADA_NAMESPACE, cmd, - container=ARMADA_CONTAINER_NAME) - p = subprocess.Popen(kcmd, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - with p.stdout, open(logfile, 'w') as log: - while p.poll() is None: - line = p.stdout.readline() - if line != "": - log.write(line) - log.flush() - if p.returncode != 0: - rc = False - LOG.error("Failed to rollback release %s " - "with exit code %s. See %s for details." % - (release, p.returncode, logfile)) - if p.returncode == CONTAINER_ABNORMAL_EXIT_CODE: - self.clear_armada_locks() - break - if rc: - LOG.info("Application releases %s were successfully " - "rolled back." % app_releases) - - elif request == constants.APP_DELETE_OP: - # Since armada delete doesn't support --values overrides - # files, use the delete manifest generated from the - # ArmadaManifestOperator during overrides generation. It - # will contain an accurate view of what was applied - manifest_delete_file = "%s-del%s" % os.path.splitext(manifest_file) - cmd = ''.join(['armada delete --debug ', - '--manifest ', - ARMADA_CONTAINER_TMP, - manifest_delete_file, - tiller_host]) - LOG.info("Armada %s command: '%s'", request, cmd) - kcmd = self.wrap_kubectl_bash( - armada_pod, ARMADA_NAMESPACE, cmd, - container=ARMADA_CONTAINER_NAME) - p = subprocess.Popen(kcmd, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - with p.stdout, open(logfile, 'w') as log: - while p.poll() is None: - line = p.stdout.readline() - if line != "": - log.write(line) - log.flush() - if p.returncode != 0: - rc = False - LOG.error("Failed to delete application manifest %s " - "with exit code %s. See %s for details." % - (manifest_file, p.returncode, logfile)) - if p.returncode == CONTAINER_ABNORMAL_EXIT_CODE: - self.clear_armada_locks() - else: - LOG.info("Application charts were successfully " - "deleted with manifest %s." % manifest_delete_file) - - else: - rc = False - LOG.error("Unsupported armada request: %s." % request) - else: - # Armada sevice failed to start/restart - rc = False - LOG.error("Armada service failed to start/restart") - except Exception as e: - rc = False - self.clear_armada_locks() - LOG.error("Armada request %s for manifest %s failed: %s " % - (request, manifest_file, e)) - return rc - - class AppImageParser(object): """Utility class to help find images for an application""" @@ -4529,7 +3500,7 @@ class AppImageParser(object): """Find image references in a nested dictionary. This function is used to find images from helm chart, - chart overrides file and armada manifest file. + chart overrides file and manifest file. 
:param var_dict: dict :return: a dict of image references diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py index 44bded1f41..0c587249bc 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py @@ -372,7 +372,6 @@ class ConductorManager(service.PeriodicService): self._app = kube_app.AppOperator(self.dbapi, self._helm, self.apps_metadata) self._docker = kube_app.DockerHelper(self.dbapi) self._kube = kubernetes.KubeOperator() - self._armada = kube_app.ArmadaHelper(self._kube) self._kube_app_helper = kube_api.KubeAppHelper(self.dbapi) self._fernet = fernet.FernetOperator() @@ -6995,18 +6994,6 @@ class ConductorManager(service.PeriodicService): "activity") return - # Ensure that armada pod is running and ready. - pods = self._kube.kube_get_pods_by_selector("armada", - "application=armada", - "status.phase=Running") - for pod in pods: - if (pod.metadata.deletion_timestamp is None and - self._armada.check_pod_ready_probe(pod)): - break - else: - LOG.warning("Armada pod is not running and ready. Defer audit.") - return - # Ensure that FluxCD pods are ready. if not self._app.check_fluxcd_pod_status(): LOG.warning("FluxCD pods are not ready. Defer audit.") @@ -14212,8 +14199,8 @@ class ConductorManager(service.PeriodicService): # as well as removing the writing to disk of the new overrides old_hash = {} app.charts = self._app._get_list_of_charts(app) - (helm_files, armada_files) = self._app._get_overrides_files(app, None) - for f in helm_files + armada_files: + helm_files = self._app._get_overrides_files(app) + for f in helm_files: with open(f, 'rb') as file: old_hash[f] = hashlib.md5(file.read()).hexdigest() @@ -14223,10 +14210,9 @@ class ConductorManager(service.PeriodicService): app.charts = self._app._get_list_of_charts(app) self._helm.generate_helm_application_overrides( app.sync_overrides_dir, app.name, app.mode, cnamespace=None, - armada_format=True, chart_info=app.charts, combined=True, - is_fluxcd_app=app.is_fluxcd_app) - (helm_files, armada_files) = self._app._get_overrides_files(app, None) - for f in helm_files + armada_files: + chart_info=app.charts, combined=True) + helm_files = self._app._get_overrides_files(app) + for f in helm_files: with open(f, 'rb') as file: new_hash[f] = hashlib.md5(file.read()).hexdigest() diff --git a/sysinv/sysinv/sysinv/sysinv/helm/base.py b/sysinv/sysinv/sysinv/sysinv/helm/base.py index 20be9df3fd..f88f7566f6 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/base.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/base.py @@ -403,21 +403,6 @@ class BaseHelm(object): """ return True - def execute_manifest_updates(self, operator): - """ - Update the elements of the armada manifest. - - This allows a helm chart plugin to use the ArmadaManifestOperator to - make dynamic structural changes to the application manifest based on the - current conditions in the platform - - Changes include updates to manifest documents for the following schemas: - armada/Manifest/v1, armada/ChartGroup/v1, armada/Chart/v1. - - :param operator: an instance of the ArmadaManifestOperator - """ - pass - def execute_kustomize_updates(self, operator): """ Update the elements of FluxCD kustomize manifests. 
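The reapply check in the conductor hunk above reduces to hashing every overrides file before and after regeneration; any changed digest means the application must be reapplied. A condensed sketch of that comparison, assuming _get_overrides_files() returns a flat list of paths (md5 serves as a cheap change detector here, not a security measure):

import hashlib

def overrides_digests(paths):
    # Map each overrides file to the md5 digest of its current contents.
    digests = {}
    for path in paths:
        with open(path, 'rb') as f:
            digests[path] = hashlib.md5(f.read()).hexdigest()
    return digests

# old = overrides_digests(helm_files)
# ... regenerate the application overrides ...
# reapply_needed = overrides_digests(helm_files) != old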
diff --git a/sysinv/sysinv/sysinv/sysinv/helm/common.py b/sysinv/sysinv/sysinv/sysinv/helm/common.py index 3a47605b62..64dde44ba2 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/common.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/common.py @@ -39,13 +39,12 @@ HELM_NS_CERT_MANAGER = 'cert-manager' HELM_NS_VAULT = 'vault' HELM_NS_NOTIFICATION = 'notification' HELM_NS_DEPLOYMENT = 'deployment' -HELM_NS_ARMADA = 'armada' HELM_NS_FLUX_HELM = 'flux-helm' # namespace groups for pod security admission controller PRIVILEGED_NS = [HELM_NS_CEPH, HELM_NS_NFS, HELM_NS_OPENSTACK, HELM_NS_HELM_TOOLKIT, HELM_NS_MONITOR, HELM_NS_RBD_PROVISIONER, HELM_NS_STORAGE_PROVISIONER, - HELM_NS_CERT_MANAGER, HELM_NS_VAULT, HELM_NS_DEPLOYMENT, HELM_NS_ARMADA, + HELM_NS_CERT_MANAGER, HELM_NS_VAULT, HELM_NS_DEPLOYMENT, HELM_NS_KUBE_SYSTEM, HELM_NS_NOTIFICATION, HELM_NS_FLUX_HELM] POD_SECURITY_VERSION = 'latest' diff --git a/sysinv/sysinv/sysinv/sysinv/helm/helm.py b/sysinv/sysinv/sysinv/sysinv/helm/helm.py index 0feb933a0b..e5304f4bb9 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/helm.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/helm.py @@ -10,7 +10,6 @@ from __future__ import absolute_import import eventlet import os -import re import tempfile import yaml @@ -18,7 +17,6 @@ from six import iteritems from stevedore import extension from oslo_log import log as logging -from sysinv.common import constants from sysinv.common import exception from sysinv.common import utils from sysinv.helm import common @@ -36,12 +34,6 @@ yaml.Dumper.ignore_aliases = lambda *data: True # The convention here is for the helm plugins to be named ###_PLUGINNAME. HELM_PLUGIN_PREFIX_LENGTH = 4 -# Number of optional characters appended to Armada manifest operator name, -# to allow overriding with a newer version of the Armada manifest operator. -# The convention here is for the Armada operator plugins to allow an -# optional suffix, as in PLUGINNAME_###. -ARMADA_PLUGIN_SUFFIX_LENGTH = 4 - # Number of optional characters appended to FluxCD kustomize operator name, to # allow overriding with a newer version of the FluxCD kustomize operator. 
The # convention here is for the FluxCD kustomize operator plugins to allow an @@ -83,27 +75,24 @@ class HelmOperator(object): # Define the stevedore namespaces that will need to be managed for plugins STEVEDORE_APPS = 'systemconfig.helm_applications' - STEVEDORE_ARMADA = 'systemconfig.armada.manifest_ops' STEVEDORE_FLUXCD = 'systemconfig.fluxcd.kustomize_ops' STEVEDORE_LIFECYCLE = 'systemconfig.app_lifecycle' def __init__(self, dbapi=None): self.dbapi = dbapi - # Find all plugins for apps, charts per app, and armada manifest - # operators + # Find all plugins for apps, charts per app, and fluxcd operators self.discover_plugins() @utils.synchronized(LOCK_NAME) def discover_plugins(self): """ Scan for all available plugins """ - LOG.debug("HelmOperator: Loading available helm, armada and lifecycle plugins.") + LOG.debug("HelmOperator: Loading available helm, fluxcd and lifecycle plugins.") # Initialize the plugins self.helm_system_applications = {} self.chart_operators = {} - self.armada_manifest_operators = {} self.fluxcd_kustomize_operators = {} self.app_lifecycle_operators = {} @@ -115,9 +104,6 @@ class HelmOperator(object): # dict containing sequence of helm charts per app self.helm_system_applications = self._load_helm_applications() - # dict containing Armada manifest operators per app - self.armada_manifest_operators = self._load_armada_manifest_operators() - # dict containing FluxCD kustomize operators per app self.fluxcd_kustomize_operators = self._load_fluxcd_kustomize_operators() @@ -153,32 +139,6 @@ class HelmOperator(object): LOG.info("Couldn't find endpoint distribution located at %s for " "%s" % (install_location, lifecycle_distribution)) - for armada_ep in extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA]: - armada_distribution = None - - try: - armada_distribution = utils.get_distribution_from_entry_point(armada_ep) - (project_name, project_location) = \ - utils.get_project_name_and_location_from_distribution(armada_distribution) - - if project_location == install_location: - extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA].remove(armada_ep) - break - except exception.SysinvException: - # Temporary suppress errors on Debian until Stevedore is reworked. - # See https://storyboard.openstack.org/#!/story/2009101 - if utils.is_debian(): - LOG.info("Didn't find distribution for {}. Deleting from cache".format(armada_ep)) - try: - extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA].remove(armada_ep) - except Exception as e: - LOG.info("Tried removing armada_ep {}, error: {}".format(armada_ep, e)) - else: - raise - else: - LOG.info("Couldn't find endpoint distribution located at %s for " - "%s" % (install_location, armada_distribution)) - for fluxcd_ep in extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_FLUXCD]: fluxcd_distribution = None @@ -252,7 +212,7 @@ class HelmOperator(object): # Temporary suppress errors on Debian until Stevedore is reworked. # See https://storyboard.openstack.org/#!/story/2009101 if utils.is_debian(): - LOG.info("Tried removing app_ep {}, error: {}".format(armada_ep, e)) + LOG.info("Tried removing app_ep {}, error: {}".format(app_ep, e)) continue else: raise @@ -277,12 +237,6 @@ class HelmOperator(object): else: LOG.info("No entry points for %s found." % self.STEVEDORE_APPS) - try: - del extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA] - LOG.debug("Deleted entry points for %s." % self.STEVEDORE_ARMADA) - except KeyError: - LOG.info("No entry points for %s found." 
% self.STEVEDORE_ARMADA) - try: del extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_FLUXCD] LOG.debug("Deleted entry points for %s." % self.STEVEDORE_FLUXCD) @@ -328,44 +282,6 @@ class HelmOperator(object): return operator - def _load_armada_manifest_operators(self): - """Build a dictionary of armada manifest operators""" - - operators_dict = {} - dist_info_dict = {} - - armada_manifest_operators = extension.ExtensionManager( - namespace=self.STEVEDORE_ARMADA, - invoke_on_load=True, invoke_args=()) - - sorted_armada_manifest_operators = sorted( - armada_manifest_operators.extensions, key=lambda x: x.name) - - for op in sorted_armada_manifest_operators: - if (op.name[-(ARMADA_PLUGIN_SUFFIX_LENGTH - 1):].isdigit() and - op.name[-ARMADA_PLUGIN_SUFFIX_LENGTH:-3] == '_'): - op_name = op.name[0:-ARMADA_PLUGIN_SUFFIX_LENGTH] - else: - op_name = op.name - operators_dict[op_name] = op.obj - - distribution = utils.get_distribution_from_entry_point(op.entry_point) - (project_name, project_location) = \ - utils.get_project_name_and_location_from_distribution(distribution) - - # Extract distribution information for logging - dist_info_dict[op_name] = { - 'name': project_name, - 'location': project_location, - } - - # Provide some log feedback on plugins being used - for (app_name, info) in iteritems(dist_info_dict): - LOG.debug("Plugins for %-20s: loaded from %-20s - %s." % (app_name, - info['name'], info['location'])) - - return operators_dict - def _load_fluxcd_kustomize_operators(self): """Build a dictionary of FluxCD kustomize operators""" @@ -404,16 +320,6 @@ class HelmOperator(object): return operators_dict - def get_armada_manifest_operator(self, app_name): - """Return a manifest operator based on app name""" - - plugin_name = utils.find_app_plugin_name(app_name) - if plugin_name in self.armada_manifest_operators: - manifest_op = self.armada_manifest_operators[plugin_name] - else: - manifest_op = self.armada_manifest_operators['generic'] - return manifest_op - def get_fluxcd_kustomize_operator(self, app_name): """Return a kustomize operator based on app name""" @@ -649,93 +555,6 @@ class HelmOperator(object): LOG.info(e) return overrides - def _get_helm_chart_location(self, chart_name, repo_name, chart_tarfile): - """Get the chart location. - - This method returns the download location for a given chart. - - :param chart_name: name of the chart - :param repo_name: name of the repo that chart uploaded to - :param chart_tarfile: name of the chart tarfile - :returns: a URL as location - """ - if repo_name is None: - repo_name = common.HELM_REPO_FOR_APPS - if chart_tarfile is None: - # TODO: Clean up the assumption - chart_tarfile = chart_name + '-0.1.0' - # Set the location based on ip address since - # http://controller does not resolve in armada container. 
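The helper body that follows derives the chart download URL from the cluster floating address; IPv6 literals must be bracketed or the port separator becomes ambiguous. A standalone sketch of just that URL construction (argument values are illustrative; the removed code uses utils.is_valid_ipv6 rather than the naive colon test below):

def chart_location(float_ip, http_port, repo_name, chart_tarfile):
    # Bracket IPv6 literals, e.g. fd00::1 -> [fd00::1].
    if ':' in float_ip and not float_ip.startswith('['):
        float_ip = '[{}]'.format(float_ip)
    return 'http://{}:{}/helm_charts/{}/{}.tgz'.format(
        float_ip, http_port, repo_name, chart_tarfile)

# chart_location('fd00::1', 8080, 'stx-platform', 'my-chart-0.1.0')
# -> 'http://[fd00::1]:8080/helm_charts/stx-platform/my-chart-0.1.0.tgz'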
- sys_controller_network = self.dbapi.network_get_by_type(constants.NETWORK_TYPE_CLUSTER_HOST) - sys_controller_network_addr_pool = self.dbapi.address_pool_get(sys_controller_network.pool_uuid) - sc_float_ip = sys_controller_network_addr_pool.floating_address - if utils.is_valid_ipv6(sc_float_ip): - sc_float_ip = '[' + sc_float_ip + ']' - return 'http://{}:{}/helm_charts/{}/{}.tgz'.format( - sc_float_ip, - utils.get_http_port(self.dbapi), repo_name, chart_tarfile) - - def _add_armada_override_header(self, chart_name, chart_metadata_name, repo_name, - chart_tarfile, namespace, overrides): - if chart_metadata_name is None: - chart_metadata_name = namespace + '-' + chart_name - - new_overrides = { - 'schema': 'armada/Chart/v1', - 'metadata': { - 'schema': 'metadata/Document/v1', - 'name': chart_metadata_name - }, - 'data': { - 'values': overrides - } - } - location = self._get_helm_chart_location(chart_name, repo_name, chart_tarfile) - if location: - new_overrides['data'].update({ - 'source': { - 'location': location - } - }) - return new_overrides - - def _get_chart_info_from_armada_chart(self, chart_name, chart_namespace, - chart_info_list): - """ Extract the metadata name of the armada chart, repo and the name of - the chart tarfile from the armada manifest chart. - - :param chart_name: name of the chart from the (application list) - :param chart_namespace: namespace of the chart - :param chart_info_list: a list of chart objects containing information - extracted from the armada manifest - :returns: the metadata name of the chart, the supported StarlingX repository, - the name of the chart tarfile or None,None,None if not present - """ - - # Could be called without any armada_manifest info. Returning 'None' - # will enable helm defaults to point to common.HELM_REPO_FOR_APPS - metadata_name = None - repo = None - chart_tarfile = None - if chart_info_list is None: - return metadata_name, repo, chart_tarfile - - location = None - for c in chart_info_list: - if (c.name == chart_name and - c.namespace == chart_namespace): - location = c.location - metadata_name = c.metadata_name - break - - if location: - match = re.search('/helm_charts/(.*)/(.*).tgz', location) - if match: - repo = match.group(1) - chart_tarfile = match.group(2) - LOG.debug("Chart %s can be found in repo: %s" % (chart_name, repo)) - return metadata_name, repo, chart_tarfile - def merge_overrides(self, file_overrides=None, set_overrides=None): """ Merge helm overrides together. @@ -832,186 +651,6 @@ class HelmOperator(object): @helm_context @utils.synchronized(LOCK_NAME) def generate_helm_application_overrides(self, path, app_name, - mode=None, - cnamespace=None, - armada_format=False, - chart_info=None, - combined=False, - is_fluxcd_app=False): - """Create the system overrides files for a supported application - - This method will generate system helm chart overrides yaml files for a - set of supported charts that comprise an application. If the namespace - is provided only the overrides files for that specified namespace will - be written. 
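For reference, the per-chart document shape that the removed _add_armada_override_header() emitted, reconstructed from its body above (a sketch that restates the removed code, not part of the patch):

def armada_override_doc(chart_name, metadata_name, namespace, values,
                        location=None):
    # The metadata name falls back to '<namespace>-<chart_name>' when unset.
    doc = {
        'schema': 'armada/Chart/v1',
        'metadata': {
            'schema': 'metadata/Document/v1',
            'name': metadata_name or '{}-{}'.format(namespace, chart_name),
        },
        'data': {'values': values},
    }
    if location:
        doc['data']['source'] = {'location': location}
    return doc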
- - :param app_name: name of the bundle of charts required to support an - application - :param mode: mode to control how to apply application manifest - :param cnamespace: (optional) namespace - :param armada_format: (optional) whether to emit in armada format - instead of helm format (with extra header) - :param chart_info: (optional) supporting chart information - extracted from the armada manifest which is used to influence - overrides - :param combined: (optional) whether to apply user overrides on top of - system overrides - :param is_fluxcd_app: whether the app is fluxcd or not - """ - if is_fluxcd_app: - self._generate_helm_application_overrides_fluxcd( - path, app_name, mode, cnamespace, - chart_info, combined) - else: - self._generate_helm_application_overrides_armada( - path, app_name, mode, cnamespace, armada_format, - chart_info, combined) - - @helm_context - def _generate_helm_application_overrides_armada(self, path, app_name, - mode=None, - cnamespace=None, - armada_format=False, - chart_info=None, - combined=False): - """Create the system overrides files for a supported application - - This method will generate system helm chart overrides yaml files for a - set of supported charts that comprise an application. If the namespace - is provided only the overrides files for that specified namespace will - be written. - - :param app_name: name of the bundle of charts required to support an - application - :param mode: mode to control how to apply application manifest - :param cnamespace: (optional) namespace - :param armada_format: (optional) whether to emit in armada format - instead of helm format (with extra header) - :param chart_info: (optional) supporting chart information - extracted from the armada manifest which is used to influence - overrides - :param combined: (optional) whether to apply user overrides on top of - system overrides - """ - - app, plugin_name = self._find_kube_app_and_app_plugin_name(app_name) - - # Get a manifest operator to provide a single point of - # manipulation for the chart, chart group and manifest schemas - manifest_op = self.get_armada_manifest_operator(app.name) - - # Load the manifest into the operator - armada_manifest = utils.generate_synced_armada_manifest_fqpn( - app.name, app.app_version, app.manifest_file) - manifest_op.load(armada_manifest) - - if plugin_name in self.helm_system_applications: - app_overrides = self._get_helm_application_overrides(plugin_name, - cnamespace) - for (chart_name, overrides) in iteritems(app_overrides): - if combined: - # The overrides at this point are the system overrides. For - # charts with multiple namespaces, the overrides would - # contain multiple keys, one for each namespace. - # - # Retrieve the user overrides of each namespace from the - # database and merge this list of user overrides, if they - # exist, with the system overrides. Both system and user - # override contents are then merged based on the namespace, - # prepended with required header and written to - # corresponding files (<chart_name>-<namespace>.yaml).
- file_overrides = [] - for chart_namespace in overrides.keys(): - try: - db_chart = self.dbapi.helm_override_get( - app.id, chart_name, chart_namespace) - db_user_overrides = db_chart.user_overrides - if db_user_overrides: - file_overrides.append(yaml.dump( - {chart_namespace: yaml.load(db_user_overrides)})) - except exception.HelmOverrideNotFound: - pass - - if file_overrides: - # Use dump() instead of safe_dump() as the latter is - # not agreeable with password regex in some overrides - system_overrides = yaml.dump(overrides) - file_overrides.insert(0, system_overrides) - combined_overrides = self.merge_overrides( - file_overrides=file_overrides) - overrides = yaml.load(combined_overrides) - - # If armada formatting is wanted, we need to change the - # structure of the yaml file somewhat - for key in overrides: - metadata_name, repo_name, chart_tarfile = \ - self._get_chart_info_from_armada_chart(chart_name, key, - chart_info) - new_overrides = self._add_armada_override_header( - chart_name, metadata_name, repo_name, chart_tarfile, - key, overrides[key]) - overrides[key] = new_overrides - self._write_chart_overrides(path, chart_name, cnamespace, overrides) - - # Update manifest docs based on the plugin directives. If the - # application does not provide a manifest operator, the - # GenericArmadaManifestOperator is used and chart specific - # operations can be skipped. - if manifest_op.APP: - if chart_name in self.chart_operators: - self.chart_operators[chart_name].execute_manifest_updates( - manifest_op) - - # Update the manifest based on platform conditions - manifest_op.platform_mode_manifest_updates(self.dbapi, mode) - - else: - # Generic applications - for chart in chart_info: - try: - db_chart = self.dbapi.helm_override_get( - app.id, chart.name, chart.namespace) - except exception.HelmOverrideNotFound: - # This routine is to create helm overrides entries - # in database during application-upload so that user - # can list the supported helm chart overrides of the - # application via helm-override-list - try: - values = { - 'name': chart.name, - 'namespace': chart.namespace, - 'app_id': app.id, - } - db_chart = self.dbapi.helm_override_create(values=values) - except Exception as e: - LOG.exception(e) - return - - user_overrides = {chart.namespace: {}} - db_user_overrides = db_chart.user_overrides - if db_user_overrides: - user_overrides = yaml.load(yaml.dump( - {chart.namespace: yaml.load(db_user_overrides)})) - - metadata_name, repo_name, chart_tarfile =\ - self._get_chart_info_from_armada_chart(chart.name, chart.namespace, - chart_info) - new_overrides = self._add_armada_override_header( - chart.name, metadata_name, repo_name, chart_tarfile, - chart.namespace, user_overrides[chart.namespace]) - user_overrides[chart.namespace] = new_overrides - - self._write_chart_overrides(path, chart.name, - cnamespace, user_overrides) - - # Write the manifest doc overrides, a summmary file for easy --value - # generation on the apply, and a unified manifest for deletion. 
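The combined=True branch above layers user overrides on top of system overrides, namespace by namespace; the system overrides are inserted first in the list so that user values win on conflict. A much-simplified sketch of that layering (a shallow dict update stands in for the deep merge that merge_overrides() performs):

def layer_overrides(system_overrides, user_overrides_by_ns):
    # system_overrides: {namespace: values} produced by the helm plugins
    # user_overrides_by_ns: {namespace: values} loaded from the database
    merged = dict(system_overrides)
    for ns, user_values in user_overrides_by_ns.items():
        base = dict(merged.get(ns, {}))
        base.update(user_values or {})  # user values take precedence
        merged[ns] = base
    return merged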
- manifest_op.save_overrides() - manifest_op.save_summary(path=path) - manifest_op.save_delete_manifest() - - @helm_context - def _generate_helm_application_overrides_fluxcd(self, path, app_name, mode=None, cnamespace=None, chart_info=None, @@ -1188,7 +827,7 @@ class HelmOperator(object): yaml.dump(overrides, f, default_flow_style=False) os.close(fd) os.rename(tmppath, filepath) - # Change the permission to be readable to non-root users(ie.Armada) + # Change the permission to be readable to non-root users os.chmod(filepath, 0o644) except Exception: LOG.exception("failed to write overrides file: %s" % filepath) diff --git a/sysinv/sysinv/sysinv/sysinv/helm/lifecycle_hook.py b/sysinv/sysinv/sysinv/sysinv/helm/lifecycle_hook.py index 090af347cc..79472599b3 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/lifecycle_hook.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/lifecycle_hook.py @@ -22,7 +22,7 @@ class LifecycleHookInfo(base.SysinvObject): Attributes: mode (string): Manual or Auto lifecycle_type (string): Type of the hook (semantic check, operation - rbd, resource, manifest, armada-request). + rbd, resource, manifest, fluxcd-request). relative_timing (string): Relative timing to the operation (pre/post). operation (string): Operation being performed. extra (dict): Can populate data here and it is passed to the outside. diff --git a/sysinv/sysinv/sysinv/sysinv/helm/manifest_base.py b/sysinv/sysinv/sysinv/sysinv/helm/manifest_base.py deleted file mode 100644 index 3cab9af03d..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/helm/manifest_base.py +++ /dev/null @@ -1,519 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright (c) 2019-2022 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -# All Rights Reserved. -# - -""" System inventory Armada manifest operator.""" - -import abc -import io -import os -import json -import ruamel.yaml as yaml -import six -import tempfile - -from glob import glob -from six import iteritems -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -KEY_SCHEMA = 'schema' -VAL_SCHEMA_MANIFEST = 'armada/Manifest/v1' -VAL_SCHEMA_CHART_GROUP = 'armada/ChartGroup/v1' -VAL_SCHEMA_CHART = 'armada/Chart/v1' - -KEY_METADATA = 'metadata' -KEY_METADATA_NAME = 'name' - -KEY_DATA = 'data' -KEY_DATA_CHART_GROUPS = 'chart_groups' # for manifest doc updates -KEY_DATA_CHART_GROUP = 'chart_group' # for chart group doc updates -KEY_DATA_CHART_NAME = 'chart_name' # for chart doc updates - -# Attempt to keep a compact filename -FILE_PREFIX = { - KEY_DATA_CHART_GROUPS: 'm-', # for manifest doc overrides - KEY_DATA_CHART_GROUP: 'cg-', # for chart group doc overrides - KEY_DATA_CHART_NAME: 'c-' # for chart doc overrides -} -FILE_SUFFIX = '-meta.yaml' -SUMMARY_FILE = 'armada-overrides.yaml' - - -@six.add_metaclass(abc.ABCMeta) -class ArmadaManifestOperator(object): - - def __init__(self, manifest_fqpn=None): - self.manifest_path = None # Location to write overrides - self.delete_manifest = None # Unified manifest for app deletion - - self.content = [] # original app manifest content - - self.docs = { - KEY_DATA_CHART_GROUPS: {}, # LUT for all manifest docs - KEY_DATA_CHART_GROUP: {}, # LUT for all chart group docs - KEY_DATA_CHART_NAME: {} # LUT for all chart docs - } - - self.updated = { - KEY_DATA_CHART_GROUPS: set(), # indicate manifest doc change - KEY_DATA_CHART_GROUP: set(), # indicate chart group update - KEY_DATA_CHART_NAME: set() # indicate chart doc update - } - - if manifest_fqpn: - self.load(manifest_fqpn) - - def __str__(self): - 
return json.dumps({ - 'manifest': self.docs[KEY_DATA_CHART_GROUPS], - 'chart_groups': self.docs[KEY_DATA_CHART_GROUP], - 'charts': self.docs[KEY_DATA_CHART_NAME], - }, indent=2) - - def load_summary(self, path): - """ Load the list of generated overrides files - - Generate a list of override files that were written for the manifest. - This is used to generate Armada --values overrides for the manifest. - - :param path: location of the overrides summary file - :return: a list of override files written - """ - files_written = [] - summary_fqpn = os.path.join(path, SUMMARY_FILE) - if os.path.exists(summary_fqpn): - self.manifest_path = os.path.dirname(summary_fqpn) - with io.open(summary_fqpn, 'r', encoding='utf-8') as f: - # The RoundTripLoader removes the superfluous quotes by default, - # resulting the dumped out charts not readable in Armada. - # Set preserve_quotes=True to preserve all the quotes. - files_written = list(yaml.load_all( - f, Loader=yaml.RoundTripLoader, preserve_quotes=True))[0] - return files_written - - def load(self, manifest_fqpn): - """ Load the application manifest for processing - - :param manifest_fqpn: fully qualified path name of the application manifest - """ - if os.path.exists(manifest_fqpn): - # Save the path for writing overrides files - self.manifest_path = os.path.dirname(manifest_fqpn) - - # Save the name for a delete manifest - self.delete_manifest = "%s-del%s" % os.path.splitext(manifest_fqpn) - - with io.open(manifest_fqpn, 'r', encoding='utf-8') as f: - # The RoundTripLoader removes the superfluous quotes by default, - # resulting the dumped out charts not readable in Armada. - # Set preserve_quotes=True to preserve all the quotes. - self.content = list(yaml.load_all( - f, Loader=yaml.RoundTripLoader, preserve_quotes=True)) - - # Generate the lookup tables - # For the individual chart docs - self.docs[KEY_DATA_CHART_NAME] = { - i[KEY_METADATA][KEY_METADATA_NAME]: i - for i in self.content - if i[KEY_SCHEMA] == VAL_SCHEMA_CHART} - - # For the chart group docs - self.docs[KEY_DATA_CHART_GROUP] = { - i[KEY_METADATA][KEY_METADATA_NAME]: i - for i in self.content - if i[KEY_SCHEMA] == VAL_SCHEMA_CHART_GROUP} - - # For the single manifest doc - self.docs[KEY_DATA_CHART_GROUPS] = { - i[KEY_METADATA][KEY_METADATA_NAME]: i - for i in self.content - if i[KEY_SCHEMA] == VAL_SCHEMA_MANIFEST} - else: - LOG.error("Manifest file %s does not exist" % manifest_fqpn) - - def _cleanup_meta_files(self, path): - """ Remove any previously written overrides files - - :param path: directory containing manifest overrides files - """ - for k, v in iteritems(FILE_PREFIX): - fileregex = "{}*{}".format(v, FILE_SUFFIX) - filepath = os.path.join(self.manifest_path, fileregex) - for f in glob(filepath): - os.remove(f) - - def _cleanup_deletion_manifest(self): - """ Remove any previously written deletion manifest - """ - if self.delete_manifest and os.path.exists(self.delete_manifest): - os.remove(self.delete_manifest) - - def _write_file(self, path, filename, pathfilename, data): - """ Write a yaml file - - :param path: path to write the file - :param filename: name of the file - :param pathfilename: FQPN of the file - :param data: file data - """ - try: - fd, tmppath = tempfile.mkstemp(dir=path, prefix=filename, - text=True) - - with open(tmppath, 'w') as f: - yaml.dump(data, f, Dumper=yaml.RoundTripDumper, - default_flow_style=False) - os.close(fd) - os.rename(tmppath, pathfilename) - # Change the permission to be readable to non-root - # users(ie.Armada) - os.chmod(pathfilename, 
0o644) - except Exception: - if os.path.exists(tmppath): - os.remove(tmppath) - LOG.exception("Failed to write meta overrides %s" % pathfilename) - raise - - def save_summary(self, path=None): - """ Write a yaml file containing the list of override files generated - - :param path: optional alternative location to write the file - """ - files_written = [] - for k, v in iteritems(self.updated): - for i in v: - filename = '{}{}{}'.format(FILE_PREFIX[k], i, FILE_SUFFIX) - filepath = os.path.join(self.manifest_path, filename) - files_written.append(filepath) - - # Write the list of files generated. This can be read to include with - # the Armada overrides - if path and os.path.exists(path): - # if provided, write to an alternate location - self._write_file(path, SUMMARY_FILE, - os.path.join(path, SUMMARY_FILE), - files_written) - else: - # if not provided, write to the armada directory - self._write_file(self.manifest_path, SUMMARY_FILE, - os.path.join(self.manifest_path, SUMMARY_FILE), - files_written) - - def save_overrides(self): - """ Save the overrides files - - Write the elements of the manifest (manifest, chart_group, chart) that - was updated into an overrides file. The files are written to the same - directory as the application manifest. - """ - if self.manifest_path and os.path.exists(self.manifest_path): - - # cleanup any existing meta override files - self._cleanup_meta_files(self.manifest_path) - - # Only write the updated docs as meta overrides - for k, v in iteritems(self.updated): - for i in v: - filename = '{}{}{}'.format(FILE_PREFIX[k], i, FILE_SUFFIX) - filepath = os.path.join(self.manifest_path, filename) - self._write_file(self.manifest_path, filename, filepath, - self.docs[k][i]) - else: - LOG.error("Manifest directory %s does not exist" % self.manifest_path) - - def save_delete_manifest(self): - """ Save an updated manifest for deletion - - armada delete doesn't support --values files as does the apply. To - handle proper deletion of the conditional charts/chart groups that end - up in the overrides files, create a unified file for use when deleting. - - NOTE #1: If we want to abandon using manifest overrides files altogether, - this generated file could probably be used on apply and delete. 
- - NOTE #2: Diffing the original manifest and this manifest provides a - clear view of the conditional changes that were enforced by the system - in the plugins - """ - if self.manifest_path and os.path.exists(self.manifest_path): - - # cleanup existing deletion manifest - self._cleanup_deletion_manifest() - - with open(self.delete_manifest, 'w') as f: - try: - yaml.dump_all(self.content, f, Dumper=yaml.RoundTripDumper, - explicit_start=True, - default_flow_style=False) - LOG.debug("Delete manifest file %s generated" % - self.delete_manifest) - except Exception as e: - LOG.error("Failed to generate delete manifest file %s: " - "%s" % (self.delete_manifest, e)) - else: - LOG.error("Manifest directory %s does not exist" % self.manifest_path) - - def _validate_manifest(self, manifest): - """ Ensure that the manifest is known - - :param manifest: name of the manifest - """ - if manifest not in self.docs[KEY_DATA_CHART_GROUPS]: - LOG.error("%s is not %s" % (manifest, self.docs[KEY_DATA_CHART_GROUPS].keys())) - return False - return True - - def _validate_chart_group(self, chart_group): - """ Ensure that the chart_group is known - - :param chart_group: name of the chart_group - """ - if chart_group not in self.docs[KEY_DATA_CHART_GROUP]: - LOG.error("%s is an unknown chart_group" % chart_group) - return False - return True - - def _validate_chart_groups_from_list(self, chart_group_list): - """ Ensure that all the charts groups in chart group list are known - - :param chart_group_list: list of chart groups - """ - for cg in chart_group_list: - if not self._validate_chart_group(cg): - return False - return True - - def _validate_chart(self, chart): - """ Ensure that the chart is known - - :param chart: name of the chart - """ - if chart not in self.docs[KEY_DATA_CHART_NAME]: - LOG.error("%s is an unknown chart" % chart) - return False - return True - - def _validate_chart_from_list(self, chart_list): - """ Ensure that all the charts in chart list are known - - :param chart_list: list of charts - """ - for c in chart_list: - if not self._validate_chart(c): - return False - return True - - def manifest_chart_groups_delete(self, manifest, chart_group): - """ Delete a chart group from a manifest - - This method will delete a chart group from a manifest's list of charts - groups. - - :param manifest: manifest containing the list of chart groups - :param chart_group: chart group name to delete - """ - if (not self._validate_manifest(manifest) or - not self._validate_chart_group(chart_group)): - return - - if chart_group not in self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][ - KEY_DATA_CHART_GROUPS]: - LOG.info("%s is not currently enabled. Cannot delete." % - chart_group) - return - - self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][ - KEY_DATA_CHART_GROUPS].remove(chart_group) - self.updated[KEY_DATA_CHART_GROUPS].update([manifest]) - - def manifest_chart_groups_insert(self, manifest, chart_group, before_group=None): - """ Insert a chart group into a manifest - - This method will insert a chart group into a manifest at the end of the - list of chart groups. If the before_group parameter is used the chart - group can be placed at a specific point in the chart group list. 
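Both of the removed *_insert() methods reduce to the same list splice: append by default, or insert ahead of an existing entry when a 'before' anchor is given. A minimal sketch (the removed methods validate membership before splicing; this sketch omits that check):

def insert_before(items, new_item, before=None):
    # Append at the end, or splice ahead of an existing entry.
    if before is None:
        items.append(new_item)
    else:
        items.insert(items.index(before), new_item)
    return items

# insert_before(['a', 'c'], 'b', before='c')  ->  ['a', 'b', 'c']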
- - :param manifest: manifest containing the list of chart groups - :param chart_group: chart group name to insert - :param before_group: chart group name to be appear after the inserted - chart group in the list - """ - if (not self._validate_manifest(manifest) or - not self._validate_chart_group(chart_group)): - return - - if chart_group in self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][KEY_DATA_CHART_GROUPS]: - LOG.error("%s is already enabled. Cannot insert." % - chart_group) - return - - if before_group: - if not self._validate_chart_group(before_group): - return - - if before_group not in self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][ - KEY_DATA_CHART_GROUPS]: - LOG.error("%s is not currently enabled. Cannot insert %s" % - (before_group, chart_group)) - return - - cgs = self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][KEY_DATA_CHART_GROUPS] - insert_index = cgs.index(before_group) - cgs.insert(insert_index, chart_group) - self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][KEY_DATA_CHART_GROUPS] = cgs - else: - self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][ - KEY_DATA_CHART_GROUPS].append(chart_group) - - self.updated[KEY_DATA_CHART_GROUPS].update([manifest]) - - def manifest_chart_groups_set(self, manifest, chart_group_list=None): - """ Set the chart groups for a specific manifest - - This will replace the current set of charts groups in the manifest as - specified by the armada/Manifest/v1 schema with the provided list of - chart groups. - - :param manifest: manifest containing the list of chart groups - :param chart_group_list: list of chart groups to replace the current set - of chart groups - """ - if not self._validate_manifest(manifest): - return - - if chart_group_list: - if not self._validate_chart_groups_from_list(chart_group_list): - return - - self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][KEY_DATA_CHART_GROUPS] = chart_group_list - - self.updated[KEY_DATA_CHART_GROUPS].update([manifest]) - - else: - LOG.error("Cannot set the manifest chart_groups to an empty list") - - def chart_group_chart_delete(self, chart_group, chart): - """ Delete a chart from a chart group - - This method will delete a chart from a chart group's list of charts. - - :param chart_group: chart group name - :param chart: chart name to remove from the chart list - """ - if (not self._validate_chart_group(chart_group) or - not self._validate_chart(chart)): - return - - if chart not in self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][ - KEY_DATA_CHART_GROUP]: - LOG.info("%s is not currently enabled. Cannot delete." % - chart) - return - - self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][ - KEY_DATA_CHART_GROUP].remove(chart) - self.updated[KEY_DATA_CHART_GROUP].update([chart_group]) - - def chart_group_chart_insert(self, chart_group, chart, before_chart=None): - """ Insert a chart into a chart group - - This method will insert a chart into a chart group at the end of the - list of charts. If the before_chart parameter is used the chart can be - placed at a specific point in the chart list. - - :param chart_group: chart group name - :param chart: chart name to insert - :param before_chart: chart name to be appear after the inserted chart in - the list - """ - if (not self._validate_chart_group(chart_group) or - not self._validate_chart(chart)): - return - - if chart in self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][KEY_DATA_CHART_GROUP]: - LOG.error("%s is already enabled. Cannot insert." 
% - chart) - return - - if before_chart: - if not self._validate_chart(before_chart): - return - - if before_chart not in self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][ - KEY_DATA_CHART_GROUP]: - LOG.error("%s is not currently enabled. Cannot insert %s" % - (before_chart, chart)) - return - - cg = self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][KEY_DATA_CHART_GROUP] - insert_index = cg.index(before_chart) - cg.insert(insert_index, chart) - self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][KEY_DATA_CHART_GROUP] = cg - else: - self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][ - KEY_DATA_CHART_GROUP].append(chart) - - self.updated[KEY_DATA_CHART_GROUP].update([chart_group]) - - def chart_group_set(self, chart_group, chart_list=None): - """ Set the charts for a specific chart group - - This will replace the current set of charts specified in the chart group - with the provided list. - - :param chart_group: chart group name - :param chart_list: list of charts to replace the current set of charts - """ - if not self._validate_chart_group(chart_group): - return - - if chart_list: - if not self._validate_chart_from_list(chart_list): - return - - self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][KEY_DATA_CHART_GROUP] = chart_list - - self.updated[KEY_DATA_CHART_GROUP].update([chart_group]) - - else: - LOG.error("Cannot set the chart_group charts to an empty list") - - def chart_group_add(self, chart_group, data): - """ Add a new chart group to the manifest. - - To support a self-contained dynamic plugin, this method is called to - introduced a new chart group based on the armada/ChartGroup/v1 schema. - - :param chart_group: chart group name - :param data: chart group data - """ - # Not implemented... yet. - pass - - def chart_add(self, chart, data): - """ Add a new chart to the manifest. - - To support a self-contained dynamic plugin, this method is called to - introduced a new chart based on the armada/Chart/v1 schema. - - :param chart: chart name - :param data: chart data - """ - # Not implemented... yet. - pass - - @abc.abstractmethod - def platform_mode_manifest_updates(self, dbapi, mode): - """ Update the application manifest based on the platform - - :param dbapi: DB api object - :param mode: mode to control how to apply the application manifest - """ - pass diff --git a/sysinv/sysinv/sysinv/sysinv/helm/manifest_generic.py b/sysinv/sysinv/sysinv/sysinv/helm/manifest_generic.py deleted file mode 100644 index 677ce4fca3..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/helm/manifest_generic.py +++ /dev/null @@ -1,29 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -# All Rights Reserved. 
-# - -""" System inventory Armada manifest operator.""" - -from sysinv.helm import manifest_base as base - - -class GenericArmadaManifestOperator(base.ArmadaManifestOperator): - - APP = None - ARMADA_MANIFEST = None - - CHART_GROUPS_LUT = {} - CHARTS_LUT = {} - - def platform_mode_manifest_updates(self, dbapi, mode): - """ Update the application manifest based on the platform - - :param dbapi: DB api object - :param mode: mode to control how to apply the application manifest - """ - pass diff --git a/sysinv/sysinv/sysinv/sysinv/helm/utils.py b/sysinv/sysinv/sysinv/sysinv/helm/utils.py index e0d3dd764b..55ef9c1341 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/utils.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/utils.py @@ -12,7 +12,6 @@ import base64 import os import psutil -import retrying import ruamel.yaml as yaml import tempfile import threading @@ -121,59 +120,8 @@ def retrieve_helm_v3_releases(): timer.cancel() -@retry(stop_max_attempt_number=6, wait_fixed=20 * 1000, - retry_on_exception=_retry_on_HelmTillerFailure) -def retrieve_helm_v2_releases(): - env = os.environ.copy() - env['PATH'] = '/usr/local/sbin:' + env['PATH'] - env['KUBECONFIG'] = kubernetes.KUBERNETES_ADMIN_CONF - helm_list = subprocess.Popen( - ['helmv2-cli', '--', - 'helm', - 'list', '--output', 'yaml', '--tiller-connection-timeout', '5'], - env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - universal_newlines=True) - timer = threading.Timer(20, kill_process_and_descendants, [helm_list]) - - try: - timer.start() - out, err = helm_list.communicate() - if helm_list.returncode != 0: - if err: - raise exception.HelmTillerFailure(reason=err) - - # killing the subprocesses with +kill() when timer expires returns EBADF - # because the pipe is closed, but no error string on stderr. - if helm_list.returncode == -9: - raise exception.HelmTillerFailure( - reason="helmv2-cli -- helm list operation timed out after " - "20 seconds. Terminated by threading timer.") - raise exception.HelmTillerFailure( - reason="helmv2-cli -- helm list operation failed without " - "error message, errno=%s" % helm_list.returncode) - - deployed_releases = {} - if out: - output = yaml.safe_load(out) - releases = output.get('Releases', {}) - for r in releases: - r_name = r.get('Name') - r_version = r.get('Revision') - r_namespace = r.get('Namespace') - - deployed_releases.setdefault(r_name, {}).update( - {r_namespace: r_version}) - - return deployed_releases - except Exception as e: - raise exception.HelmTillerFailure( - reason="Failed to retrieve helmv2 releases: %s" % e) - finally: - timer.cancel() - - def retrieve_helm_releases(): - """Retrieve the deployed helm releases from tiller + """Retrieve the deployed helm releases Get the name, namespace and version for the deployed releases by querying helm tiller @@ -182,58 +130,10 @@ def retrieve_helm_releases(): deployed_releases = {} deployed_releases.update(retrieve_helm_v3_releases()) - deployed_releases.update(retrieve_helm_v2_releases()) return deployed_releases -def delete_helm_release(release): - """Delete helm v2 release - - This method deletes a helm v2 release without --purge which removes - all associated resources from kubernetes but not from the store(ETCD) - - In the scenario of updating application, the method is needed to clean - up the releases if there were deployed releases in the old application - but not in the new application - - :param release: the name of the helm release - """ - # NOTE: This mechanism deletes armada/tiller managed releases. 
- # This could be adapted to also delete helm v3 releases using - # 'helm uninstall'. - env = os.environ.copy() - env['PATH'] = '/usr/local/sbin:' + env['PATH'] - env['KUBECONFIG'] = kubernetes.KUBERNETES_ADMIN_CONF - helm_cmd = subprocess.Popen( - ['helmv2-cli', '--', - 'helm', 'delete', release, '--tiller-connection-timeout', '5'], - env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - universal_newlines=True) - timer = threading.Timer(20, kill_process_and_descendants, [helm_cmd]) - - try: - timer.start() - out, err = helm_cmd.communicate() - if err and not out: - if ("deletion completed" or "not found" or "is already deleted") in err: - LOG.debug("Release %s not found or deleted already" % release) - return True - raise exception.HelmTillerFailure( - reason="Failed to delete release: %s" % err) - elif not err and not out: - err_msg = "Failed to delete release. " \ - "Helm tiller response timeout." - raise exception.HelmTillerFailure(reason=err_msg) - return True - except Exception as e: - LOG.error("Failed to delete release: %s" % e) - raise exception.HelmTillerFailure( - reason="Failed to delete release: %s" % e) - finally: - timer.cancel() - - def delete_helm_v3_release(release, namespace="default", flags=None): """Delete helm v3 release @@ -278,67 +178,6 @@ def delete_helm_v3_release(release, namespace="default", flags=None): timer.cancel() -def _retry_on_HelmTillerFailure_reset_tiller(ex): - LOG.info('Caught HelmTillerFailure exception. Resetting tiller and retrying... ' - 'Exception: {}'.format(ex)) - env = os.environ.copy() - env['PATH'] = '/usr/local/sbin:' + env['PATH'] - env['KUBECONFIG'] = kubernetes.KUBERNETES_ADMIN_CONF - helm_reset = subprocess.Popen( - ['helmv2-cli', '--', - 'helm', 'reset', '--force'], - env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - universal_newlines=True) - timer = threading.Timer(20, kill_process_and_descendants, [helm_reset]) - - try: - timer.start() - out, err = helm_reset.communicate() - if helm_reset.returncode == 0: - return isinstance(ex, exception.HelmTillerFailure) - elif err: - raise exception.HelmTillerFailure(reason=err) - else: - err_msg = "helmv2-cli -- helm reset operation failed." - raise exception.HelmTillerFailure(reason=err_msg) - except Exception as e: - raise exception.HelmTillerFailure( - reason="Failed to reset tiller: %s" % e) - finally: - timer.cancel() - - -@retrying.retry(stop_max_attempt_number=2, - retry_on_exception=_retry_on_HelmTillerFailure_reset_tiller) -def get_openstack_pending_install_charts(): - env = os.environ.copy() - env['PATH'] = '/usr/local/sbin:' + env['PATH'] - env['KUBECONFIG'] = kubernetes.KUBERNETES_ADMIN_CONF - helm_list = subprocess.Popen( - ['helmv2-cli', '--', - 'helm', 'list', '--namespace', 'openstack', - '--pending', '--tiller-connection-timeout', '5'], - env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - universal_newlines=True) - timer = threading.Timer(20, kill_process_and_descendants, [helm_list]) - - try: - timer.start() - out, err = helm_list.communicate() - if helm_list.returncode == 0: - return out - elif err: - raise exception.HelmTillerFailure(reason=err) - else: - err_msg = "helmv2-cli -- helm list operation timeout." 
diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py
index 9d7aab0ff8..5168b9b311 100644
--- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py
+++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py
@@ -415,7 +415,7 @@
         dbutils.create_test_app(
             name='stx-openstack',
             app_version='1.0-19',
-            manifest_name='openstack-armada-manifest',
+            manifest_name='manifest',
             manifest_file='stx-openstack.yaml',
             status='applied',
             active=True)
diff --git a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_kube_app_image_parser.py b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_kube_app_image_parser.py
index 6ca92b69fd..2f92026ff6 100644
--- a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_kube_app_image_parser.py
+++ b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_kube_app_image_parser.py
@@ -197,7 +197,7 @@ class TestKubeAppImageParser(base.TestCase):
         self.assertEqual(expected, images_dict_with_local_registry)
 
     def test_generate_download_images_with_merge_dict(self):
-        armada_chart_imgs = copy.deepcopy(IMAGES_RESOURCE)
+        chart_imgs = copy.deepcopy(IMAGES_RESOURCE)
 
         override_imgs = {
             'images': {
@@ -313,7 +313,7 @@ class TestKubeAppImageParser(base.TestCase):
         }
 
         download_imgs_dict = self.image_parser.merge_dict(
-            armada_chart_imgs, override_imgs)
+            chart_imgs, override_imgs)
         self.assertEqual(expected, download_imgs_dict)
 
     def test_generate_download_images_list(self):
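The merge_dict call renamed above folds the per-chart override images into the base chart images. A minimal sketch of that merge, assuming "override values win, nested dicts merge recursively" semantics as implied by the test's expected dict; this is an illustration, not the sysinv implementation, and the image tags are made up:

    def merge_dict(base, override):
        """Recursively merge override into base; override values win."""
        merged = dict(base)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = merge_dict(merged[key], value)
            else:
                merged[key] = value
        return merged

    # Nested tags are overridden while untouched keys survive:
    chart_imgs = {'images': {'tags': {'glance': 'glance:v1', 'keystone': 'ks:v1'}}}
    override_imgs = {'images': {'tags': {'glance': 'glance:v2'}}}
    assert merge_dict(chart_imgs, override_imgs) == \
        {'images': {'tags': {'glance': 'glance:v2', 'keystone': 'ks:v1'}}}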
diff --git a/sysinv/sysinv/sysinv/sysinv/tests/helm/test_helm.py b/sysinv/sysinv/sysinv/sysinv/tests/helm/test_helm.py
deleted file mode 100644
index 6f19f75b7c..0000000000
--- a/sysinv/sysinv/sysinv/sysinv/tests/helm/test_helm.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-import keyring
-import mock
-
-from sysinv.helm.helm import HelmOperator
-from sysinv.helm.manifest_base import ArmadaManifestOperator
-
-from sysinv.tests.db import utils as dbutils
-from sysinv.tests.helm import base as helm_base
-
-
-class HelmOperatorTestSuiteMixin(helm_base.HelmTestCaseMixin):
-    """When HelmOperatorTestSuiteMixin is added as a Mixin
-       alongside a subclass of BaseHostTestCase
-       these testcases are added to it
-       This also requires an AppMixin to provide app_name
-    """
-    def setUp(self):
-        super(HelmOperatorTestSuiteMixin, self).setUp()
-        self.app = dbutils.create_test_app(name=self.app_name)
-        # If a ceph keyring entry is missing, a subprocess will be invoked
-        # so a fake keyring password is being supplied here.
-        keyring.set_password('glance', 'admin_keyring', 'FakePassword1*')
-
-        # Armada routines that write to disk can be mocked away
-        save_overrides = mock.patch.object(ArmadaManifestOperator,
-                                           'save_overrides')
-        self.mock_save_overrides = save_overrides.start()
-        self.addCleanup(save_overrides.stop)
-
-        save_delete_manifest = mock.patch.object(ArmadaManifestOperator,
-                                                 'save_delete_manifest')
-        save_delete_manifest.start()
-        self.addCleanup(save_delete_manifest.stop)
-
-        save_summary = mock.patch.object(ArmadaManifestOperator,
-                                         'save_summary')
-        save_summary.start()
-        self.addCleanup(save_summary.stop)
-
-        # _write_file is called per helm chart
-        write_file = mock.patch.object(ArmadaManifestOperator,
-                                       '_write_file')
-        write_file.start()
-        self.addCleanup(write_file.stop)
-
-    def tearDown(self):
-        super(HelmOperatorTestSuiteMixin, self).tearDown()
-
-    @mock.patch.object(HelmOperator, '_write_chart_overrides')
-    def test_generate_helm_chart_overrides(self, mock_write_chart):
-        self.operator.generate_helm_application_overrides(self.path_name,
-                                                          self.app_name)
-        assert self.mock_save_overrides.called
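The deleted suite above is the last user of a setUp idiom worth keeping in mind for any FluxCD-era replacement: start each mock.patch.object patcher in setUp and register its stop with addCleanup, so patches are undone even when a test fails. A generic, hedged sketch of that idiom; the Example* names are hypothetical placeholders, not sysinv classes:

    import mock  # on newer Python this is available as unittest.mock
    import unittest

    class ExampleOperator(object):
        def save_overrides(self):
            raise RuntimeError("would write to disk")

    class ExampleTestCase(unittest.TestCase):
        def setUp(self):
            super(ExampleTestCase, self).setUp()
            patcher = mock.patch.object(ExampleOperator, 'save_overrides')
            self.mock_save_overrides = patcher.start()
            # addCleanup runs even if the test fails, unlike tearDown-only logic.
            self.addCleanup(patcher.stop)

        def test_save_overrides_is_mocked(self):
            ExampleOperator().save_overrides()  # hits the mock, not the disk
            self.assertTrue(self.mock_save_overrides.called)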