Remove Armada and Helm v2

With the application framework moving to FluxCD,
Armada is no longer supported, and its configuration
files and resources are no longer necessary.

The same applies to Helm v2 (Tiller): the system
now uses Helm v3.
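
As an illustration of the Helm v3 model, here is a minimal sketch of
how deployed releases can now be queried directly, with no Tiller pod
and no helmv2-cli wrapper. The helper name is hypothetical and the
kubeconfig path is an assumption; it mirrors the subprocess pattern
used in the helm utility changes below:

    import os
    import subprocess

    import ruamel.yaml as yaml

    def list_helm_v3_releases(kubeconfig='/etc/kubernetes/admin.conf'):
        # Helm v3 talks to the Kubernetes API server directly,
        # so no Tiller pod or helmv2-cli wrapper is involved.
        env = os.environ.copy()
        env['KUBECONFIG'] = kubeconfig  # assumed admin kubeconfig path
        out = subprocess.check_output(
            ['helm', 'list', '--all-namespaces', '--output', 'yaml'],
            env=env, universal_newlines=True)
        return yaml.safe_load(out) or []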

Test Plan:
PASS: app operations still work
PASS: deploy, with all system apps uploaded/applied
PASS: deployed system unlocked/enabled/available
PASS: upgrade AIO-SX
PASS: upgrade AIO-DX
PASS: deploy Distributed Cloud (DC)
PASS: backup and restore (B&R)

Signed-off-by: Fabricio Henrique Ramos <fabriciohenrique.ramos@windriver.com>
Change-Id: I48bf128afa3b85295e83f524c827ced8a5e3da75
Author: Fabricio Henrique Ramos
Date:   2022-12-27 18:45:49 -03:00
Commit: c937f46ece (parent dcd5c91862)
19 changed files with 98 additions and 2469 deletions


@@ -184,7 +184,7 @@ sysinv_conductor_monitor () {
fi
# A workaround for monitoring the owner of /home/sysadmin/.kube
# This should be removed as soon as Helm v3 and Armada containerization work is complete.
# This should be removed as soon as Helm v3 containerization work is complete.
if [ -d /home/sysadmin/.kube -a "$(stat -c %U /home/sysadmin/.kube)" != "sysadmin" ]; then
chown -R sysadmin:sys_protected /home/sysadmin/.kube
ocf_log info "Fixing /home/sysadmin/.kube ownership"
@@ -215,11 +215,6 @@ sysinv_conductor_start () {
fi
fi
# Remove any existing application containers in case they were not shut
# down cleanly. sysinv-conductor will start these on demand with first use.
ocf_log info "${proc} Cleaning up any stale application containers before start. (sysinv-conductor)"
sysinv_remove_application_containers
if [ ${OCF_RESKEY_dbg} = "true" ] ; then
RUN_OPT_DEBUG="--debug"
else
@@ -251,48 +246,6 @@ sysinv_conductor_start () {
return ${rc}
}
sysinv_remove_application_containers() {
local containers='armada_service'
local rc
# The entry point for this is when the conductor has been confirmed to be
# stopped. Now cleanup any dependent service containers. This will be done
# here until we re-factor the management of (i.e. catch SIGKILL and cleanup)
# or the retirement of (i.e. move armada to a pod) these dependencies
# On a non K8S configuration docker status will be EXIT_NOTIMPLEMENTED
systemctl status docker 2>&1 >> /dev/null
rc=$?
if [ $rc -eq 3 ]; then
ocf_log info "${proc} Docker is not running, skipping container actions. (sysinv-conductor)"
return
fi
# Shutdown containers with DRBD dependencies that would prevent a swact.
for c in $containers; do
local id
# does the container exist
id=$(docker container ls -qf name=${c} 2>/dev/null)
if [ ! -n "$id" ]; then
ocf_log info "${proc} Container $c is not started, skipping stop action. (sysinv-conductor)"
else
# Graceful shutdown (default is 10 sec, then kill)
ocf_log info "${proc} About to stop container $c... (sysinv-conductor)"
docker stop $c 2>&1 >> /dev/null
fi
id=$(docker container ls -aqf name=${c} 2>/dev/null)
if [ ! -n "$id" ]; then
ocf_log info "${proc} Container $c is not present, skipping remove action. (sysinv-conductor)"
else
# Cleanup the container. Use force just in case.
ocf_log info "${proc} About to remove container $c... (sysinv-conductor)"
docker rm -f $c 2>&1 >> /dev/null
fi
done
}
sysinv_conductor_confirm_stop() {
local my_bin
local my_processes
@@ -322,8 +275,6 @@ sysinv_conductor_stop () {
ocf_log info "${proc} Sysinv Conductor (sysinv-conductor) already stopped"
sysinv_conductor_confirm_stop
sysinv_remove_application_containers
return ${OCF_SUCCESS}
fi
@@ -364,8 +315,6 @@ sysinv_conductor_stop () {
fi
sysinv_conductor_confirm_stop
sysinv_remove_application_containers
ocf_log info "${proc} Sysinv Conductor (sysinv-conductor) stopped."
rm -f $OCF_RESKEY_pid


@@ -72,9 +72,6 @@ systemconfig.puppet_plugins =
042_sssd = sysinv.puppet.sssd:SssdPuppet
099_service_parameter = sysinv.puppet.service_parameter:ServiceParamPuppet
systemconfig.armada.manifest_ops =
generic = sysinv.helm.manifest_generic:GenericArmadaManifestOperator
systemconfig.fluxcd.kustomize_ops =
generic = sysinv.helm.kustomize_generic:GenericFluxCDKustomizeOperator


@@ -202,7 +202,7 @@ class KubeAppController(rest.RestController):
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(KubeApp, body=types.apidict)
def post(self, body):
"""Uploading an application to be deployed by Armada"""
"""Uploading an application to be deployed"""
tarfile_path = body.get('tarfile')
tarfile_binary = body.get('binary_data', '')
name = body.get('name', '')
@@ -491,12 +491,13 @@ class KubeAppController(rest.RestController):
applied_app.progress = None
applied_app.save()
# TODO revise comment below regarding armada
# If the version has ever applied before(inactive app found),
# use armada rollback to apply application later, otherwise,
# use armada apply.
# On the AIO-SX, always use armada apply even it was applied
# use ----- rollback to apply application later, otherwise,
# use ----- apply.
# On the AIO-SX, always use ----- apply even it was applied
# before, issue on AIO-SX(replicas is 1) to leverage rollback,
# armada/helm rollback --wait does not wait for pods to be
# -----/helm rollback --wait does not wait for pods to be
# ready before it returns.
# related to helm issue,
# https://github.com/helm/helm/issues/4210
@@ -697,19 +698,14 @@ class KubeAppHelper(object):
def _find_manifest(self, app_path, app_name):
""" Find the required application manifest elements
Check for an Armada manifest or a FluxCD manifest directory
Check for a FluxCD manifest directory
"""
try:
# Check for the presence of a FluxCD manifest directory
mfile = self._find_fluxcd_manifest(app_path, app_name)
except exception.SysinvException as fluxcd_e:
try:
# Check for the presence of an Armada manifest
mfile = self._find_armada_manifest(app_path)
except exception.SysinvException as armada_e:
raise exception.SysinvException(_(
"Application-upload rejected: {} and {} ".format(
fluxcd_e, armada_e)))
raise exception.SysinvException(_(
"Application-upload rejected: {}".format(fluxcd_e)))
return mfile
def _find_fluxcd_manifest(self, app_path, app_name):
@@ -720,26 +716,6 @@ class KubeAppHelper(object):
raise exception.SysinvException(_(
"FluxCD manifest structure is not present"))
def _find_armada_manifest(self, app_path):
# It is expected that there is only one manifest file
# per application and the file exists at top level of
# the application path.
mfiles = cutils.find_armada_manifest_file(app_path)
if mfiles is None:
raise exception.SysinvException(_(
"Armada manifest file is corrupted."))
if mfiles:
if len(mfiles) == 1:
return mfiles[0]
else:
raise exception.SysinvException(_(
"tar file contains more than one Armada manifest file."))
raise exception.SysinvException(_(
"Armada manifest file/directory is missing"))
def _verify_metadata_file(self, app_path, app_name, app_version,
upgrade_from_release=None):
try:

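To make the simplified validation concrete, here is a minimal sketch
of the structural check the upload path now performs. The directory
name is constants.APP_FLUXCD_MANIFEST_DIR from this change; the helper
name is hypothetical:

    import os

    APP_FLUXCD_MANIFEST_DIR = 'fluxcd-manifests'

    def has_fluxcd_manifests(app_path):
        # A FluxCD app tarball must carry one top-level
        # 'fluxcd-manifests' directory; otherwise the upload is
        # rejected with "FluxCD manifest structure is not present".
        return os.path.isdir(
            os.path.join(app_path, APP_FLUXCD_MANIFEST_DIR))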

@@ -256,8 +256,6 @@ class KubeUpgradeController(rest.RestController):
apps = pecan.request.dbapi.kube_app_get_all()
self._check_installed_apps_compatibility(apps, to_version)
# TODO: check that tiller/armada support new k8s version
# The system must be healthy
success, output = pecan.request.rpcapi.get_system_health(
pecan.request.context,


@@ -47,42 +47,11 @@ def create_fluxcd_app_overrides_action(path, app_name=None, namespace=None):
app_operator.activate_app_plugins(db_app)
helm_operator.generate_helm_application_overrides(
path, app_name, mode=None, cnamespace=namespace,
armada_format=False, chart_info=None, combined=False,
is_fluxcd_app=True)
chart_info=None, combined=False)
app_operator.deactivate_app_plugins(db_app)
else:
helm_operator.generate_helm_application_overrides(
path, app_name, mode=None, cnamespace=namespace,
is_fluxcd_app=True)
def create_armada_app_overrides_action(path, app_name=None, namespace=None):
dbapi = api.get_instance()
try:
db_app = dbapi.kube_app_get(app_name)
except exception.KubeAppNotFound:
LOG.info("Application %s not found" % app_name)
return
helm_operator = helm.HelmOperator(dbapi=dbapi)
app_operator = kube_app.AppOperator(dbapi, helm_operator, {})
if not app_operator.app_has_system_plugins(db_app):
LOG.info("Overrides generation for application %s is "
"not supported via this command." % app_name)
else:
if db_app.status == constants.APP_UPLOAD_SUCCESS:
app_operator.activate_app_plugins(db_app)
helm_operator.generate_helm_application_overrides(
path, app_name, mode=None, cnamespace=namespace,
armada_format=False, chart_info=None, combined=False,
is_fluxcd_app=False)
app_operator.deactivate_app_plugins(db_app)
else:
helm_operator.generate_helm_application_overrides(
path, app_name, mode=None, cnamespace=namespace,
is_fluxcd_app=False)
path, app_name, mode=None, cnamespace=namespace)
def add_action_parsers(subparsers):
@@ -92,12 +61,6 @@ def add_action_parsers(subparsers):
parser.add_argument('app_name', nargs='?')
parser.add_argument('namespace', nargs='?')
parser = subparsers.add_parser('create-armada-app-overrides')
parser.set_defaults(func=create_armada_app_overrides_action)
parser.add_argument('path', nargs='?')
parser.add_argument('app_name', nargs='?')
parser.add_argument('namespace', nargs='?')
CONF.register_cli_opt(
cfg.SubCommandOpt('action',
@@ -119,12 +82,3 @@ def main():
CONF.action.func(CONF.action.path,
CONF.action.app_name,
CONF.action.namespace)
elif CONF.action.name == 'create-armada-app-overrides':
if not CONF.action.path:
LOG.error("A path is required to save overrides")
elif not CONF.action.app_name:
LOG.error("Armada application name is required")
else:
CONF.action.func(CONF.action.path,
CONF.action.app_name,
CONF.action.namespace)


@@ -1747,9 +1747,6 @@ APP_METADATA_FILE = 'metadata.yaml'
APP_PENDING_REAPPLY_FLAG = os.path.join(
tsc.HELM_OVERRIDES_PATH, ".app_reapply")
# Armada
APP_SYNCED_ARMADA_DATA_PATH = os.path.join(tsc.PLATFORM_PATH, 'armada', tsc.SW_VERSION)
# FluxCD
APP_FLUXCD_MANIFEST_DIR = 'fluxcd-manifests'
APP_FLUXCD_DATA_PATH = os.path.join(tsc.PLATFORM_PATH, 'fluxcd', tsc.SW_VERSION)
@@ -1822,12 +1819,10 @@ APP_LIFECYCLE_TYPE_SEMANTIC_CHECK = 'check'
APP_LIFECYCLE_TYPE_OPERATION = 'operation'
APP_LIFECYCLE_TYPE_RBD = 'rbd'
APP_LIFECYCLE_TYPE_RESOURCE = 'resource'
# armada manifest
# fluxcd manifest
# outside the function that has the retry decorator
APP_LIFECYCLE_TYPE_MANIFEST = 'manifest'
# inside the function that has a retry decorator
APP_LIFECYCLE_TYPE_ARMADA_REQUEST = 'armada-request'
# same as armada
APP_LIFECYCLE_TYPE_FLUXCD_REQUEST = 'fluxcd-request'
APP_LIFECYCLE_MODE_MANUAL = 'manual'
@@ -1975,9 +1970,6 @@ APP_PROGRESS_RECOVER_CHARTS = 'recovering helm charts'
APP_PROGRESS_UPDATE_FAILED_SKIP_RECOVERY = "Application {} update from " \
"version {} to version {} failed and recovery skipped " \
"because skip_recovery was requested."
APP_PROGRESS_UPDATE_FAILED_ARMADA_TO_FLUXCD = "Application {} update from " \
"version {} to version {} failed and recovery skipped " \
"because recovering between Armada and FluxCD is not allowed"
APP_PROGRESS_REMOVE_FAILED_WARNING = "Application remove failed. Status forced to '{}'. " \
"Use native helm commands to clean up application helm releases."


@@ -2557,37 +2557,6 @@ def find_metadata_file(path, metadata_file, upgrade_from_release=None):
return app_name, app_version, patches
def find_armada_manifest_file(path):
""" Find all Armada manifest files in a given directory. """
def _is_armada_manifest(yaml_file):
with io.open(yaml_file, 'r', encoding='utf-8') as f:
docs = yaml.load_all(f)
for doc in docs:
try:
if "armada/Manifest" in doc['schema']:
manifest_name = doc['metadata']['name']
return manifest_name, yaml_file
except KeyError:
# Could be some other yaml files
pass
return None, None
mfiles = []
for file in os.listdir(path):
if file.endswith('.yaml'):
yaml_file = os.path.join(path, file)
try:
mname, mfile = _is_armada_manifest(yaml_file)
if mfile:
mfiles.append((mname, mfile))
except Exception as e:
# Included yaml file is corrupted
LOG.exception(e)
return None
return mfiles
def find_fluxcd_manifests_directory(path, name):
"""For FluxCD apps we expect to have one top-level manifest directory that
contains the name of constants.APP_FLUXCD_MANIFEST_DIR. Validate that it
@@ -2759,25 +2728,6 @@ def is_aio_duplex_system(dbapi):
system.system_mode == constants.SYSTEM_MODE_DUPLEX_DIRECT))
def generate_synced_armada_dir(app_name, app_version):
""" Armada application: Top level directory. """
return os.path.join(constants.APP_SYNCED_ARMADA_DATA_PATH, app_name, app_version)
def generate_synced_armada_manifest_fqpn(app_name, app_version, manifest_filename):
""" Armada application: Armada manifest file. """
return os.path.join(
constants.APP_SYNCED_ARMADA_DATA_PATH, app_name, app_version,
app_name + '-' + manifest_filename)
def generate_synced_metadata_fqpn(app_name, app_version):
""" Armada application: Application metadata file. """
return os.path.join(
constants.APP_SYNCED_ARMADA_DATA_PATH, app_name, app_version,
'metadata.yaml')
def generate_synced_fluxcd_dir(app_name, app_version):
""" FluxCD application: Top level directory. """
return os.path.join(constants.APP_FLUXCD_DATA_PATH, app_name, app_version)
@@ -2847,7 +2797,7 @@ def get_app_supported_kube_version(app_name, app_version):
"""Get the application supported k8s version from the synced application metadata file"""
app_metadata_path = os.path.join(
constants.APP_SYNCED_ARMADA_DATA_PATH, app_name,
constants.APP_FLUXCD_DATA_PATH, app_name,
app_version, constants.APP_METADATA_FILE)
kube_min_version = None

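For reference, the synced FluxCD paths resolve as in this small worked
sketch. The /opt/platform root and the 22.12 software version are
illustrative values standing in for tsc.PLATFORM_PATH and
tsc.SW_VERSION, not taken from this change:

    import os

    PLATFORM_PATH = '/opt/platform'   # illustrative tsc.PLATFORM_PATH
    SW_VERSION = '22.12'              # illustrative tsc.SW_VERSION
    APP_FLUXCD_DATA_PATH = os.path.join(PLATFORM_PATH, 'fluxcd', SW_VERSION)

    def generate_synced_fluxcd_dir(app_name, app_version):
        # FluxCD application: top level directory, as defined above.
        return os.path.join(APP_FLUXCD_DATA_PATH, app_name, app_version)

    # generate_synced_fluxcd_dir('platform-integ-apps', '1.0-1')
    #   -> '/opt/platform/fluxcd/22.12/platform-integ-apps/1.0-1'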
File diff suppressed because it is too large.


@@ -372,7 +372,6 @@ class ConductorManager(service.PeriodicService):
self._app = kube_app.AppOperator(self.dbapi, self._helm, self.apps_metadata)
self._docker = kube_app.DockerHelper(self.dbapi)
self._kube = kubernetes.KubeOperator()
self._armada = kube_app.ArmadaHelper(self._kube)
self._kube_app_helper = kube_api.KubeAppHelper(self.dbapi)
self._fernet = fernet.FernetOperator()
@@ -6995,18 +6994,6 @@ class ConductorManager(service.PeriodicService):
"activity")
return
# Ensure that armada pod is running and ready.
pods = self._kube.kube_get_pods_by_selector("armada",
"application=armada",
"status.phase=Running")
for pod in pods:
if (pod.metadata.deletion_timestamp is None and
self._armada.check_pod_ready_probe(pod)):
break
else:
LOG.warning("Armada pod is not running and ready. Defer audit.")
return
# Ensure that FluxCD pods are ready.
if not self._app.check_fluxcd_pod_status():
LOG.warning("FluxCD pods are not ready. Defer audit.")
@@ -14212,8 +14199,8 @@ class ConductorManager(service.PeriodicService):
# as well as removing the writing to disk of the new overrides
old_hash = {}
app.charts = self._app._get_list_of_charts(app)
(helm_files, armada_files) = self._app._get_overrides_files(app, None)
for f in helm_files + armada_files:
helm_files = self._app._get_overrides_files(app)
for f in helm_files:
with open(f, 'rb') as file:
old_hash[f] = hashlib.md5(file.read()).hexdigest()
@@ -14223,10 +14210,9 @@ class ConductorManager(service.PeriodicService):
app.charts = self._app._get_list_of_charts(app)
self._helm.generate_helm_application_overrides(
app.sync_overrides_dir, app.name, app.mode, cnamespace=None,
armada_format=True, chart_info=app.charts, combined=True,
is_fluxcd_app=app.is_fluxcd_app)
(helm_files, armada_files) = self._app._get_overrides_files(app, None)
for f in helm_files + armada_files:
chart_info=app.charts, combined=True)
helm_files = self._app._get_overrides_files(app)
for f in helm_files:
with open(f, 'rb') as file:
new_hash[f] = hashlib.md5(file.read()).hexdigest()
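
The overrides comparison above reduces to hashing each overrides file
before and after regeneration; a minimal standalone sketch of that
pattern (helper name hypothetical):

    import hashlib

    def md5_of_files(paths):
        # Map each overrides file to the md5 digest of its contents,
        # as the conductor does around regenerating overrides.
        hashes = {}
        for p in paths:
            with open(p, 'rb') as f:
                hashes[p] = hashlib.md5(f.read()).hexdigest()
        return hashes

    # Overrides changed iff:
    #   md5_of_files(helm_files_before) != md5_of_files(helm_files_after)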


@@ -403,21 +403,6 @@ class BaseHelm(object):
"""
return True
def execute_manifest_updates(self, operator):
"""
Update the elements of the armada manifest.
This allows a helm chart plugin to use the ArmadaManifestOperator to
make dynamic structural changes to the application manifest based on the
current conditions in the platform
Changes include updates to manifest documents for the following schemas:
armada/Manifest/v1, armada/ChartGroup/v1, armada/Chart/v1.
:param operator: an instance of the ArmadaManifestOperator
"""
pass
def execute_kustomize_updates(self, operator):
"""
Update the elements of FluxCD kustomize manifests.


@@ -39,13 +39,12 @@ HELM_NS_CERT_MANAGER = 'cert-manager'
HELM_NS_VAULT = 'vault'
HELM_NS_NOTIFICATION = 'notification'
HELM_NS_DEPLOYMENT = 'deployment'
HELM_NS_ARMADA = 'armada'
HELM_NS_FLUX_HELM = 'flux-helm'
# namespace groups for pod security admission controller
PRIVILEGED_NS = [HELM_NS_CEPH, HELM_NS_NFS, HELM_NS_OPENSTACK, HELM_NS_HELM_TOOLKIT,
HELM_NS_MONITOR, HELM_NS_RBD_PROVISIONER, HELM_NS_STORAGE_PROVISIONER,
HELM_NS_CERT_MANAGER, HELM_NS_VAULT, HELM_NS_DEPLOYMENT, HELM_NS_ARMADA,
HELM_NS_CERT_MANAGER, HELM_NS_VAULT, HELM_NS_DEPLOYMENT,
HELM_NS_KUBE_SYSTEM, HELM_NS_NOTIFICATION, HELM_NS_FLUX_HELM]
POD_SECURITY_VERSION = 'latest'


@@ -10,7 +10,6 @@ from __future__ import absolute_import
import eventlet
import os
import re
import tempfile
import yaml
@@ -18,7 +17,6 @@ from six import iteritems
from stevedore import extension
from oslo_log import log as logging
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils
from sysinv.helm import common
@@ -36,12 +34,6 @@ yaml.Dumper.ignore_aliases = lambda *data: True
# The convention here is for the helm plugins to be named ###_PLUGINNAME.
HELM_PLUGIN_PREFIX_LENGTH = 4
# Number of optional characters appended to Armada manifest operator name,
# to allow overriding with a newer version of the Armada manifest operator.
# The convention here is for the Armada operator plugins to allow an
# optional suffix, as in PLUGINNAME_###.
ARMADA_PLUGIN_SUFFIX_LENGTH = 4
# Number of optional characters appended to FluxCD kustomize operator name, to
# allow overriding with a newer version of the FluxCD kustomize operator. The
# convention here is for the FluxCD kustomize operator plugins to allow an
@@ -83,27 +75,24 @@ class HelmOperator(object):
# Define the stevedore namespaces that will need to be managed for plugins
STEVEDORE_APPS = 'systemconfig.helm_applications'
STEVEDORE_ARMADA = 'systemconfig.armada.manifest_ops'
STEVEDORE_FLUXCD = 'systemconfig.fluxcd.kustomize_ops'
STEVEDORE_LIFECYCLE = 'systemconfig.app_lifecycle'
def __init__(self, dbapi=None):
self.dbapi = dbapi
# Find all plugins for apps, charts per app, and armada manifest
# operators
# Find all plugins for apps, charts per app, and fluxcd operators
self.discover_plugins()
@utils.synchronized(LOCK_NAME)
def discover_plugins(self):
""" Scan for all available plugins """
LOG.debug("HelmOperator: Loading available helm, armada and lifecycle plugins.")
LOG.debug("HelmOperator: Loading available helm, fluxcd and lifecycle plugins.")
# Initialize the plugins
self.helm_system_applications = {}
self.chart_operators = {}
self.armada_manifest_operators = {}
self.fluxcd_kustomize_operators = {}
self.app_lifecycle_operators = {}
@@ -115,9 +104,6 @@ class HelmOperator(object):
# dict containing sequence of helm charts per app
self.helm_system_applications = self._load_helm_applications()
# dict containing Armada manifest operators per app
self.armada_manifest_operators = self._load_armada_manifest_operators()
# dict containing FluxCD kustomize operators per app
self.fluxcd_kustomize_operators = self._load_fluxcd_kustomize_operators()
@@ -153,32 +139,6 @@ class HelmOperator(object):
LOG.info("Couldn't find endpoint distribution located at %s for "
"%s" % (install_location, lifecycle_distribution))
for armada_ep in extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA]:
armada_distribution = None
try:
armada_distribution = utils.get_distribution_from_entry_point(armada_ep)
(project_name, project_location) = \
utils.get_project_name_and_location_from_distribution(armada_distribution)
if project_location == install_location:
extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA].remove(armada_ep)
break
except exception.SysinvException:
# Temporary suppress errors on Debian until Stevedore is reworked.
# See https://storyboard.openstack.org/#!/story/2009101
if utils.is_debian():
LOG.info("Didn't find distribution for {}. Deleting from cache".format(armada_ep))
try:
extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA].remove(armada_ep)
except Exception as e:
LOG.info("Tried removing armada_ep {}, error: {}".format(armada_ep, e))
else:
raise
else:
LOG.info("Couldn't find endpoint distribution located at %s for "
"%s" % (install_location, armada_distribution))
for fluxcd_ep in extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_FLUXCD]:
fluxcd_distribution = None
@@ -252,7 +212,7 @@ class HelmOperator(object):
# Temporary suppress errors on Debian until Stevedore is reworked.
# See https://storyboard.openstack.org/#!/story/2009101
if utils.is_debian():
LOG.info("Tried removing app_ep {}, error: {}".format(armada_ep, e))
LOG.info("Tried removing app_ep {}, error: {}".format(app_ep, e))
continue
else:
raise
@@ -277,12 +237,6 @@ class HelmOperator(object):
else:
LOG.info("No entry points for %s found." % self.STEVEDORE_APPS)
try:
del extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA]
LOG.debug("Deleted entry points for %s." % self.STEVEDORE_ARMADA)
except KeyError:
LOG.info("No entry points for %s found." % self.STEVEDORE_ARMADA)
try:
del extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_FLUXCD]
LOG.debug("Deleted entry points for %s." % self.STEVEDORE_FLUXCD)
@@ -328,44 +282,6 @@ class HelmOperator(object):
return operator
def _load_armada_manifest_operators(self):
"""Build a dictionary of armada manifest operators"""
operators_dict = {}
dist_info_dict = {}
armada_manifest_operators = extension.ExtensionManager(
namespace=self.STEVEDORE_ARMADA,
invoke_on_load=True, invoke_args=())
sorted_armada_manifest_operators = sorted(
armada_manifest_operators.extensions, key=lambda x: x.name)
for op in sorted_armada_manifest_operators:
if (op.name[-(ARMADA_PLUGIN_SUFFIX_LENGTH - 1):].isdigit() and
op.name[-ARMADA_PLUGIN_SUFFIX_LENGTH:-3] == '_'):
op_name = op.name[0:-ARMADA_PLUGIN_SUFFIX_LENGTH]
else:
op_name = op.name
operators_dict[op_name] = op.obj
distribution = utils.get_distribution_from_entry_point(op.entry_point)
(project_name, project_location) = \
utils.get_project_name_and_location_from_distribution(distribution)
# Extract distribution information for logging
dist_info_dict[op_name] = {
'name': project_name,
'location': project_location,
}
# Provide some log feedback on plugins being used
for (app_name, info) in iteritems(dist_info_dict):
LOG.debug("Plugins for %-20s: loaded from %-20s - %s." % (app_name,
info['name'], info['location']))
return operators_dict
def _load_fluxcd_kustomize_operators(self):
"""Build a dictionary of FluxCD kustomize operators"""
@@ -404,16 +320,6 @@ class HelmOperator(object):
return operators_dict
def get_armada_manifest_operator(self, app_name):
"""Return a manifest operator based on app name"""
plugin_name = utils.find_app_plugin_name(app_name)
if plugin_name in self.armada_manifest_operators:
manifest_op = self.armada_manifest_operators[plugin_name]
else:
manifest_op = self.armada_manifest_operators['generic']
return manifest_op
def get_fluxcd_kustomize_operator(self, app_name):
"""Return a kustomize operator based on app name"""
@@ -649,93 +555,6 @@ class HelmOperator(object):
LOG.info(e)
return overrides
def _get_helm_chart_location(self, chart_name, repo_name, chart_tarfile):
"""Get the chart location.
This method returns the download location for a given chart.
:param chart_name: name of the chart
:param repo_name: name of the repo that chart uploaded to
:param chart_tarfile: name of the chart tarfile
:returns: a URL as location
"""
if repo_name is None:
repo_name = common.HELM_REPO_FOR_APPS
if chart_tarfile is None:
# TODO: Clean up the assumption
chart_tarfile = chart_name + '-0.1.0'
# Set the location based on ip address since
# http://controller does not resolve in armada container.
sys_controller_network = self.dbapi.network_get_by_type(constants.NETWORK_TYPE_CLUSTER_HOST)
sys_controller_network_addr_pool = self.dbapi.address_pool_get(sys_controller_network.pool_uuid)
sc_float_ip = sys_controller_network_addr_pool.floating_address
if utils.is_valid_ipv6(sc_float_ip):
sc_float_ip = '[' + sc_float_ip + ']'
return 'http://{}:{}/helm_charts/{}/{}.tgz'.format(
sc_float_ip,
utils.get_http_port(self.dbapi), repo_name, chart_tarfile)
def _add_armada_override_header(self, chart_name, chart_metadata_name, repo_name,
chart_tarfile, namespace, overrides):
if chart_metadata_name is None:
chart_metadata_name = namespace + '-' + chart_name
new_overrides = {
'schema': 'armada/Chart/v1',
'metadata': {
'schema': 'metadata/Document/v1',
'name': chart_metadata_name
},
'data': {
'values': overrides
}
}
location = self._get_helm_chart_location(chart_name, repo_name, chart_tarfile)
if location:
new_overrides['data'].update({
'source': {
'location': location
}
})
return new_overrides
def _get_chart_info_from_armada_chart(self, chart_name, chart_namespace,
chart_info_list):
""" Extract the metadata name of the armada chart, repo and the name of
the chart tarfile from the armada manifest chart.
:param chart_name: name of the chart from the (application list)
:param chart_namespace: namespace of the chart
:param chart_info_list: a list of chart objects containing information
extracted from the armada manifest
:returns: the metadata name of the chart, the supported StarlingX repository,
the name of the chart tarfile or None,None,None if not present
"""
# Could be called without any armada_manifest info. Returning 'None'
# will enable helm defaults to point to common.HELM_REPO_FOR_APPS
metadata_name = None
repo = None
chart_tarfile = None
if chart_info_list is None:
return metadata_name, repo, chart_tarfile
location = None
for c in chart_info_list:
if (c.name == chart_name and
c.namespace == chart_namespace):
location = c.location
metadata_name = c.metadata_name
break
if location:
match = re.search('/helm_charts/(.*)/(.*).tgz', location)
if match:
repo = match.group(1)
chart_tarfile = match.group(2)
LOG.debug("Chart %s can be found in repo: %s" % (chart_name, repo))
return metadata_name, repo, chart_tarfile
def merge_overrides(self, file_overrides=None, set_overrides=None):
""" Merge helm overrides together.
@@ -832,186 +651,6 @@ class HelmOperator(object):
@helm_context
@utils.synchronized(LOCK_NAME)
def generate_helm_application_overrides(self, path, app_name,
mode=None,
cnamespace=None,
armada_format=False,
chart_info=None,
combined=False,
is_fluxcd_app=False):
"""Create the system overrides files for a supported application
This method will generate system helm chart overrides yaml files for a
set of supported charts that comprise an application. If the namespace
is provided only the overrides files for that specified namespace will
be written.
:param app_name: name of the bundle of charts required to support an
application
:param mode: mode to control how to apply application manifest
:param cnamespace: (optional) namespace
:param armada_format: (optional) whether to emit in armada format
instead of helm format (with extra header)
:param chart_info: (optional) supporting chart information
extracted from the armada manifest which is used to influence
overrides
:param combined: (optional) whether to apply user overrides on top of
system overrides
:param is_fluxcd_app: whether the app is fluxcd or not
"""
if is_fluxcd_app:
self._generate_helm_application_overrides_fluxcd(
path, app_name, mode, cnamespace,
chart_info, combined)
else:
self._generate_helm_application_overrides_armada(
path, app_name, mode, cnamespace, armada_format,
chart_info, combined)
@helm_context
def _generate_helm_application_overrides_armada(self, path, app_name,
mode=None,
cnamespace=None,
armada_format=False,
chart_info=None,
combined=False):
"""Create the system overrides files for a supported application
This method will generate system helm chart overrides yaml files for a
set of supported charts that comprise an application. If the namespace
is provided only the overrides files for that specified namespace will
be written.
:param app_name: name of the bundle of charts required to support an
application
:param mode: mode to control how to apply application manifest
:param cnamespace: (optional) namespace
:param armada_format: (optional) whether to emit in armada format
instead of helm format (with extra header)
:param chart_info: (optional) supporting chart information
extracted from the armada manifest which is used to influence
overrides
:param combined: (optional) whether to apply user overrides on top of
system overrides
"""
app, plugin_name = self._find_kube_app_and_app_plugin_name(app_name)
# Get a manifest operator to provide a single point of
# manipulation for the chart, chart group and manifest schemas
manifest_op = self.get_armada_manifest_operator(app.name)
# Load the manifest into the operator
armada_manifest = utils.generate_synced_armada_manifest_fqpn(
app.name, app.app_version, app.manifest_file)
manifest_op.load(armada_manifest)
if plugin_name in self.helm_system_applications:
app_overrides = self._get_helm_application_overrides(plugin_name,
cnamespace)
for (chart_name, overrides) in iteritems(app_overrides):
if combined:
# The overrides at this point are the system overrides. For
# charts with multiple namespaces, the overrides would
# contain multiple keys, one for each namespace.
#
# Retrieve the user overrides of each namespace from the
# database and merge this list of user overrides, if they
# exist, with the system overrides. Both system and user
# override contents are then merged based on the namespace,
# prepended with required header and written to
# corresponding files (<namespace>-<chart>.yaml).
file_overrides = []
for chart_namespace in overrides.keys():
try:
db_chart = self.dbapi.helm_override_get(
app.id, chart_name, chart_namespace)
db_user_overrides = db_chart.user_overrides
if db_user_overrides:
file_overrides.append(yaml.dump(
{chart_namespace: yaml.load(db_user_overrides)}))
except exception.HelmOverrideNotFound:
pass
if file_overrides:
# Use dump() instead of safe_dump() as the latter is
# not agreeable with password regex in some overrides
system_overrides = yaml.dump(overrides)
file_overrides.insert(0, system_overrides)
combined_overrides = self.merge_overrides(
file_overrides=file_overrides)
overrides = yaml.load(combined_overrides)
# If armada formatting is wanted, we need to change the
# structure of the yaml file somewhat
for key in overrides:
metadata_name, repo_name, chart_tarfile = \
self._get_chart_info_from_armada_chart(chart_name, key,
chart_info)
new_overrides = self._add_armada_override_header(
chart_name, metadata_name, repo_name, chart_tarfile,
key, overrides[key])
overrides[key] = new_overrides
self._write_chart_overrides(path, chart_name, cnamespace, overrides)
# Update manifest docs based on the plugin directives. If the
# application does not provide a manifest operator, the
# GenericArmadaManifestOperator is used and chart specific
# operations can be skipped.
if manifest_op.APP:
if chart_name in self.chart_operators:
self.chart_operators[chart_name].execute_manifest_updates(
manifest_op)
# Update the manifest based on platform conditions
manifest_op.platform_mode_manifest_updates(self.dbapi, mode)
else:
# Generic applications
for chart in chart_info:
try:
db_chart = self.dbapi.helm_override_get(
app.id, chart.name, chart.namespace)
except exception.HelmOverrideNotFound:
# This routine is to create helm overrides entries
# in database during application-upload so that user
# can list the supported helm chart overrides of the
# application via helm-override-list
try:
values = {
'name': chart.name,
'namespace': chart.namespace,
'app_id': app.id,
}
db_chart = self.dbapi.helm_override_create(values=values)
except Exception as e:
LOG.exception(e)
return
user_overrides = {chart.namespace: {}}
db_user_overrides = db_chart.user_overrides
if db_user_overrides:
user_overrides = yaml.load(yaml.dump(
{chart.namespace: yaml.load(db_user_overrides)}))
metadata_name, repo_name, chart_tarfile =\
self._get_chart_info_from_armada_chart(chart.name, chart.namespace,
chart_info)
new_overrides = self._add_armada_override_header(
chart.name, metadata_name, repo_name, chart_tarfile,
chart.namespace, user_overrides[chart.namespace])
user_overrides[chart.namespace] = new_overrides
self._write_chart_overrides(path, chart.name,
cnamespace, user_overrides)
# Write the manifest doc overrides, a summmary file for easy --value
# generation on the apply, and a unified manifest for deletion.
manifest_op.save_overrides()
manifest_op.save_summary(path=path)
manifest_op.save_delete_manifest()
@helm_context
def _generate_helm_application_overrides_fluxcd(self, path, app_name,
mode=None,
cnamespace=None,
chart_info=None,
@@ -1188,7 +827,7 @@ class HelmOperator(object):
yaml.dump(overrides, f, default_flow_style=False)
os.close(fd)
os.rename(tmppath, filepath)
# Change the permission to be readable to non-root users(ie.Armada)
# Change the permission to be readable to non-root users
os.chmod(filepath, 0o644)
except Exception:
LOG.exception("failed to write overrides file: %s" % filepath)


@@ -22,7 +22,7 @@ class LifecycleHookInfo(base.SysinvObject):
Attributes:
mode (string): Manual or Auto
lifecycle_type (string): Type of the hook (semantic check, operation
rbd, resource, manifest, armada-request).
rbd, resource, manifest, fluxcd-request).
relative_timing (string): Relative timing to the operation (pre/post).
operation (string): Operation being performed.
extra (dict): Can populate data here and it is passed to the outside.


@@ -1,519 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2019-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# All Rights Reserved.
#
""" System inventory Armada manifest operator."""
import abc
import io
import os
import json
import ruamel.yaml as yaml
import six
import tempfile
from glob import glob
from six import iteritems
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
KEY_SCHEMA = 'schema'
VAL_SCHEMA_MANIFEST = 'armada/Manifest/v1'
VAL_SCHEMA_CHART_GROUP = 'armada/ChartGroup/v1'
VAL_SCHEMA_CHART = 'armada/Chart/v1'
KEY_METADATA = 'metadata'
KEY_METADATA_NAME = 'name'
KEY_DATA = 'data'
KEY_DATA_CHART_GROUPS = 'chart_groups' # for manifest doc updates
KEY_DATA_CHART_GROUP = 'chart_group' # for chart group doc updates
KEY_DATA_CHART_NAME = 'chart_name' # for chart doc updates
# Attempt to keep a compact filename
FILE_PREFIX = {
KEY_DATA_CHART_GROUPS: 'm-', # for manifest doc overrides
KEY_DATA_CHART_GROUP: 'cg-', # for chart group doc overrides
KEY_DATA_CHART_NAME: 'c-' # for chart doc overrides
}
FILE_SUFFIX = '-meta.yaml'
SUMMARY_FILE = 'armada-overrides.yaml'
@six.add_metaclass(abc.ABCMeta)
class ArmadaManifestOperator(object):
def __init__(self, manifest_fqpn=None):
self.manifest_path = None # Location to write overrides
self.delete_manifest = None # Unified manifest for app deletion
self.content = [] # original app manifest content
self.docs = {
KEY_DATA_CHART_GROUPS: {}, # LUT for all manifest docs
KEY_DATA_CHART_GROUP: {}, # LUT for all chart group docs
KEY_DATA_CHART_NAME: {} # LUT for all chart docs
}
self.updated = {
KEY_DATA_CHART_GROUPS: set(), # indicate manifest doc change
KEY_DATA_CHART_GROUP: set(), # indicate chart group update
KEY_DATA_CHART_NAME: set() # indicate chart doc update
}
if manifest_fqpn:
self.load(manifest_fqpn)
def __str__(self):
return json.dumps({
'manifest': self.docs[KEY_DATA_CHART_GROUPS],
'chart_groups': self.docs[KEY_DATA_CHART_GROUP],
'charts': self.docs[KEY_DATA_CHART_NAME],
}, indent=2)
def load_summary(self, path):
""" Load the list of generated overrides files
Generate a list of override files that were written for the manifest.
This is used to generate Armada --values overrides for the manifest.
:param path: location of the overrides summary file
:return: a list of override files written
"""
files_written = []
summary_fqpn = os.path.join(path, SUMMARY_FILE)
if os.path.exists(summary_fqpn):
self.manifest_path = os.path.dirname(summary_fqpn)
with io.open(summary_fqpn, 'r', encoding='utf-8') as f:
# The RoundTripLoader removes the superfluous quotes by default,
# resulting the dumped out charts not readable in Armada.
# Set preserve_quotes=True to preserve all the quotes.
files_written = list(yaml.load_all(
f, Loader=yaml.RoundTripLoader, preserve_quotes=True))[0]
return files_written
def load(self, manifest_fqpn):
""" Load the application manifest for processing
:param manifest_fqpn: fully qualified path name of the application manifest
"""
if os.path.exists(manifest_fqpn):
# Save the path for writing overrides files
self.manifest_path = os.path.dirname(manifest_fqpn)
# Save the name for a delete manifest
self.delete_manifest = "%s-del%s" % os.path.splitext(manifest_fqpn)
with io.open(manifest_fqpn, 'r', encoding='utf-8') as f:
# The RoundTripLoader removes the superfluous quotes by default,
# resulting the dumped out charts not readable in Armada.
# Set preserve_quotes=True to preserve all the quotes.
self.content = list(yaml.load_all(
f, Loader=yaml.RoundTripLoader, preserve_quotes=True))
# Generate the lookup tables
# For the individual chart docs
self.docs[KEY_DATA_CHART_NAME] = {
i[KEY_METADATA][KEY_METADATA_NAME]: i
for i in self.content
if i[KEY_SCHEMA] == VAL_SCHEMA_CHART}
# For the chart group docs
self.docs[KEY_DATA_CHART_GROUP] = {
i[KEY_METADATA][KEY_METADATA_NAME]: i
for i in self.content
if i[KEY_SCHEMA] == VAL_SCHEMA_CHART_GROUP}
# For the single manifest doc
self.docs[KEY_DATA_CHART_GROUPS] = {
i[KEY_METADATA][KEY_METADATA_NAME]: i
for i in self.content
if i[KEY_SCHEMA] == VAL_SCHEMA_MANIFEST}
else:
LOG.error("Manifest file %s does not exist" % manifest_fqpn)
def _cleanup_meta_files(self, path):
""" Remove any previously written overrides files
:param path: directory containing manifest overrides files
"""
for k, v in iteritems(FILE_PREFIX):
fileregex = "{}*{}".format(v, FILE_SUFFIX)
filepath = os.path.join(self.manifest_path, fileregex)
for f in glob(filepath):
os.remove(f)
def _cleanup_deletion_manifest(self):
""" Remove any previously written deletion manifest
"""
if self.delete_manifest and os.path.exists(self.delete_manifest):
os.remove(self.delete_manifest)
def _write_file(self, path, filename, pathfilename, data):
""" Write a yaml file
:param path: path to write the file
:param filename: name of the file
:param pathfilename: FQPN of the file
:param data: file data
"""
try:
fd, tmppath = tempfile.mkstemp(dir=path, prefix=filename,
text=True)
with open(tmppath, 'w') as f:
yaml.dump(data, f, Dumper=yaml.RoundTripDumper,
default_flow_style=False)
os.close(fd)
os.rename(tmppath, pathfilename)
# Change the permission to be readable to non-root
# users(ie.Armada)
os.chmod(pathfilename, 0o644)
except Exception:
if os.path.exists(tmppath):
os.remove(tmppath)
LOG.exception("Failed to write meta overrides %s" % pathfilename)
raise
def save_summary(self, path=None):
""" Write a yaml file containing the list of override files generated
:param path: optional alternative location to write the file
"""
files_written = []
for k, v in iteritems(self.updated):
for i in v:
filename = '{}{}{}'.format(FILE_PREFIX[k], i, FILE_SUFFIX)
filepath = os.path.join(self.manifest_path, filename)
files_written.append(filepath)
# Write the list of files generated. This can be read to include with
# the Armada overrides
if path and os.path.exists(path):
# if provided, write to an alternate location
self._write_file(path, SUMMARY_FILE,
os.path.join(path, SUMMARY_FILE),
files_written)
else:
# if not provided, write to the armada directory
self._write_file(self.manifest_path, SUMMARY_FILE,
os.path.join(self.manifest_path, SUMMARY_FILE),
files_written)
def save_overrides(self):
""" Save the overrides files
Write the elements of the manifest (manifest, chart_group, chart) that
was updated into an overrides file. The files are written to the same
directory as the application manifest.
"""
if self.manifest_path and os.path.exists(self.manifest_path):
# cleanup any existing meta override files
self._cleanup_meta_files(self.manifest_path)
# Only write the updated docs as meta overrides
for k, v in iteritems(self.updated):
for i in v:
filename = '{}{}{}'.format(FILE_PREFIX[k], i, FILE_SUFFIX)
filepath = os.path.join(self.manifest_path, filename)
self._write_file(self.manifest_path, filename, filepath,
self.docs[k][i])
else:
LOG.error("Manifest directory %s does not exist" % self.manifest_path)
def save_delete_manifest(self):
""" Save an updated manifest for deletion
armada delete doesn't support --values files as does the apply. To
handle proper deletion of the conditional charts/chart groups that end
up in the overrides files, create a unified file for use when deleting.
NOTE #1: If we want to abandon using manifest overrides files altogether,
this generated file could probably be used on apply and delete.
NOTE #2: Diffing the original manifest and this manifest provides a
clear view of the conditional changes that were enforced by the system
in the plugins
"""
if self.manifest_path and os.path.exists(self.manifest_path):
# cleanup existing deletion manifest
self._cleanup_deletion_manifest()
with open(self.delete_manifest, 'w') as f:
try:
yaml.dump_all(self.content, f, Dumper=yaml.RoundTripDumper,
explicit_start=True,
default_flow_style=False)
LOG.debug("Delete manifest file %s generated" %
self.delete_manifest)
except Exception as e:
LOG.error("Failed to generate delete manifest file %s: "
"%s" % (self.delete_manifest, e))
else:
LOG.error("Manifest directory %s does not exist" % self.manifest_path)
def _validate_manifest(self, manifest):
""" Ensure that the manifest is known
:param manifest: name of the manifest
"""
if manifest not in self.docs[KEY_DATA_CHART_GROUPS]:
LOG.error("%s is not %s" % (manifest, self.docs[KEY_DATA_CHART_GROUPS].keys()))
return False
return True
def _validate_chart_group(self, chart_group):
""" Ensure that the chart_group is known
:param chart_group: name of the chart_group
"""
if chart_group not in self.docs[KEY_DATA_CHART_GROUP]:
LOG.error("%s is an unknown chart_group" % chart_group)
return False
return True
def _validate_chart_groups_from_list(self, chart_group_list):
""" Ensure that all the charts groups in chart group list are known
:param chart_group_list: list of chart groups
"""
for cg in chart_group_list:
if not self._validate_chart_group(cg):
return False
return True
def _validate_chart(self, chart):
""" Ensure that the chart is known
:param chart: name of the chart
"""
if chart not in self.docs[KEY_DATA_CHART_NAME]:
LOG.error("%s is an unknown chart" % chart)
return False
return True
def _validate_chart_from_list(self, chart_list):
""" Ensure that all the charts in chart list are known
:param chart_list: list of charts
"""
for c in chart_list:
if not self._validate_chart(c):
return False
return True
def manifest_chart_groups_delete(self, manifest, chart_group):
""" Delete a chart group from a manifest
This method will delete a chart group from a manifest's list of charts
groups.
:param manifest: manifest containing the list of chart groups
:param chart_group: chart group name to delete
"""
if (not self._validate_manifest(manifest) or
not self._validate_chart_group(chart_group)):
return
if chart_group not in self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][
KEY_DATA_CHART_GROUPS]:
LOG.info("%s is not currently enabled. Cannot delete." %
chart_group)
return
self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][
KEY_DATA_CHART_GROUPS].remove(chart_group)
self.updated[KEY_DATA_CHART_GROUPS].update([manifest])
def manifest_chart_groups_insert(self, manifest, chart_group, before_group=None):
""" Insert a chart group into a manifest
This method will insert a chart group into a manifest at the end of the
list of chart groups. If the before_group parameter is used the chart
group can be placed at a specific point in the chart group list.
:param manifest: manifest containing the list of chart groups
:param chart_group: chart group name to insert
:param before_group: chart group name to be appear after the inserted
chart group in the list
"""
if (not self._validate_manifest(manifest) or
not self._validate_chart_group(chart_group)):
return
if chart_group in self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][KEY_DATA_CHART_GROUPS]:
LOG.error("%s is already enabled. Cannot insert." %
chart_group)
return
if before_group:
if not self._validate_chart_group(before_group):
return
if before_group not in self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][
KEY_DATA_CHART_GROUPS]:
LOG.error("%s is not currently enabled. Cannot insert %s" %
(before_group, chart_group))
return
cgs = self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][KEY_DATA_CHART_GROUPS]
insert_index = cgs.index(before_group)
cgs.insert(insert_index, chart_group)
self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][KEY_DATA_CHART_GROUPS] = cgs
else:
self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][
KEY_DATA_CHART_GROUPS].append(chart_group)
self.updated[KEY_DATA_CHART_GROUPS].update([manifest])
def manifest_chart_groups_set(self, manifest, chart_group_list=None):
""" Set the chart groups for a specific manifest
This will replace the current set of charts groups in the manifest as
specified by the armada/Manifest/v1 schema with the provided list of
chart groups.
:param manifest: manifest containing the list of chart groups
:param chart_group_list: list of chart groups to replace the current set
of chart groups
"""
if not self._validate_manifest(manifest):
return
if chart_group_list:
if not self._validate_chart_groups_from_list(chart_group_list):
return
self.docs[KEY_DATA_CHART_GROUPS][manifest][KEY_DATA][KEY_DATA_CHART_GROUPS] = chart_group_list
self.updated[KEY_DATA_CHART_GROUPS].update([manifest])
else:
LOG.error("Cannot set the manifest chart_groups to an empty list")
def chart_group_chart_delete(self, chart_group, chart):
""" Delete a chart from a chart group
This method will delete a chart from a chart group's list of charts.
:param chart_group: chart group name
:param chart: chart name to remove from the chart list
"""
if (not self._validate_chart_group(chart_group) or
not self._validate_chart(chart)):
return
if chart not in self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][
KEY_DATA_CHART_GROUP]:
LOG.info("%s is not currently enabled. Cannot delete." %
chart)
return
self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][
KEY_DATA_CHART_GROUP].remove(chart)
self.updated[KEY_DATA_CHART_GROUP].update([chart_group])
def chart_group_chart_insert(self, chart_group, chart, before_chart=None):
""" Insert a chart into a chart group
This method will insert a chart into a chart group at the end of the
list of charts. If the before_chart parameter is used the chart can be
placed at a specific point in the chart list.
:param chart_group: chart group name
:param chart: chart name to insert
:param before_chart: chart name to be appear after the inserted chart in
the list
"""
if (not self._validate_chart_group(chart_group) or
not self._validate_chart(chart)):
return
if chart in self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][KEY_DATA_CHART_GROUP]:
LOG.error("%s is already enabled. Cannot insert." %
chart)
return
if before_chart:
if not self._validate_chart(before_chart):
return
if before_chart not in self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][
KEY_DATA_CHART_GROUP]:
LOG.error("%s is not currently enabled. Cannot insert %s" %
(before_chart, chart))
return
cg = self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][KEY_DATA_CHART_GROUP]
insert_index = cg.index(before_chart)
cg.insert(insert_index, chart)
self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][KEY_DATA_CHART_GROUP] = cg
else:
self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][
KEY_DATA_CHART_GROUP].append(chart)
self.updated[KEY_DATA_CHART_GROUP].update([chart_group])
def chart_group_set(self, chart_group, chart_list=None):
""" Set the charts for a specific chart group
This will replace the current set of charts specified in the chart group
with the provided list.
:param chart_group: chart group name
:param chart_list: list of charts to replace the current set of charts
"""
if not self._validate_chart_group(chart_group):
return
if chart_list:
if not self._validate_chart_from_list(chart_list):
return
self.docs[KEY_DATA_CHART_GROUP][chart_group][KEY_DATA][KEY_DATA_CHART_GROUP] = chart_list
self.updated[KEY_DATA_CHART_GROUP].update([chart_group])
else:
LOG.error("Cannot set the chart_group charts to an empty list")
def chart_group_add(self, chart_group, data):
""" Add a new chart group to the manifest.
To support a self-contained dynamic plugin, this method is called to
introduced a new chart group based on the armada/ChartGroup/v1 schema.
:param chart_group: chart group name
:param data: chart group data
"""
# Not implemented... yet.
pass
def chart_add(self, chart, data):
""" Add a new chart to the manifest.
To support a self-contained dynamic plugin, this method is called to
introduced a new chart based on the armada/Chart/v1 schema.
:param chart: chart name
:param data: chart data
"""
# Not implemented... yet.
pass
@abc.abstractmethod
def platform_mode_manifest_updates(self, dbapi, mode):
""" Update the application manifest based on the platform
:param dbapi: DB api object
:param mode: mode to control how to apply the application manifest
"""
pass


@@ -1,29 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# All Rights Reserved.
#
""" System inventory Armada manifest operator."""
from sysinv.helm import manifest_base as base
class GenericArmadaManifestOperator(base.ArmadaManifestOperator):
APP = None
ARMADA_MANIFEST = None
CHART_GROUPS_LUT = {}
CHARTS_LUT = {}
def platform_mode_manifest_updates(self, dbapi, mode):
""" Update the application manifest based on the platform
:param dbapi: DB api object
:param mode: mode to control how to apply the application manifest
"""
pass


@@ -12,7 +12,6 @@
import base64
import os
import psutil
import retrying
import ruamel.yaml as yaml
import tempfile
import threading
@@ -121,59 +120,8 @@ def retrieve_helm_v3_releases():
timer.cancel()
@retry(stop_max_attempt_number=6, wait_fixed=20 * 1000,
retry_on_exception=_retry_on_HelmTillerFailure)
def retrieve_helm_v2_releases():
env = os.environ.copy()
env['PATH'] = '/usr/local/sbin:' + env['PATH']
env['KUBECONFIG'] = kubernetes.KUBERNETES_ADMIN_CONF
helm_list = subprocess.Popen(
['helmv2-cli', '--',
'helm',
'list', '--output', 'yaml', '--tiller-connection-timeout', '5'],
env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
timer = threading.Timer(20, kill_process_and_descendants, [helm_list])
try:
timer.start()
out, err = helm_list.communicate()
if helm_list.returncode != 0:
if err:
raise exception.HelmTillerFailure(reason=err)
# killing the subprocesses with +kill() when timer expires returns EBADF
# because the pipe is closed, but no error string on stderr.
if helm_list.returncode == -9:
raise exception.HelmTillerFailure(
reason="helmv2-cli -- helm list operation timed out after "
"20 seconds. Terminated by threading timer.")
raise exception.HelmTillerFailure(
reason="helmv2-cli -- helm list operation failed without "
"error message, errno=%s" % helm_list.returncode)
deployed_releases = {}
if out:
output = yaml.safe_load(out)
releases = output.get('Releases', {})
for r in releases:
r_name = r.get('Name')
r_version = r.get('Revision')
r_namespace = r.get('Namespace')
deployed_releases.setdefault(r_name, {}).update(
{r_namespace: r_version})
return deployed_releases
except Exception as e:
raise exception.HelmTillerFailure(
reason="Failed to retrieve helmv2 releases: %s" % e)
finally:
timer.cancel()
def retrieve_helm_releases():
"""Retrieve the deployed helm releases from tiller
"""Retrieve the deployed helm releases
Get the name, namespace and version for the deployed releases
by querying helm tiller
@@ -182,58 +130,10 @@ def retrieve_helm_releases():
deployed_releases = {}
deployed_releases.update(retrieve_helm_v3_releases())
deployed_releases.update(retrieve_helm_v2_releases())
return deployed_releases
def delete_helm_release(release):
"""Delete helm v2 release
This method deletes a helm v2 release without --purge which removes
all associated resources from kubernetes but not from the store(ETCD)
In the scenario of updating application, the method is needed to clean
up the releases if there were deployed releases in the old application
but not in the new application
:param release: the name of the helm release
"""
# NOTE: This mechanism deletes armada/tiller managed releases.
# This could be adapted to also delete helm v3 releases using
# 'helm uninstall'.
env = os.environ.copy()
env['PATH'] = '/usr/local/sbin:' + env['PATH']
env['KUBECONFIG'] = kubernetes.KUBERNETES_ADMIN_CONF
helm_cmd = subprocess.Popen(
['helmv2-cli', '--',
'helm', 'delete', release, '--tiller-connection-timeout', '5'],
env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
timer = threading.Timer(20, kill_process_and_descendants, [helm_cmd])
try:
timer.start()
out, err = helm_cmd.communicate()
if err and not out:
if ("deletion completed" or "not found" or "is already deleted") in err:
LOG.debug("Release %s not found or deleted already" % release)
return True
raise exception.HelmTillerFailure(
reason="Failed to delete release: %s" % err)
elif not err and not out:
err_msg = "Failed to delete release. " \
"Helm tiller response timeout."
raise exception.HelmTillerFailure(reason=err_msg)
return True
except Exception as e:
LOG.error("Failed to delete release: %s" % e)
raise exception.HelmTillerFailure(
reason="Failed to delete release: %s" % e)
finally:
timer.cancel()
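
# Illustrative sketch, not part of this change: callers of the removed
# v2 cleanup above can rely on the existing v3 helper shown below, e.g.
#   delete_helm_v3_release('my-release', namespace='kube-system')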
def delete_helm_v3_release(release, namespace="default", flags=None):
"""Delete helm v3 release
@@ -278,67 +178,6 @@ def delete_helm_v3_release(release, namespace="default", flags=None):
timer.cancel()
def _retry_on_HelmTillerFailure_reset_tiller(ex):
LOG.info('Caught HelmTillerFailure exception. Resetting tiller and retrying... '
'Exception: {}'.format(ex))
env = os.environ.copy()
env['PATH'] = '/usr/local/sbin:' + env['PATH']
env['KUBECONFIG'] = kubernetes.KUBERNETES_ADMIN_CONF
helm_reset = subprocess.Popen(
['helmv2-cli', '--',
'helm', 'reset', '--force'],
env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
timer = threading.Timer(20, kill_process_and_descendants, [helm_reset])
try:
timer.start()
out, err = helm_reset.communicate()
if helm_reset.returncode == 0:
return isinstance(ex, exception.HelmTillerFailure)
elif err:
raise exception.HelmTillerFailure(reason=err)
else:
err_msg = "helmv2-cli -- helm reset operation failed."
raise exception.HelmTillerFailure(reason=err_msg)
except Exception as e:
raise exception.HelmTillerFailure(
reason="Failed to reset tiller: %s" % e)
finally:
timer.cancel()
@retrying.retry(stop_max_attempt_number=2,
retry_on_exception=_retry_on_HelmTillerFailure_reset_tiller)
def get_openstack_pending_install_charts():
env = os.environ.copy()
env['PATH'] = '/usr/local/sbin:' + env['PATH']
env['KUBECONFIG'] = kubernetes.KUBERNETES_ADMIN_CONF
helm_list = subprocess.Popen(
['helmv2-cli', '--',
'helm', 'list', '--namespace', 'openstack',
'--pending', '--tiller-connection-timeout', '5'],
env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
timer = threading.Timer(20, kill_process_and_descendants, [helm_list])
try:
timer.start()
out, err = helm_list.communicate()
if helm_list.returncode == 0:
return out
elif err:
raise exception.HelmTillerFailure(reason=err)
else:
err_msg = "helmv2-cli -- helm list operation timeout."
raise exception.HelmTillerFailure(reason=err_msg)
except Exception as e:
raise exception.HelmTillerFailure(
reason="Failed to obtain pending charts list: %s" % e)
finally:
timer.cancel()
def install_helm_chart_with_dry_run(args=None):
"""Simulate a chart install


@@ -415,7 +415,7 @@ class TestPostKubeUpgrade(TestKubeUpgrade,
dbutils.create_test_app(
name='stx-openstack',
app_version='1.0-19',
manifest_name='openstack-armada-manifest',
manifest_name='manifest',
manifest_file='stx-openstack.yaml',
status='applied',
active=True)


@@ -197,7 +197,7 @@ class TestKubeAppImageParser(base.TestCase):
self.assertEqual(expected, images_dict_with_local_registry)
def test_generate_download_images_with_merge_dict(self):
armada_chart_imgs = copy.deepcopy(IMAGES_RESOURCE)
chart_imgs = copy.deepcopy(IMAGES_RESOURCE)
override_imgs = {
'images': {
@@ -313,7 +313,7 @@ class TestKubeAppImageParser(base.TestCase):
}
download_imgs_dict = self.image_parser.merge_dict(
armada_chart_imgs, override_imgs)
chart_imgs, override_imgs)
self.assertEqual(expected, download_imgs_dict)
def test_generate_download_images_list(self):


@@ -1,58 +0,0 @@
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import keyring
import mock
from sysinv.helm.helm import HelmOperator
from sysinv.helm.manifest_base import ArmadaManifestOperator
from sysinv.tests.db import utils as dbutils
from sysinv.tests.helm import base as helm_base
class HelmOperatorTestSuiteMixin(helm_base.HelmTestCaseMixin):
"""When HelmOperatorTestSuiteMixin is added as a Mixin
alongside a subclass of BaseHostTestCase
these testcases are added to it
This also requires an AppMixin to provide app_name
"""
def setUp(self):
super(HelmOperatorTestSuiteMixin, self).setUp()
self.app = dbutils.create_test_app(name=self.app_name)
# If a ceph keyring entry is missing, a subprocess will be invoked
# so a fake keyring password is being supplied here.
keyring.set_password('glance', 'admin_keyring', 'FakePassword1*')
# Armada routines that write to disk can be mocked away
save_overrides = mock.patch.object(ArmadaManifestOperator,
'save_overrides')
self.mock_save_overrides = save_overrides.start()
self.addCleanup(save_overrides.stop)
save_delete_manifest = mock.patch.object(ArmadaManifestOperator,
'save_delete_manifest')
save_delete_manifest.start()
self.addCleanup(save_delete_manifest.stop)
save_summary = mock.patch.object(ArmadaManifestOperator,
'save_summary')
save_summary.start()
self.addCleanup(save_summary.stop)
# _write_file is called per helm chart
write_file = mock.patch.object(ArmadaManifestOperator,
'_write_file')
write_file.start()
self.addCleanup(write_file.stop)
def tearDown(self):
super(HelmOperatorTestSuiteMixin, self).tearDown()
@mock.patch.object(HelmOperator, '_write_chart_overrides')
def test_generate_helm_chart_overrides(self, mock_write_chart):
self.operator.generate_helm_application_overrides(self.path_name,
self.app_name)
assert self.mock_save_overrides.called