Armada-Sysinv integration

Initial implementation of the Armada integration with sysinv, which
includes:

- Basic application upload via system application-upload command
- Application install via system application-apply command
- Application remove via system application-remove command
- Application delete via system application-delete command
- Application list and detail viewing via system
  application-list and application-show commands.

This implementation does not cover the following items, which are
either still under discussion or in planning:
a) support for remote CLI where application tarball resides in
   the client machine
b) support for air-gapped scenario/embedded private images
c) support for custom apps' user overrides

Tests conducted:
- config controller
- tox
- functional tests (both OpenStack and a simple test app):
    - upload
    - apply
    - remove
    - delete
    - show
    - list
    - release group upgrade with user overrides
- failure tests:
    - no tar file supplied
    - corrupted tar file
    - app already exists/does not exist
    - upload failure (missing manifest, multiple manifests,
      no image tags, checksum failure, etc.)
    - apply failure (nodes are not labeled, image download
      failure, etc.)
    - operation not permitted

Change-Id: Iec27f356bd0047b2c7ef860ab3a2528f5a371868
Story: 2003908
Task: 26792
Signed-off-by: Tee Ngo <Tee.Ngo@windriver.com>
Author: Tee Ngo
Date:   2018-11-01 09:11:28 -04:00
Parent: 8fbe7aebd6
Commit: d8d8851fa2
23 changed files with 1729 additions and 77 deletions

@ -75,6 +75,7 @@ class sysinv (
) {
include sysinv::params
include ::platform::kubernetes::params
Package['sysinv'] -> Sysinv_config<||>
Package['sysinv'] -> Sysinv_api_paste_ini<||>
@ -212,4 +213,11 @@ class sysinv (
sysinv_api_paste_ini {
'filter:authtoken/region_name': value => $region_name;
}
if $::platform::kubernetes::params::enabled == true {
$armada_img_tag = "quay.io/airshipit/armada:f807c3a1ec727c883c772ffc618f084d960ed5c9"
sysinv_config {
'DEFAULT/armada_image_tag': value => $armada_img_tag;
}
}
}

@ -23,7 +23,6 @@ Root wrapper for OpenStack services
from __future__ import print_function
from six.moves import configparser
import logging
import os
import pwd
@ -31,6 +30,7 @@ import signal
import subprocess
import sys
from six.moves import configparser
RC_UNAUTHORIZED = 99
RC_NOCOMMAND = 98

@ -16,13 +16,13 @@
# under the License.
from six.moves import configparser
import logging
import logging.handlers
import os
import string
from cgtsclient.openstack.common.rootwrap import filters
from six.moves import configparser
class NoFilterMatched(Exception):

@ -0,0 +1,67 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# -*- encoding: utf-8 -*-
#
from cgtsclient.common import base
class App(base.Resource):
def __repr__(self):
return "<app %s>" % self._info
class AppManager(base.Manager):
resource_class = App
@staticmethod
def _path(name=None):
return '/v1/apps/%s' % name if name else '/v1/apps'
def list(self):
"""Retrieve the list of containerized apps known to the system."""
return self._list(self._path(), 'apps')
def get(self, app_name):
"""Retrieve the details of a given app
:param name: name of the application
"""
try:
return self._list(self._path(app_name))[0]
except IndexError:
return None
def upload(self, data):
"""Stage the specified application, getting it ready for deployment.
:param data: application name and location of tarfile
"""
return self._create(self._path(), data)
def apply(self, app_name):
"""Install/upgrade the specified application.
:param app_name: name of the application
"""
return self._update(self._path(app_name) + '?directive=apply',
{'values': {}})
def remove(self, app_name):
"""Uninstall the specified application
:param name: app_name
"""
return self._update(self._path(app_name) + '?directive=remove',
{'values': {}})
def delete(self, app_name):
"""Delete application data
:param name: app_name
"""
return self._delete(self._path(app_name))
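For reference, a minimal usage sketch of the AppManager above. "cc" is assumed to be an already-authenticated cgtsclient Client instance, and the application name and tarball path are illustrative only:

# Sketch only: 'cc' is an assumed, already-authenticated
# cgtsclient.v1.client.Client; the name and tarball path are illustrative.
resp = cc.app.upload({'name': 'stx-openstack',
                      'tarfile': '/scratch/stx-openstack.tgz'})
cc.app.apply('stx-openstack')      # async; poll progress with cc.app.get()
for a in cc.app.list():
    print("%s %s" % (a.name, a.status))
cc.app.remove('stx-openstack')     # uninstall the charts
cc.app.delete('stx-openstack')     # drop the uploaded application data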

@ -0,0 +1,93 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
from cgtsclient.common import utils
from cgtsclient import exc
from collections import OrderedDict
def _print_application_show(app):
ordereddata = OrderedDict(sorted(app.to_dict().items(),
key=lambda t: t[0]))
utils.print_dict(ordereddata, wrap=72)
def do_application_list(cc, args):
"""List all containerized applications"""
apps = cc.app.list()
labels = ['application', 'manifest name', 'manifest file', 'status']
fields = ['name', 'manifest_name', 'manifest_file', 'status']
utils.print_list(apps, fields, labels, sortby=0)
@utils.arg('name', metavar='<app name>',
help="Name of the application")
def do_application_show(cc, args):
"""Show application details"""
try:
app = cc.app.get(args.name)
_print_application_show(app)
except exc.HTTPNotFound:
raise exc.CommandError('application not found: %s' % args.name)
@utils.arg('name', metavar='<app name>',
help='Name of the application')
@utils.arg('tarfile', metavar='<tar file>',
help='Tarball containing application manifest, helm charts and'
' config file')
def do_application_upload(cc, args):
"""Upload application Helm chart(s) and manifest"""
tarfile = args.tarfile
if not os.path.isabs(tarfile):
tarfile = os.path.join(os.getcwd(), tarfile)
if not os.path.isfile(tarfile):
raise exc.CommandError("Error: Tar file %s does not exist" % tarfile)
if not tarfile.endswith('.tgz') and not tarfile.endswith('.tar.gz'):
raise exc.CommandError("Error: File %s has unrecognizable tar file "
"extension." % tarfile)
data = {'name': args.name,
'tarfile': tarfile}
response = cc.app.upload(data)
_print_application_show(response)
@utils.arg('name', metavar='<app name>',
help='Name of the application')
def do_application_apply(cc, args):
"""Apply/reapply the application manifest"""
try:
response = cc.app.apply(args.name)
_print_application_show(response)
except exc.HTTPNotFound:
raise exc.CommandError('Application not found: %s' % args.name)
@utils.arg('name', metavar='<app name>',
help='Name of the application to be uninstalled')
def do_application_remove(cc, args):
"""Uninstall the application"""
try:
response = cc.app.remove(args.name)
_print_application_show(response)
except exc.HTTPNotFound:
raise exc.CommandError('Application not found: %s' % args.name)
@utils.arg('name', metavar='<application name>',
help='Name of the application to be deleted')
def do_application_delete(cc, args):
"""Remove the uninstalled application from the system"""
try:
cc.app.delete(args.name)
print('Application %s deleted.' % args.name)
except exc.HTTPNotFound:
raise exc.CommandError('Application not found: %s' % args.name)

@ -20,6 +20,7 @@
from cgtsclient.common import http
from cgtsclient.v1 import address
from cgtsclient.v1 import address_pool
from cgtsclient.v1 import app
from cgtsclient.v1 import ceph_mon
from cgtsclient.v1 import certificate
from cgtsclient.v1 import cluster
@ -152,3 +153,4 @@ class Client(http.HTTPClient):
self.helm = helm.HelmManager(self)
self.label = label.KubernetesLabelManager(self)
self.fernet = fernet.FernetManager(self)
self.app = app.AppManager(self)

@ -8,6 +8,7 @@
from cgtsclient.common import utils
from cgtsclient.v1 import address_pool_shell
from cgtsclient.v1 import address_shell
from cgtsclient.v1 import app_shell
from cgtsclient.v1 import ceph_mon_shell
from cgtsclient.v1 import certificate_shell
from cgtsclient.v1 import cluster_shell
@ -113,6 +114,7 @@ COMMAND_MODULES = [
storage_tier_shell,
helm_shell,
label_shell,
app_shell,
]

@ -37,6 +37,7 @@ from sysinv.api.controllers.v1 import firewallrules
from sysinv.api.controllers.v1 import health
from sysinv.api.controllers.v1 import helm_charts
from sysinv.api.controllers.v1 import host
from sysinv.api.controllers.v1 import kube_app
from sysinv.api.controllers.v1 import label
from sysinv.api.controllers.v1 import interface
from sysinv.api.controllers.v1 import interface_network
@ -237,6 +238,9 @@ class V1(base.APIBase):
fernet_repo = [link.Link]
"Links to the fernet repo resource"
apps = [link.Link]
"Links to the application resource "
@classmethod
def convert(self):
v1 = V1()
@ -738,6 +742,15 @@ class V1(base.APIBase):
'fernet_repo', '',
bookmark=True)
]
v1.apps = [link.Link.make_link('self',
pecan.request.host_url,
'apps', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'apps', '',
bookmark=True)]
return v1
@ -803,6 +816,7 @@ class Controller(rest.RestController):
license = license.LicenseController()
labels = label.LabelController()
fernet_repo = fernet_repo.FernetKeyController()
apps = kube_app.KubeAppController()
@wsme_pecan.wsexpose(V1)
def get(self):

@ -4,11 +4,8 @@
# SPDX-License-Identifier: Apache-2.0
#
import os
import pecan
from pecan import rest
import subprocess
import tempfile
import yaml
import wsme
@ -24,73 +21,9 @@ from sysinv.openstack.common.gettextutils import _
LOG = log.getLogger(__name__)
SYSTEM_CHARTS = ['mariadb', 'rabbitmq', 'ingress']
class HelmChartsController(rest.RestController):
def merge_overrides(self, file_overrides=[], set_overrides=[]):
""" Merge helm overrides together.
:param values: A dict of different types of user override values,
'files' (which generally specify many overrides) and
'set' (which generally specify one override).
"""
# At this point we have potentially two separate types of overrides
# specified by system or user, values from files and values passed in
# via --set . We need to ensure that we call helm using the same
# mechanisms to ensure the same behaviour.
cmd = ['helm', 'install', '--dry-run', '--debug']
# Process the newly-passed-in override values
tmpfiles = []
for value_file in file_overrides:
# For values passed in from files, write them back out to
# temporary files.
tmpfile = tempfile.NamedTemporaryFile(delete=False)
tmpfile.write(value_file)
tmpfile.close()
tmpfiles.append(tmpfile.name)
cmd.extend(['--values', tmpfile.name])
for value_set in set_overrides:
cmd.extend(['--set', value_set])
env = os.environ.copy()
env['KUBECONFIG'] = '/etc/kubernetes/admin.conf'
# Make a temporary directory with a fake chart in it
try:
tmpdir = tempfile.mkdtemp()
chartfile = tmpdir + '/Chart.yaml'
with open(chartfile, 'w') as tmpchart:
tmpchart.write('name: mychart\napiVersion: v1\n'
'version: 0.1.0\n')
cmd.append(tmpdir)
# Apply changes by calling out to helm to do values merge
# using a dummy chart.
# NOTE: this requires running sysinv-api as root, will fix it
# to use RPC in a followup patch.
output = subprocess.check_output(cmd, env=env)
# Check output for failure
# Extract the info we want.
values = output.split('USER-SUPPLIED VALUES:\n')[1].split(
'\nCOMPUTED VALUES:')[0]
except Exception:
raise
finally:
os.remove(chartfile)
os.rmdir(tmpdir)
for tmpfile in tmpfiles:
os.remove(tmpfile)
return values
@wsme_pecan.wsexpose(wtypes.text)
def get_all(self):
"""Provides information about the available charts to override."""
@ -133,8 +66,8 @@ class HelmChartsController(rest.RestController):
# with user-specified overrides taking priority over the system
# overrides.
file_overrides = [system_overrides, user_overrides]
combined_overrides = self.merge_overrides(
file_overrides=file_overrides)
combined_overrides = pecan.request.rpcapi.merge_overrides(
pecan.request.context, file_overrides=file_overrides)
rpc_chart = {'name': name,
'namespace': namespace,
@ -173,7 +106,7 @@ class HelmChartsController(rest.RestController):
db_chart = objects.helm_overrides.get_by_name(
pecan.request.context, name, namespace)
except exception.HelmOverrideNotFound:
if name in SYSTEM_CHARTS:
if name in constants.SUPPORTED_HELM_CHARTS:
pecan.request.dbapi.helm_override_create({
'name': name,
'namespace': namespace,
@ -191,8 +124,9 @@ class HelmChartsController(rest.RestController):
raise wsme.exc.ClientSideError(_("Invalid flag: %s must be either "
"'reuse' or 'reset'.") % flag)
user_overrides = self.merge_overrides(
file_overrides=file_overrides, set_overrides=set_overrides)
user_overrides = pecan.request.rpcapi.merge_overrides(
pecan.request.context, file_overrides=file_overrides,
set_overrides=set_overrides)
# save chart overrides back to DB
db_chart.user_overrides = user_overrides

@ -0,0 +1,348 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import pecan
from pecan import rest
import shutil
import subprocess
import tempfile
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
import yaml
from contextlib import contextmanager
from sysinv import objects
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv.openstack.common import log
from sysinv.openstack.common.gettextutils import _
LOG = log.getLogger(__name__)
@contextmanager
def TempDirectory():
tmpdir = tempfile.mkdtemp()
saved_umask = os.umask(0077)
try:
yield tmpdir
finally:
LOG.debug("Cleaning up temp directory %s" % tmpdir)
os.umask(saved_umask)
shutil.rmtree(tmpdir)
class KubeApp(base.APIBase):
"""API representation of a containerized application."""
id = int
"Unique ID for this application"
name = wtypes.text
"Represents the name of the application"
created_at = wtypes.datetime.datetime
"Represents the time the application was uploaded"
updated_at = wtypes.datetime.datetime
"Represents the time the application was updated"
manifest_name = wtypes.text
"Represents the name of the application manifest"
manifest_file = wtypes.text
"Represents the filename of the application manifest"
status = wtypes.text
"Represents the installation status of the application"
def __init__(self, **kwargs):
self.fields = objects.kube_app.fields.keys()
for k in self.fields:
if not hasattr(self, k):
continue
setattr(self, k, kwargs.get(k, wtypes.Unset))
@classmethod
def convert_with_links(cls, rpc_app, expand=True):
app = KubeApp(**rpc_app.as_dict())
if not expand:
app.unset_fields_except(['name', 'manifest_name',
'manifest_file', 'status'])
# skip the id
app.id = wtypes.Unset
return app
class KubeAppCollection(collection.Collection):
"""API representation of a collection of Helm applications."""
apps = [KubeApp]
"A list containing application objects"
def __init__(self, **kwargs):
self._type = 'apps'
@classmethod
def convert_with_links(cls, rpc_apps, expand=False):
collection = KubeAppCollection()
collection.apps = [KubeApp.convert_with_links(n, expand)
for n in rpc_apps]
return collection
LOCK_NAME = 'KubeAppController'
class KubeAppController(rest.RestController):
"""REST controller for Helm applications."""
def __init__(self, parent=None, **kwargs):
self._parent = parent
def _check_environment(self):
if not utils.is_kubernetes_config():
raise exception.OperationNotPermitted
def _check_tarfile(self, app_name, app_tarfile):
if app_name and app_tarfile:
if not os.path.isfile(app_tarfile):
raise wsme.exc.ClientSideError(_(
"Application-upload rejected: application tar file %s does "
"not exist." % app_tarfile))
if (not app_tarfile.endswith('.tgz') and
not app_tarfile.endswith('.tar.gz')):
raise wsme.exc.ClientSideError(_(
"Application-upload rejected: %s is not a tar file" %
app_tarfile))
with TempDirectory() as app_path:
if not cutils.extract_tarfile(app_path, app_tarfile):
raise wsme.exc.ClientSideError(_(
"Application-upload rejected: failed to extract tar file "
"%s." % os.path.basename(app_tarfile)))
# If checksum file is included in the tarball, verify its contents.
if not self._verify_checksum(app_path):
raise wsme.exc.ClientSideError(_(
"Application-upload rejected: checksum validation failed."))
mname, mfile = self._find_manifest_file(app_path)
charts_dir = os.path.join(app_path, 'charts')
if os.path.isdir(charts_dir):
tar_filelist = cutils.get_files_matching(app_path, '.tgz')
if (len(os.listdir(charts_dir)) == 0 or
not tar_filelist):
raise wsme.exc.ClientSideError(_(
"Application-upload rejected: tar file "
"contains no Helm charts."))
for p, f in tar_filelist:
if not cutils.extract_tarfile(p, os.path.join(p, f)):
raise wsme.exc.ClientSideError(_(
"Application-upload rejected: failed to extract tar "
"file %s" % os.path.basename(f)))
LOG.info("Tar file of application %s verified." % app_name)
return mname, mfile
else:
raise ValueError(_(
"Application-upload rejected: both application name and tar "
"file must be specified."))
def _verify_checksum(self, app_path):
rc = True
for file in os.listdir(app_path):
if file.endswith('.md5'):
cwd = os.getcwd()
os.chdir(app_path)
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call(['md5sum', '-c', file],
stdout=fnull, stderr=fnull)
LOG.info("Checksum file is included and validated.")
except subprocess.CalledProcessError as e:
LOG.exception(e)
rc = False
finally:
os.chdir(cwd)
return rc
# Do we need to make the inclusion of md5 file a hard requirement?
LOG.info("Checksum file is not included, skipping validation.")
return rc
def _find_manifest_file(self, app_path):
# It is expected that there is only one manifest file
# per application and the file exists at top level of
# the application path.
def _is_manifest(yaml_file):
with open(yaml_file, 'r') as f:
docs = yaml.load_all(f)
for doc in docs:
try:
if "armada/Manifest" in doc['schema']:
manifest_name = doc['metadata']['name']
return manifest_name, yaml_file
except KeyError:
# Could be some other yaml files
pass
return None, None
mfiles = []
for file in os.listdir(app_path):
if file.endswith('.yaml'):
yaml_file = os.path.join(app_path, file)
mname, mfile = _is_manifest(yaml_file)
if mfile:
mfiles.append((mname, mfile))
if mfiles:
if len(mfiles) == 1:
return mfiles[0]
else:
raise wsme.exc.ClientSideError(_(
"Application-upload rejected: tar file contains more "
"than one manifest file."))
else:
raise wsme.exc.ClientSideError(_(
"Application-upload rejected: manifest file is missing."))
def _get_one(self, app_name):
# can result in KubeAppNotFound
kube_app = objects.kube_app.get_by_name(
pecan.request.context, app_name)
return KubeApp.convert_with_links(kube_app)
@wsme_pecan.wsexpose(KubeAppCollection)
def get_all(self):
self._check_environment()
apps = pecan.request.dbapi.kube_app_get_all()
return KubeAppCollection.convert_with_links(apps)
@wsme_pecan.wsexpose(KubeApp, wtypes.text)
def get_one(self, app_name):
"""Retrieve a single application."""
self._check_environment()
return self._get_one(app_name)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(KubeApp, body=types.apidict)
def post(self, body):
"""Uploading an application to be deployed by Armada"""
self._check_environment()
name = body.get('name')
tarfile = body.get('tarfile')
try:
objects.kube_app.get_by_name(pecan.request.context, name)
raise wsme.exc.ClientSideError(_(
"Application-upload rejected: application %s already exists." %
name))
except exception.KubeAppNotFound:
pass
mname, mfile = self._check_tarfile(name, tarfile)
# Create a database entry and make an rpc async request to upload
# the application
app_data = {'name': name,
'manifest_name': mname,
'manifest_file': os.path.basename(mfile),
'status': constants.APP_UPLOAD_IN_PROGRESS}
try:
new_app = pecan.request.dbapi.kube_app_create(app_data)
except exception.SysinvException as e:
LOG.exception(e)
raise
pecan.request.rpcapi.perform_app_upload(pecan.request.context,
new_app, tarfile)
return KubeApp.convert_with_links(new_app)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(KubeApp, wtypes.text, wtypes.text, wtypes.text)
def patch(self, name, directive, values):
"""Install/update the specified application
:param name: application name
:param directive: either 'apply' (fresh install/update) or 'remove'
"""
self._check_environment()
if directive not in ['apply', 'remove']:
raise exception.OperationNotPermitted
try:
db_app = objects.kube_app.get_by_name(pecan.request.context, name)
except exception.KubeAppNotFound:
LOG.error("Received a request to %s app %s which does not exist." %
(directive, name))
raise wsme.exc.ClientSideError(_(
"Application-%s rejected: application not found." % directive))
if directive == 'apply':
if db_app.status == constants.APP_APPLY_IN_PROGRESS:
raise wsme.exc.ClientSideError(_(
"Application-apply rejected: install/update is already "
"in progress."))
elif db_app.status not in [constants.APP_UPLOAD_SUCCESS,
constants.APP_APPLY_FAILURE,
constants.APP_APPLY_SUCCESS]:
raise wsme.exc.ClientSideError(_(
"Application-apply rejected: operation is not allowed "
"while the current status is %s." % db_app.status))
db_app.status = constants.APP_APPLY_IN_PROGRESS
db_app.save()
pecan.request.rpcapi.perform_app_apply(pecan.request.context,
db_app)
return KubeApp.convert_with_links(db_app)
else:
if db_app.status not in [constants.APP_APPLY_SUCCESS,
constants.APP_APPLY_FAILURE]:
raise wsme.exc.ClientSideError(_(
"Application-remove rejected: operation is not allowed while "
"the current status is %s." % db_app.status))
db_app.status = constants.APP_REMOVE_IN_PROGRESS
db_app.save()
pecan.request.rpcapi.perform_app_remove(pecan.request.context,
db_app)
return KubeApp.convert_with_links(db_app)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
def delete(self, name):
"""Delete the application with the given name
:param name: application name
"""
self._check_environment()
try:
db_app = objects.kube_app.get_by_name(pecan.request.context, name)
except exception.KubeAppNotFound:
LOG.error("Received a request to delete app %s which does not "
"exist." % name)
raise
response = pecan.request.rpcapi.perform_app_delete(
pecan.request.context, db_app)
if response:
raise wsme.exc.ClientSideError(_(
"%s." % response))

@ -1404,7 +1404,7 @@ SUPPORTED_HELM_CHARTS = [
# until we fully integrate our k8s openstack application. Not
# sure at this point if we'll need an AIO flavor (without Ceph
# but with NFS support)
HELM_APP_OPENSTACK = 'wr-openstack'
HELM_APP_OPENSTACK = 'stx-openstack'
HELM_APP_OSHELM_DEVELOPER = 'openstack-helm-developer'
HELM_APP_OSHELM_MULTINODE = 'openstack-helm-multinode'
@ -1480,3 +1480,35 @@ K8S_RBD_PROV_NAMESPACE_DEFAULT = "kube-system"
K8S_RBD_PROV_USER_NAME = 'admin'
K8S_RBD_PROV_ADMIN_SECRET_NAME = 'ceph-admin'
K8S_RBD_PROV_STOR_CLASS_NAME = 'general'
##################################
# Kubernetes application section #
##################################
# Working paths
APP_INSTALL_PATH = '/scratch/apps'
APP_MANIFEST_PATH = os.path.join(tsc.PLATFORM_PATH, 'armada', tsc.SW_VERSION)
# State constants
APP_UPLOAD_IN_PROGRESS = 'uploading'
APP_UPLOAD_SUCCESS = 'uploaded'
APP_UPLOAD_FAILURE = 'upload-failed'
APP_APPLY_IN_PROGRESS = 'applying'
APP_APPLY_SUCCESS = 'applied'
APP_APPLY_FAILURE = 'apply-failed'
APP_REMOVE_IN_PROGRESS = 'removing'
APP_REMOVE_FAILURE = 'remove-failed'
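# For reference, the status transitions enforced by the API controller
# and conductor in this change:
#   upload: uploading -> uploaded | upload-failed
#   apply:  allowed from uploaded, applied or apply-failed;
#           applying -> applied | apply-failed
#   remove: allowed from applied or apply-failed;
#           removing -> uploaded | remove-failed
#   delete: allowed only from uploaded or upload-failed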
# Operation constants
APP_UPLOAD_OP = 'upload'
APP_APPLY_OP = 'apply'
APP_REMOVE_OP = 'remove'
APP_DELETE_OP = 'delete'
# Node label operation constants
LABEL_ASSIGN_OP = 'assign'
LABEL_REMOVE_OP = 'remove'
# Default node labels
CONTROL_PLANE_LABEL = 'openstack-control-plane=enabled'
COMPUTE_NODE_LABEL = 'openstack-compute-node=enabled'
OPENVSWITCH_LABEL = 'openvswitch=enabled'

@ -266,6 +266,18 @@ class CephPoolSetParamFailure(CephFailure):
"Reason: %(reason)s")
class KubeAppUploadFailure(SysinvException):
message = _("Upload of application %(name)s failed: %(reason)s")
class KubeAppApplyFailure(SysinvException):
message = _("Deployment of application %(name)s failed: %(reason)s")
class KubeAppDeleteFailure(SysinvException):
message = _("Delete of application %(name)s failed: %(reason)s")
class InvalidCPUInfo(Invalid):
message = _("Unacceptable CPU info") + ": %(reason)s"
@ -591,6 +603,10 @@ class HelmOverrideAlreadyExists(Conflict):
"%(namespace)s already exists.")
class KubeAppAlreadyExists(Conflict):
message = _("An application with name %(name)s already exists.")
class InstanceDeployFailure(Invalid):
message = _("Failed to deploy instance: %(reason)s")
@ -945,6 +961,10 @@ class CertificateTypeNotFound(NotFound):
message = _("No certificate type of %(certtype)s")
class KubeAppNotFound(NotFound):
message = _("No application with name %(name)s.")
class SDNNotEnabled(SysinvException):
message = _("SDN configuration is not enabled.")

@ -1788,3 +1788,20 @@ def _format_ceph_mon_address(ip_address, service_port_mon):
return '%s:%d' % (ip_address, service_port_mon)
else:
return '[%s]:%d' % (ip_address, service_port_mon)
def get_files_matching(path, pattern):
return [(root, file) for root, dirs, files in os.walk(path, topdown=True)
for file in files if file.endswith(pattern)]
def extract_tarfile(target_dir, tarfile):
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call(['tar', '-xf', tarfile, '-m', '--no-same-owner',
'--no-same-permissions', '-C', target_dir],
stdout=fnull, stderr=fnull)
return True
except subprocess.CalledProcessError as e:
LOG.error("Error while extracting tarfile %s: %s" % (tarfile, e))
return False

@ -0,0 +1,764 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# All Rights Reserved.
#
""" System Inventory Kubernetes Application Operator."""
import docker
import eventlet
import grp
import os
import re
import shutil
import stat
import subprocess
import threading
import time
import yaml
from collections import namedtuple
from oslo_config import cfg
from oslo_log import log as logging
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import kubernetes
from sysinv.common import utils as cutils
from sysinv.helm import common
from sysinv.helm import helm
LOG = logging.getLogger(__name__)
kube_app_opts = [
cfg.StrOpt('armada_image_tag',
default=('quay.io/airshipit/armada:'
'f807c3a1ec727c883c772ffc618f084d960ed5c9'),
help='Docker image tag of Armada.'),
]
CONF = cfg.CONF
CONF.register_opts(kube_app_opts)
ARMADA_CONTAINER_NAME = 'armada_service'
ARMADA_ERRORS = ['ERROR', 'failed', 'timed out']
MAX_DOWNLOAD_THREAD = 20
Chart = namedtuple('Chart', 'name namespace')
class AppOperator(object):
"""Class to encapsulate Kubernetes App operations for System Inventory"""
def __init__(self, dbapi):
self._dbapi = dbapi
self._docker = DockerHelper()
self._helm = helm.HelmOperator(self._dbapi)
self._kube = kubernetes.KubeOperator(self._dbapi)
self._lock = threading.Lock()
def _cleanup(self, app):
"""" Remove application directories and override files """
try:
# TODO(tngo): Disable node labeling for system app for now until
# vim integration with sysinv for container support is ready
if not app.system_app and app.status != constants.APP_UPLOAD_FAILURE:
self._process_node_labels(app, op=constants.LABEL_REMOVE_OP)
if app.system_app and app.status != constants.APP_UPLOAD_FAILURE:
self._remove_chart_overrides(app.mfile_abs)
os.unlink(app.armada_mfile_abs)
shutil.rmtree(app.path)
except OSError as e:
LOG.exception(e)
def _update_app_status(self, app, new_status):
""" Persist new app status """
with self._lock:
app.status = new_status
def _abort_operation(self, app, operation):
if (app.status == constants.APP_UPLOAD_IN_PROGRESS):
self._update_app_status(app, constants.APP_UPLOAD_FAILURE)
elif (app.status == constants.APP_APPLY_IN_PROGRESS):
self._update_app_status(app, constants.APP_APPLY_FAILURE)
elif (app.status == constants.APP_REMOVE_IN_PROGRESS):
self._update_app_status(app, constants.APP_REMOVE_FAILURE)
LOG.error("Application %s aborted!." % operation)
def _extract_tarfile(self, app):
def _handle_extract_failure():
raise exception.KubeAppUploadFailure(
name=app.name,
reason="failed to extract tarfile content.")
try:
if not os.path.isdir(constants.APP_INSTALL_PATH):
# One time set up
os.makedirs(constants.APP_INSTALL_PATH)
os.makedirs(constants.APP_MANIFEST_PATH)
if not os.path.isdir(app.path):
os.makedirs(app.path)
if not cutils.extract_tarfile(app.path, app.tarfile):
_handle_extract_failure()
if os.path.isdir(app.charts_dir):
tar_filelist = cutils.get_files_matching(app.charts_dir,
'.tgz')
for p, f in tar_filelist:
if not cutils.extract_tarfile(p, os.path.join(p, f)):
_handle_extract_failure()
except OSError as e:
LOG.error(e)
_handle_extract_failure()
def _get_image_tags_by_path(self, path):
""" Mine the image tags from values.yaml files in the chart directory,
intended for custom apps. """
image_tags = []
ids = []
for r, f in cutils.get_files_matching(path, 'values.yaml'):
with open(os.path.join(r, f), 'r') as file:
try:
y = yaml.load(file)
ids = y["images"]["tags"].values()
except (TypeError, KeyError):
pass
image_tags.extend(ids)
return list(set(image_tags))
def _get_image_tags_by_charts(self, app_path, charts):
""" Mine the image tags from both the chart path and the overrides,
intended for system app. """
image_tags = []
for chart in charts:
tags = []
overrides = chart.namespace + '-' + chart.name + '.yaml'
overrides_file = os.path.join(common.HELM_OVERRIDES_PATH,
overrides)
chart_path = os.path.join(app_path, chart.name)
if os.path.exists(overrides_file):
with open(overrides_file, 'r') as file:
try:
y = yaml.load(file)
tags = y["data"]["values"]["images"]["tags"].values()
except (TypeError, KeyError):
LOG.info("Overrides file %s has no img tags" %
overrides_file)
if tags:
image_tags.extend(tags)
continue
# Either this chart does not have overrides file or image tags are
# not in its overrides file, walk the chart path to find image tags
chart_path = os.path.join(app_path, chart.name)
tags = self._get_image_tags_by_path(chart_path)
if tags:
image_tags.extend(tags)
return list(set(image_tags))
def _register_embedded_images(self, app):
"""
TODO(tngo):
=============
When we're ready to support air-gapped scenario and private images, the
following need to be done:
a. load the embedded images
b. tag and push them to the docker registry on the controller
c. find image tag IDs in each chart and replace their values with
new tags. Alternatively, document the image tagging convention
${MGMT_FLOATING_IP}:${REGISTRY_PORT}/<image-name>
(e.g. 192.168.204.2:9001/prom/mysqld-exporter)
to be referenced in the application Helm charts.
"""
raise exception.KubeAppApplyFailure(
name=app.name,
reason="embedded images are not yet supported.")
def _download_images(self, app):
if os.path.isdir(app.images_dir):
return self._register_embedded_images(app)
if app.system_app:
# Grab the image tags from the overrides. If they don't exist
# then mine them from the chart paths.
charts = self._get_list_of_charts(app.mfile_abs)
images_to_download = self._get_image_tags_by_charts(app.charts_dir,
charts)
else:
# For custom apps, mine image tags from application path
images_to_download = self._get_image_tags_by_path(app.path)
if not images_to_download:
raise exception.KubeAppApplyFailure(
name=app.name,
reason="charts specify no docker images.")
total_count = len(images_to_download)
threads = min(MAX_DOWNLOAD_THREAD, total_count)
failed_downloads = []
start = time.time()
pool = eventlet.greenpool.GreenPool(size=threads)
for tag, rc in pool.imap(self._docker.download_an_image,
images_to_download):
if not rc:
failed_downloads.append(tag)
elapsed = time.time() - start
failed_count = len(failed_downloads)
if failed_count > 0:
raise exception.KubeAppApplyFailure(
name=app.name,
reason="failed to download one or more image(s).")
else:
LOG.info("All docker images for application %s were successfully "
"downloaded in %d seconds" % (app.name, elapsed))
def _validate_helm_charts(self, app):
failed_charts = []
for r, f in cutils.get_files_matching(app.charts_dir, 'Chart.yaml'):
# Eliminate redundant validation for system app
if app.system_app and '/charts/helm-toolkit' in r:
continue
try:
output = subprocess.check_output(['helm', 'lint', r])
if "no failures" in output:
LOG.info("Helm chart %s validated" % os.path.basename(r))
else:
LOG.error("Validation failed for helm chart %s" %
os.path.basename(r))
failed_charts.append(r)
except Exception as e:
raise exception.KubeAppUploadFailure(
name=app.name, reason=str(e))
if len(failed_charts) > 0:
raise exception.KubeAppUploadFailure(
name=app.name, reason="one or more charts failed validation.")
def _upload_helm_charts(self, app):
# Set env path for helm-upload execution
env = os.environ.copy()
env['PATH'] = '/usr/local/sbin:' + env['PATH']
charts = [os.path.join(r, f)
for r, f in cutils.get_files_matching(app.charts_dir, '.tgz')]
with open(os.devnull, "w") as fnull:
for chart in charts:
try:
subprocess.check_call(['helm-upload', chart], env=env,
stdout=fnull, stderr=fnull)
LOG.info("Helm chart %s uploaded" % os.path.basename(chart))
except Exception as e:
raise exception.KubeAppUploadFailure(
name=app.name, reason=str(e))
def _validate_labels(self, labels):
expr = re.compile(r'[a-z0-9]([-a-z0-9]*[a-z0-9])')
for label in labels:
if not expr.match(label):
return False
return True
def _update_kubernetes_labels(self, hostname, label_dict):
body = {
'metadata': {
'labels': {}
}
}
body['metadata']['labels'].update(label_dict)
self._kube.kube_patch_node(hostname, body)
def _assign_host_labels(self, hosts, labels):
for host in hosts:
for label_str in labels:
k, v = label_str.split('=')
try:
self._dbapi.label_create(
host.id, {'host_id': host.id,
'label_key': k,
'label_value': v})
except exception.HostLabelAlreadyExists:
pass
label_dict = {k: v for k, v in (i.split('=') for i in labels)}
self._update_kubernetes_labels(host.hostname, label_dict)
def _find_label(self, host_uuid, label_str):
host_labels = self._dbapi.label_get_by_host(host_uuid)
for label_obj in host_labels:
if label_str == label_obj.label_key + '=' + label_obj.label_value:
return label_obj
return None
def _remove_host_labels(self, hosts, labels):
for host in hosts:
null_labels = {}
for label_str in labels:
lbl_obj = self._find_label(host.uuid, label_str)
if lbl_obj:
self._dbapi.label_destroy(lbl_obj.uuid)
key = lbl_obj.label_key
null_labels[key] = None
if null_labels:
self._update_kubernetes_labels(host.hostname, null_labels)
def _process_node_labels(self, app, op=constants.LABEL_ASSIGN_OP):
# Node labels are host personality based and are defined in
# metadata.yaml file in the following format:
# labels:
# controller: '<label-key1>=<value>, <label-key2>=<value>, ...'
# compute: '<label-key1>=<value>, <label-key2>=<value>, ...'
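# For example (values illustrative, using the default labels defined
# in constants):
# labels:
#   controller: 'openstack-control-plane=enabled'
#   compute: 'openstack-compute-node=enabled, openvswitch=enabled'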
lfile = os.path.join(app.path, 'metadata.yaml')
controller_labels = []
compute_labels = []
controller_l = compute_l = None
controller_labels_set = set()
compute_labels_set = set()
if os.path.exists(lfile) and os.path.getsize(lfile) > 0:
with open(lfile, 'r') as f:
try:
y = yaml.load(f)
labels = y['labels']
except KeyError:
raise exception.KubeAppUploadFailure(
name=app.name,
reason="labels file contains no labels.")
for key, value in labels.iteritems():
if key == constants.CONTROLLER:
controller_l = value
elif key == constants.COMPUTE:
compute_l = value
else:
if not app.system_app:
LOG.info("Application %s does not require specific node "
"labeling." % app.name)
return
if controller_l:
controller_labels =\
controller_l.replace(',', ' ').split()
controller_labels_set = set(controller_labels)
if not self._validate_labels(controller_labels_set):
raise exception.KubeAppUploadFailure(
name=app.name,
reason="controller labels are malformed.")
if compute_l:
compute_labels =\
compute_l.replace(',', ' ').split()
compute_labels_set = set(compute_labels)
if not self._validate_labels(compute_labels_set):
raise exception.KubeAppUploadFailure(
name=app.name,
reason="compute labels are malformed.")
# Add the default labels for system app
if app.system_app:
controller_labels_set.add(constants.CONTROL_PLANE_LABEL)
compute_labels_set.add(constants.COMPUTE_NODE_LABEL)
compute_labels_set.add(constants.OPENVSWITCH_LABEL)
# Get controller host(s)
controller_hosts =\
self._dbapi.ihost_get_by_personality(constants.CONTROLLER)
if constants.COMPUTE in controller_hosts[0].subfunctions:
# AIO system
labels = controller_labels_set.union(compute_labels_set)
if op == constants.LABEL_ASSIGN_OP:
self._assign_host_labels(controller_hosts, labels)
elif op == constants.LABEL_REMOVE_OP:
self._remove_host_labels(controller_hosts, labels)
else:
# Standard system
compute_hosts =\
self._dbapi.ihost_get_by_personality(constants.COMPUTE)
if op == constants.LABEL_ASSIGN_OP:
self._assign_host_labels(controller_hosts, controller_labels_set)
self._assign_host_labels(compute_hosts, compute_labels_set)
elif op == constants.LABEL_REMOVE_OP:
self._remove_host_labels(controller_hosts, controller_labels_set)
self._remove_host_labels(compute_hosts, compute_labels_set)
def _get_list_of_charts(self, manifest_file):
charts = []
with open(manifest_file, 'r') as f:
docs = yaml.load_all(f)
for doc in docs:
try:
if "armada/Chart/" in doc['schema']:
charts.append(Chart(
name=doc['data']['chart_name'],
namespace=doc['data']['namespace']))
except KeyError:
pass
return charts
def _get_overrides_files(self, charts):
"""Returns list of override files or None, used in
application-install and application-delete."""
missing_overrides = []
available_overrides = []
excluded = ['helm-toolkit']
for chart in charts:
overrides = chart.namespace + '-' + chart.name + '.yaml'
if chart.name in excluded:
LOG.debug("Skipping overrides %s " % overrides)
continue
overrides_file = os.path.join(common.HELM_OVERRIDES_PATH,
overrides)
if not os.path.exists(overrides_file):
missing_overrides.append(overrides_file)
else:
available_overrides.append(overrides_file)
if missing_overrides:
LOG.error("Missing the following overrides: %s" % missing_overrides)
return None
return available_overrides
def _generate_armada_overrides_str(self, overrides_files):
return " ".join([' --values /overrides/{0}'.format(os.path.basename(i))
for i in overrides_files])
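# For example (illustrative): a single generated overrides file named
# 'openstack-mariadb.yaml' yields ' --values /overrides/openstack-mariadb.yaml',
# matching the read-only /overrides bind mount set up for the Armada
# container in DockerHelper below.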
def _remove_chart_overrides(self, manifest_file):
charts = self._get_list_of_charts(manifest_file)
for chart in charts:
if chart.name in constants.SUPPORTED_HELM_CHARTS:
self._helm.remove_helm_chart_overrides(chart.name,
chart.namespace)
def perform_app_upload(self, rpc_app, tarfile):
"""Process application upload request
This method validates the application manifest and, if Helm charts
are included, validates them and uploads them to the local Helm repo.
:param rpc_app: application object in the RPC request
:param tarfile: location of application tarfile
"""
app = AppOperator.Application(rpc_app)
LOG.info("Application (%s) upload started." % app.name)
try:
# Full extraction of application tarball at /scratch/apps.
# Manifest file is placed under /opt/platform/armada
# which is managed by drbd-sync and visible to Armada.
orig_mode = stat.S_IMODE(os.lstat("/scratch").st_mode)
app.tarfile = tarfile
self._extract_tarfile(app)
shutil.copy(app.mfile_abs, app.armada_mfile_abs)
if not self._docker.make_armada_request('validate', app.armada_mfile):
return self._abort_operation(app, constants.APP_UPLOAD_OP)
if os.path.isdir(app.charts_dir):
self._validate_helm_charts(app)
# Temporarily allow read and execute access to /scratch so www
# user can upload helm charts
os.chmod('/scratch', 0o755)
self._upload_helm_charts(app)
self._update_app_status(app, constants.APP_UPLOAD_SUCCESS)
LOG.info("Application (%s) upload completed." % app.name)
except Exception as e:
LOG.exception(e)
self._abort_operation(app, constants.APP_UPLOAD_OP)
finally:
os.chmod('/scratch', orig_mode)
def perform_app_apply(self, rpc_app):
"""Process application install request
This method processes node labels per configuration and invokes
Armada to apply the application manifest.
For OpenStack app (system app), the method generates combined
overrides (a merge between system and user overrides if available)
for the charts that comprise the app before downloading docker images
and applying the manifest.
Usage: the method can be invoked at initial install or after the
user has either made some manual configuration changes or
applied (new) user overrides to some Helm chart(s) to
correct/update a previous manifest apply.
:param rpc_app: application object in the RPC request
"""
app = AppOperator.Application(rpc_app)
LOG.info("Application (%s) apply started." % app.name)
# TODO(tngo): Disable node labeling for system app for now until
# vim integration with sysinv for container support is ready
if not app.system_app:
self._process_node_labels(app)
overrides_str = ''
ready = True
try:
if app.system_app:
charts = self._get_list_of_charts(app.mfile_abs)
LOG.info("Generating application overrides...")
self._helm.generate_helm_application_overrides(
app.name, cnamespace=None, armada_format=True,
combined=True)
overrides_files = self._get_overrides_files(charts)
if overrides_files:
LOG.info("Application overrides generated.")
# Ensure all chart overrides are readable by Armada
for file in overrides_files:
os.chmod(file, 0644)
overrides_str =\
self._generate_armada_overrides_str(overrides_files)
self._download_images(app)
else:
ready = False
else:
# No support for custom app overrides at this point, just
# download the needed images.
self._download_images(app)
if ready:
if self._docker.make_armada_request('apply',
app.armada_mfile,
overrides_str):
self._update_app_status(app,
constants.APP_APPLY_SUCCESS)
LOG.info("Application (%s) apply completed." % app.name)
return
except Exception as e:
LOG.exception(e)
# If it gets here, something went wrong
self._abort_operation(app, constants.APP_APPLY_OP)
def perform_app_remove(self, rpc_app):
"""Process application remove request
This method invokes Armada to delete the application manifest.
For system app, it also cleans up old test pods.
:param rpc_app: application object in the RPC request
"""
app = AppOperator.Application(rpc_app)
LOG.info("Application (%s) remove started." % app.name)
if self._docker.make_armada_request('delete', app.armada_mfile):
if app.system_app:
try:
p1 = subprocess.Popen(['kubectl', 'get', 'pods', '-n',
'openstack'],
stdout=subprocess.PIPE)
p2 = subprocess.Popen(['awk', '/osh-.*-test/{print $1}'],
stdin=p1.stdout,
stdout=subprocess.PIPE)
p3 = subprocess.Popen(['xargs', '-i', 'kubectl',
'delete', 'pods', '-n', 'openstack',
'{}'], stdin=p2.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p1.stdout.close()
p2.stdout.close()
out, err = p3.communicate()
if not err:
LOG.info("Old test pods cleanup completed.")
except Exception as e:
LOG.exception("Failed to clean up test pods after app "
"removal: %s" % e)
self._update_app_status(app, constants.APP_UPLOAD_SUCCESS)
LOG.info("Application (%s) remove completed." % app.name)
else:
self._abort_operation(app, constants.APP_REMOVE_OP)
def perform_app_delete(self, rpc_app):
"""Process application remove request
This method removes the application entry from the database and
performs cleanup which entails removing node labels where applicable
and purge all application files from the system.
:param rpc_app: application object in the RPC request
"""
app = AppOperator.Application(rpc_app)
try:
self._dbapi.kube_app_destroy(app.name)
self._cleanup(app)
LOG.info("Application (%s) has been purged from the system." %
app.name)
msg = None
except Exception as e:
# Possible exceptions are KubeAppDeleteFailure,
# OSError and unexpectedly KubeAppNotFound
LOG.exception(e)
msg = str(e)
return msg
class Application(object):
""" Data object to encapsulate all data required to
support application related operations.
"""
def __init__(self, rpc_app):
self._kube_app = rpc_app
self.path = os.path.join(constants.APP_INSTALL_PATH,
self._kube_app.get('name'))
self.charts_dir = os.path.join(self.path, 'charts')
self.images_dir = os.path.join(self.path, 'images')
self.tarfile = None
self.system_app =\
(self._kube_app.get('name') == constants.HELM_APP_OPENSTACK)
self.armada_mfile =\
os.path.join('/manifests', self._kube_app.get('name') + "-" +
self._kube_app.get('manifest_file'))
self.armada_mfile_abs =\
os.path.join(constants.APP_MANIFEST_PATH,
self._kube_app.get('name') + "-" +
self._kube_app.get('manifest_file'))
self.mfile_abs =\
os.path.join(constants.APP_INSTALL_PATH,
self._kube_app.get('name'),
self._kube_app.get('manifest_file'))
@property
def name(self):
return self._kube_app.get('name')
@property
def mfile(self):
return self._kube_app.get('manifest_file')
@property
def status(self):
return self._kube_app.get('status')
@status.setter
def status(self, new_status):
self._kube_app.status = new_status
self._kube_app.save()
class DockerHelper(object):
""" Utility class to encapsulate Docker related operations """
def _start_armada_service(self, client):
try:
container = client.containers.get(ARMADA_CONTAINER_NAME)
if container.status != 'running':
LOG.info("Restarting Armada service...")
container.restart()
return container
except Exception:
LOG.info("Starting Armada service...")
try:
# First make kubernetes config accessible to Armada. This
# is a workaround for a permission issue in the Armada container.
install_dir = constants.APP_INSTALL_PATH
kube_config = os.path.join(install_dir, 'admin.conf')
shutil.copy('/etc/kubernetes/admin.conf', kube_config)
os.chown(kube_config, 1000, grp.getgrnam("wrs").gr_gid)
overrides_dir = common.HELM_OVERRIDES_PATH
manifests_dir = constants.APP_MANIFEST_PATH
LOG.info("kube_config=%s, manifests_dir=%s, "
"overrides_dir=%s." % (kube_config, manifests_dir,
overrides_dir))
binds = {
kube_config: {'bind': '/armada/.kube/config', 'mode': 'ro'},
manifests_dir: {'bind': '/manifests', 'mode': 'ro'},
overrides_dir: {'bind': '/overrides', 'mode': 'ro'}}
container = client.containers.run(
CONF.armada_image_tag,
name=ARMADA_CONTAINER_NAME,
detach=True,
volumes=binds,
restart_policy={'Name': 'always'},
command=None)
LOG.info("Armada service started!")
return container
except OSError as oe:
LOG.error("Unable to make kubernetes config accessible to "
"armada: %s" % oe)
except Exception as e:
# Possible docker exceptions are: RuntimeError, ContainerError,
# ImageNotFound and APIError
LOG.error("Docker error while launching Armada container: %s", e)
os.unlink(kube_config)
return None
def make_armada_request(self, request, manifest_file, overrides_str=''):
rc = True
try:
client = docker.from_env()
armada_svc = self._start_armada_service(client)
if armada_svc:
if request == 'validate':
cmd = 'armada validate ' + manifest_file
exec_logs = armada_svc.exec_run(cmd)
if "Successfully validated" in exec_logs:
LOG.info("Manifest file %s was successfully validated." %
manifest_file)
else:
rc = False
LOG.error("Validation of the armada manifest %s "
"failed: %s" % (manifest_file, exec_logs))
elif request == 'apply':
cmd = 'armada apply --debug ' + manifest_file + overrides_str
LOG.info("Armada apply command = %s" % cmd)
exec_logs = armada_svc.exec_run(cmd)
if not any(str in exec_logs for str in ARMADA_ERRORS):
LOG.info("Application manifest %s was successfully "
"applied/re-applied." % manifest_file)
else:
rc = False
LOG.error("Failed to apply application manifest: %s" %
exec_logs)
elif request == 'delete':
cmd = 'armada delete --debug --manifest ' + manifest_file
exec_logs = armada_svc.exec_run(cmd)
if not any(str in exec_logs for str in ARMADA_ERRORS):
LOG.info("Application charts were successfully "
"deleted.")
else:
rc = False
LOG.error("Delete the application manifest failed: %s" %
exec_logs)
else:
rc = False
LOG.error("Unsupported armada request: %s." % request)
else:
# Armada service failed to start/restart
rc = False
except Exception as e:
# Failed to get a docker client
rc = False
LOG.error("Armada request %s for manifest %s failed: %s " %
(request, manifest_file, e))
return rc
def download_an_image(self, img_tag):
rc = True
start = time.time()
try:
LOG.info("Image %s download started" % img_tag)
c = docker.from_env()
c.images.pull(img_tag)
except Exception as e:
rc = False
LOG.error("Image %s download failed: %s" % (img_tag, e))
return img_tag, rc
elapsed_time = time.time() - start
LOG.info("Image %s download succeeded in %d seconds" %
(img_tag, elapsed_time))
return img_tag, rc

@ -79,6 +79,7 @@ from sysinv.common.retrying import retry
from sysinv.common.storage_backend_conf import StorageBackendConfig
from cephclient import wrapper as ceph
from sysinv.conductor import ceph as iceph
from sysinv.conductor import kube_app
from sysinv.conductor import openstack
from sysinv.db import api as dbapi
from sysinv.objects import base as objects_base
@ -151,6 +152,7 @@ class ConductorManager(service.PeriodicService):
self.dbapi = None
self.fm_api = None
self.fm_log = None
self._app = None
self._ceph = None
self._ceph_api = ceph.CephWrapper(
endpoint='http://localhost:5001/api/v0.1/')
@ -179,6 +181,7 @@ class ConductorManager(service.PeriodicService):
self._openstack = openstack.OpenStackOperator(self.dbapi)
self._puppet = puppet.PuppetOperator(self.dbapi)
self._app = kube_app.AppOperator(self.dbapi)
self._ceph = iceph.CephOperator(self.dbapi)
self._helm = helm.HelmOperator(self.dbapi)
self._kube = kubernetes.KubeOperator(self.dbapi)
@ -10302,6 +10305,17 @@ class ConductorManager(service.PeriodicService):
"""
return self._helm.get_helm_application_overrides(app_name, cnamespace)
def merge_overrides(self, context, file_overrides=[], set_overrides=[]):
"""Merge the file and set overrides into a single chart overrides.
:param context: request context.
:param file_overrides: (optional) list of overrides from files
:param set_overrides: (optional) list of parameter overrides
:returns: merged overrides string
"""
return self._helm.merge_overrides(file_overrides, set_overrides)
def update_kubernetes_label(self, context,
host_uuid, label_dict):
"""Synchronously, have the conductor update kubernetes label
@ -10353,3 +10367,40 @@ class ConductorManager(service.PeriodicService):
:returns: a list of keys
"""
return self._fernet.get_fernet_keys(key_id)
def perform_app_upload(self, context, rpc_app, tarfile):
"""Handling of application upload request (via AppOperator)
:param context: request context.
:param rpc_app: data object provided in the rpc request
:param tarfile: location of the application tarfile to be extracted
"""
self._app.perform_app_upload(rpc_app, tarfile)
def perform_app_apply(self, context, rpc_app):
"""Handling of application install request (via AppOperator)
:param context: request context.
:param rpc_app: data object provided in the rpc request
"""
return self._app.perform_app_apply(rpc_app)
def perform_app_remove(self, context, rpc_app):
"""Handling of application removal request (via AppOperator)
:param context: request context.
:param rpc_app: data object provided in the rpc request
"""
return self._app.perform_app_remove(rpc_app)
def perform_app_delete(self, context, rpc_app):
"""Handling of application delete request (via AppOperator)
:param context: request context.
:param rpc_app: data object provided in the rpc request
"""
return self._app.perform_app_delete(rpc_app)

@ -1686,6 +1686,20 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
app_name=app_name,
cnamespace=cnamespace))
def merge_overrides(self, context, file_overrides=[], set_overrides=[]):
"""Merge the file and set overrides into a single chart overrides.
:param context: request context.
:param file_overrides: (optional) list of overrides from files
:param set_overrides: (optional) list of parameter overrides
:returns: merged overrides string
"""
return self.call(context,
self.make_msg('merge_overrides',
file_overrides=file_overrides,
set_overrides=set_overrides))
def update_kubernetes_label(self, context, host_uuid, label_dict):
"""Synchronously, have the conductor update kubernetes label.
@ -1727,3 +1741,47 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
"""
return self.call(context, self.make_msg('get_fernet_keys',
key_id=key_id))
def perform_app_upload(self, context, rpc_app, tarfile):
"""Handle application upload request
:param context: request context.
:param rpc_app: data object provided in the rpc request
:param tarfile: location of the application tarfile to be extracted
"""
return self.cast(context,
self.make_msg('perform_app_upload',
rpc_app=rpc_app,
tarfile=tarfile))
def perform_app_apply(self, context, rpc_app):
"""Handle application apply request
:param context: request context.
:param rpc_app: data object provided in the rpc request
"""
return self.cast(context,
self.make_msg('perform_app_apply',
rpc_app=rpc_app))
def perform_app_remove(self, context, rpc_app):
"""Handle application remove request
:param context: request context.
:param rpc_app: data object provided in the rpc request
"""
return self.cast(context,
self.make_msg('perform_app_remove',
rpc_app=rpc_app))
def perform_app_delete(self, context, rpc_app):
"""Handle application delete request
:param context: request context.
:param rpc_app: data object provided in the rpc request
"""
return self.call(context,
self.make_msg('perform_app_delete',
rpc_app=rpc_app))

@ -7517,3 +7517,65 @@ class Connection(api.Connection):
query = model_query(models.Label, read_deleted="no")
query = query.filter(models.Label.label_key == label)
return query.count()
def _kube_app_get(self, name):
query = model_query(models.KubeApp)
query = query.filter_by(name=name)
try:
result = query.one()
except NoResultFound:
raise exception.KubeAppNotFound(name=name)
return result
@objects.objectify(objects.kube_app)
def kube_app_create(self, values):
app = models.KubeApp()
app.update(values)
with _session_for_write() as session:
try:
session.add(app)
session.flush()
except db_exc.DBDuplicateEntry:
LOG.error("Failed to add application %s. "
"Already exists with this name" %
(values['name']))
raise exception.KubeAppAlreadyExists(
name=values['name'])
return self.kube_app_get(values['name'])
@objects.objectify(objects.kube_app)
def kube_app_get_all(self):
query = model_query(models.KubeApp)
return query.all()
@objects.objectify(objects.kube_app)
def kube_app_get(self, name):
return self._kube_app_get(name)
@objects.objectify(objects.kube_app)
def kube_app_update(self, name, values):
with _session_for_write() as session:
query = model_query(models.KubeApp, session=session)
query = query.filter_by(name=name)
count = query.update(values, synchronize_session='fetch')
if count == 0:
raise exception.KubeAppNotFound(name)
return query.one()
def kube_app_destroy(self, name):
with _session_for_write() as session:
query = model_query(models.KubeApp, session=session)
query = query.filter_by(name=name)
try:
app = query.one()
if app.status not in [constants.APP_UPLOAD_SUCCESS,
constants.APP_UPLOAD_FAILURE]:
failure_reason =\
"operation is not allowed while status is " + app.status
raise exception.KubeAppDeleteFailure(
name=name,
reason=failure_reason)
except NoResultFound:
raise exception.KubeAppNotFound(name)
query.delete()

@ -0,0 +1,53 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import DateTime, String, Integer
from sqlalchemy import Column, MetaData, Table
from sysinv.openstack.common import log
ENGINE = 'InnoDB'
CHARSET = 'utf8'
LOG = log.getLogger(__name__)
def upgrade(migrate_engine):
"""
This database upgrade creates a new table for storing kubernetes
application info.
"""
meta = MetaData()
meta.bind = migrate_engine
# Define and create the kube_app table.
kube_app = Table(
'kube_app',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('id', Integer, primary_key=True),
Column('name', String(255), unique=True, nullable=False),
Column('manifest_name', String(255), nullable=False),
Column('manifest_file', String(255), nullable=True),
Column('status', String(255), nullable=False),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
kube_app.create()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# As per other openstack components, downgrade is
# unsupported in this release.
raise NotImplementedError('SysInv database downgrade is unsupported.')

@ -1634,3 +1634,13 @@ class Label(Base):
label_key = Column(String(384))
label_value = Column(String(128))
UniqueConstraint('host_id', 'label_key', name='u_host_id@label_key')
class KubeApp(Base):
__tablename__ = 'kube_app'
id = Column(Integer, primary_key=True)
name = Column(String(255), unique=True, nullable=False)
manifest_name = Column(String(255), nullable=False)
manifest_file = Column(String(255), nullable=False)
status = Column(String(255), nullable=False)

@ -8,8 +8,10 @@
from __future__ import absolute_import
import copy
import eventlet
import os
import subprocess
import tempfile
import yaml
@ -48,6 +50,7 @@ from . import rabbitmq
from . import rbd_provisioner
from . import nova_api_proxy
LOG = logging.getLogger(__name__)
@ -290,6 +293,67 @@ class HelmOperator(object):
}
return new_overrides
def merge_overrides(self, file_overrides=[], set_overrides=[]):
""" Merge helm overrides together.
:param values: A dict of different types of user override values,
'files' (which generally specify many overrides) and
'set' (which generally specify one override).
"""
# At this point we have potentially two separate types of overrides
# specified by system or user, values from files and values passed in
# via --set . We need to ensure that we call helm using the same
# mechanisms to ensure the same behaviour.
cmd = ['helm', 'install', '--dry-run', '--debug']
# Process the newly-passed-in override values
tmpfiles = []
for value_file in file_overrides:
# For values passed in from files, write them back out to
# temporary files.
tmpfile = tempfile.NamedTemporaryFile(delete=False)
tmpfile.write(value_file)
tmpfile.close()
tmpfiles.append(tmpfile.name)
cmd.extend(['--values', tmpfile.name])
for value_set in set_overrides:
cmd.extend(['--set', value_set])
env = os.environ.copy()
env['KUBECONFIG'] = '/etc/kubernetes/admin.conf'
# Make a temporary directory with a fake chart in it
try:
tmpdir = tempfile.mkdtemp()
chartfile = tmpdir + '/Chart.yaml'
with open(chartfile, 'w') as tmpchart:
tmpchart.write('name: mychart\napiVersion: v1\n'
'version: 0.1.0\n')
cmd.append(tmpdir)
# Apply changes by calling out to helm to do values merge
# using a dummy chart.
output = subprocess.check_output(cmd, env=env)
# Check output for failure
# Extract the info we want.
values = output.split('USER-SUPPLIED VALUES:\n')[1].split(
'\nCOMPUTED VALUES:')[0]
except Exception:
raise
finally:
os.remove(chartfile)
os.rmdir(tmpdir)
for tmpfile in tmpfiles:
os.remove(tmpfile)
return values
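# Illustrative example of the merge performed above (exact output comes
# from "helm install --dry-run --debug" against the dummy chart):
#   merge_overrides(file_overrides=['conf:\n  a: 1\n'],
#                   set_overrides=['conf.b=2'])
# would return roughly:
#   conf:
#     a: 1
#     b: 2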
@helm_context
def generate_helm_chart_overrides(self, chart_name, cnamespace=None):
"""Generate system helm chart overrides
@ -326,7 +390,8 @@ class HelmOperator(object):
@helm_context
def generate_helm_application_overrides(self, app_name, cnamespace=None,
armada_format=False):
armada_format=False,
combined=False):
"""Create the system overrides files for a supported application
This method will generate system helm chart overrides yaml files for a
@ -339,12 +404,29 @@ class HelmOperator(object):
:param cnamespace: (optional) namespace
:param armada_format: (optional) whether to emit in armada format
instead of helm format (with extra header)
:param combined: (optional) whether to apply user overrides on top of
system overrides
"""
if app_name in constants.SUPPORTED_HELM_APP_NAMES:
app_overrides = self._get_helm_application_overrides(app_name,
cnamespace)
for (chart_name, overrides) in iteritems(app_overrides):
if combined:
try:
db_chart = self.dbapi.helm_override_get(
chart_name, 'openstack')
user_overrides = db_chart.user_overrides
if user_overrides:
system_overrides = yaml.dump(overrides)
file_overrides = [system_overrides, user_overrides]
combined_overrides = self.merge_overrides(
file_overrides=file_overrides)
combined_overrides = yaml.load(combined_overrides)
overrides = copy.deepcopy(combined_overrides)
except exception.HelmOverrideNotFound:
pass
# If armada formatting is wanted, we need to change the
# structure of the yaml file somewhat
if armada_format:
@ -363,7 +445,7 @@ class HelmOperator(object):
"""Remove the overrides files for a chart"""
if chart_name in self.implemented_charts:
namespaces = self.chart_operators[chart_name].get_namespaces
namespaces = self.chart_operators[chart_name].get_namespaces()
filenames = []
if cnamespace and cnamespace in namespaces:

@ -36,6 +36,7 @@ from sysinv.objects import port_ethernet
from sysinv.objects import helm_overrides
from sysinv.objects import host
from sysinv.objects import host_upgrade
from sysinv.objects import kube_app
from sysinv.objects import network_infra
from sysinv.objects import interface
from sysinv.objects import interface_ae
@ -181,6 +182,7 @@ storage_tier = storage_tier.StorageTier
storage_ceph_external = storage_ceph_external.StorageCephExternal
helm_overrides = helm_overrides.HelmOverrides
label = label.Label
kube_app = kube_app.KubeApp
__all__ = (system,
cluster,
@ -248,6 +250,7 @@ __all__ = (system,
storage_tier,
storage_ceph_external,
helm_overrides,
kube_app,
# alias objects for RPC compatibility
ihost,
ilvg,

@ -0,0 +1,31 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
class KubeApp(base.SysinvObject):
# VERSION 1.0: Initial version
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {'name': utils.str_or_none,
'manifest_name': utils.str_or_none,
'manifest_file': utils.str_or_none,
'status': utils.str_or_none,
}
@base.remotable_classmethod
def get_by_name(cls, context, name):
return cls.dbapi.kube_app_get(name)
def save_changes(self, context, updates):
self.dbapi.kube_app_update(self.name, updates)

@ -31,4 +31,5 @@ python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0
python-cephclient
python-ldap>=2.4.22,<3.0.0
markupsafe
docker
# Babel>=0.9.6