Merge "Remove the is_kubernetes checks from sysinv"

Zuul 2019-06-12 21:54:57 +00:00 committed by Gerrit Code Review
commit 7701667eca
15 changed files with 132 additions and 366 deletions
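The helper deleted throughout this change is the one removed from sysinv's common utils module in a hunk further down. A minimal self-contained sketch of what it did (the real helper defaults dbapi to pecan.request.dbapi; that fallback is omitted here so the sketch stands alone):

# Sketch of the removed helper; after this commit callers simply assume a
# Kubernetes-enabled configuration instead of consulting this capability flag.
def is_kubernetes_config(dbapi):
    """Return True when the isystem capabilities flag Kubernetes as enabled."""
    system = dbapi.isystem_get_one()
    return system.capabilities.get('kubernetes_enabled', False)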

View File

@@ -3794,8 +3794,7 @@ class ConfigAssistant():
                 'vswitch_type': str(self.vswitch_type),
                 'shared_services': str(self.shared_services),
                 'sdn_enabled': self.enable_sdn,
-                'https_enabled': self.enable_https,
-                'kubernetes_enabled': self.kubernetes}
+                'https_enabled': self.enable_https}
         system_type = utils.get_system_type()

View File

@@ -71,8 +71,7 @@ def populate_system_config(client):
                     'vswitch_type': 'none',
                     'shared_services': '[]',
                     'sdn_enabled': False,
-                    'https_enabled': False,
-                    'kubernetes_enabled': True}
+                    'https_enabled': False}
     values = {
         'system_mode': CONF.get('BOOTSTRAP_CONFIG', 'SYSTEM_MODE'),

View File

@@ -2110,7 +2110,7 @@ class HostController(rest.RestController):
                                   ibm_msg_dict)
         # Trigger a system app reapply if the host has been unlocked
-        if (utils.is_kubernetes_config() and patched_ihost.get('action') in
+        if (patched_ihost.get('action') in
                 [constants.UNLOCK_ACTION, constants.FORCE_UNLOCK_ACTION]):
             self._reapply_system_app()
@@ -2300,17 +2300,6 @@ class HostController(rest.RestController):
             storage_nodes = pecan.request.dbapi.ihost_get_by_personality(
                 constants.STORAGE)
             if len(storage_nodes) == 1:
-                # TODO(CephPoolsDecouple): rework
-                # delete osd pools
-                # It would be nice if we have a ceph API that can delete
-                # all osd pools at once.
-                if not utils.is_kubernetes_config():
-                    pools = pecan.request.rpcapi.list_osd_pools(
-                        pecan.request.context)
-                    for ceph_pool in pools:
-                        pecan.request.rpcapi.delete_osd_pool(
-                            pecan.request.context, ceph_pool)
                 # update tier status
                 tier_list = pecan.request.dbapi.storage_tier_get_list()
                 for tier in tier_list:
@@ -2346,13 +2335,12 @@ class HostController(rest.RestController):
             return
         openstack_worker = False
-        if utils.is_kubernetes_config():
-            labels = objects.label.get_by_host_id(pecan.request.context, ihost.uuid)
-            for l in labels:
-                if (constants.COMPUTE_NODE_LABEL ==
-                        str(l.label_key) + '=' + str(l.label_value)):
-                    openstack_worker = True
-                    break
+        labels = objects.label.get_by_host_id(pecan.request.context, ihost.uuid)
+        for l in labels:
+            if (constants.COMPUTE_NODE_LABEL ==
+                    str(l.label_key) + '=' + str(l.label_value)):
+                openstack_worker = True
+                break
         idict = {'operation': constants.DELETE_ACTION,
                  'uuid': ihost.uuid,
@@ -3312,7 +3300,7 @@ class HostController(rest.RestController):
             raise wsme.exc.ClientSideError(msg)
     def _semantic_check_data_interfaces(
-            self, ihost, kubernetes_config, force_unlock=False):
+            self, ihost, force_unlock=False):
         """
         Perform semantic checks against data interfaces to ensure validity of
         the node configuration prior to unlocking it.
@@ -3320,7 +3308,6 @@ class HostController(rest.RestController):
         ihost_iinterfaces = (
             pecan.request.dbapi.iinterface_get_by_ihost(ihost['uuid']))
         vswitch_type = utils.get_vswitch_type()
-        data_interface_configured = False
         for iif in ihost_iinterfaces:
             if ((vswitch_type == constants.VSWITCH_TYPE_OVS_DPDK) and
                     (iif.ifclass == constants.INTERFACE_CLASS_DATA)):
@@ -3330,14 +3317,6 @@ class HostController(rest.RestController):
             if not iif.ifclass:
                 continue
             self._semantic_check_sriov_interface(ihost, iif, force_unlock)
-            if iif.ifclass == constants.NETWORK_TYPE_DATA:
-                data_interface_configured = True
-        if not data_interface_configured and not kubernetes_config:
-            msg = _("Can not unlock a worker host without data interfaces. "
-                    "Add at least one data interface before re-attempting "
-                    "this command.")
-            raise wsme.exc.ClientSideError(msg)
     def _semantic_check_data_addresses(self, ihost):
         """
@@ -5267,24 +5246,8 @@ class HostController(rest.RestController):
             # Check whether a restore was properly completed
             self._semantic_check_restore_complete(ihost)
-            # Disable certain worker unlock checks in a kubernetes config
-            kubernetes_config = utils.is_kubernetes_config()
-            if kubernetes_config:
-                self._semantic_check_data_interfaces(ihost,
-                                                     kubernetes_config,
-                                                     force_unlock)
-            else:
-                # sdn configuration check
-                self._semantic_check_sdn_attributes(ihost)
-                # check whether data route gateways are reachable
-                self._semantic_check_data_routes(ihost)
-                # check whether data interfaces have been configured
-                self._semantic_check_data_interfaces(ihost,
-                                                     kubernetes_config,
-                                                     force_unlock)
-                self._semantic_check_data_addresses(ihost)
-                self._semantic_check_data_vrs_attributes(ihost)
+            self._semantic_check_data_interfaces(ihost,
+                                                 force_unlock)
             # Check if cpu assignments are valid
             self._semantic_check_worker_cpu_assignments(ihost)
@@ -5950,21 +5913,20 @@ class HostController(rest.RestController):
                           % hostupdate.displayid)
                     raise wsme.exc.ClientSideError(msg)
-            if utils.is_kubernetes_config():
-                # Check if there is a cluster-host interface on
-                # controller/worker/storage
-                host_interfaces = pecan.request.dbapi.iinterface_get_by_ihost(
-                    ihost['uuid'])
-                network = pecan.request.dbapi.network_get_by_type(
-                    constants.NETWORK_TYPE_CLUSTER_HOST)
-                for iif in host_interfaces:
-                    if iif.networks and str(network.id) in iif.networks:
-                        break
-                else:
-                    msg = _("Cannot unlock host %s "
-                            "without configuring a cluster-host interface."
-                            % hostupdate.displayid)
-                    raise wsme.exc.ClientSideError(msg)
+            # Check if there is a cluster-host interface on
+            # controller/worker/storage
+            host_interfaces = pecan.request.dbapi.iinterface_get_by_ihost(
+                ihost['uuid'])
+            network = pecan.request.dbapi.network_get_by_type(
+                constants.NETWORK_TYPE_CLUSTER_HOST)
+            for iif in host_interfaces:
+                if iif.networks and str(network.id) in iif.networks:
+                    break
+            else:
+                msg = _("Cannot unlock host %s "
+                        "without configuring a cluster-host interface."
+                        % hostupdate.displayid)
+                raise wsme.exc.ClientSideError(msg)
             hostupdate.configure_required = True
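The surviving cluster-host check above relies on Python's for/else: the else suite runs only when the loop finishes without hitting break, i.e. when no interface carries the cluster-host network. A standalone sketch of the same pattern, with illustrative stand-in names:

# Sketch of the for/else pattern used by the cluster-host interface check;
# host_interfaces items are assumed to expose a .networks list of network ids.
def has_cluster_host_interface(host_interfaces, cluster_network_id):
    for iif in host_interfaces:
        if iif.networks and str(cluster_network_id) in iif.networks:
            break  # found an interface assigned to the cluster-host network
    else:
        # Loop completed without break: nothing is on the cluster-host network.
        return False
    return True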

View File

@@ -2163,19 +2163,7 @@ def _neutron_providernet_extension_supported():
     """
     # In the case of a kubernetes config, neutron may not be running, and
     # sysinv should not rely on talking to containerized neutron.
-    if utils.is_kubernetes_config():
-        return False
-    return True
-    # TODO: This should be looking at the neutron extension list, but because
-    # our config file is not setup properly to have a different region on a per
-    # service basis we cannot.
-    #
-    # The code should like something like this:
-    #
-    # extensions = pecan.request.rpcapi.neutron_extension_list(
-    #     pecan.request.context)
-    # return bool(constants.NEUTRON_WRS_PROVIDER_ALIAS in extensions)
+    return False
 def _neutron_providernet_list():

View File

@@ -126,10 +126,6 @@ class KubeAppController(rest.RestController):
     def __init__(self, parent=None, **kwargs):
         self._parent = parent
-    def _check_environment(self):
-        if not utils.is_kubernetes_config():
-            raise exception.OperationNotPermitted
     def _check_tarfile(self, app_tarfile, app_name, app_version, operation):
         def _handle_upload_failure(reason):
             raise wsme.exc.ClientSideError(_(
@@ -189,23 +185,18 @@ class KubeAppController(rest.RestController):
     @wsme_pecan.wsexpose(KubeAppCollection)
     def get_all(self):
-        self._check_environment()
         apps = pecan.request.dbapi.kube_app_get_all()
         return KubeAppCollection.convert_with_links(apps)
     @wsme_pecan.wsexpose(KubeApp, wtypes.text)
     def get_one(self, app_name):
         """Retrieve a single application."""
-        self._check_environment()
         return self._get_one(app_name)
     @cutils.synchronized(LOCK_NAME)
     @wsme_pecan.wsexpose(KubeApp, body=types.apidict)
     def post(self, body):
         """Uploading an application to be deployed by Armada"""
-        self._check_environment()
         tarfile = body.get('tarfile')
         name = body.get('name', '')
         version = body.get('app_version', '')
@@ -245,8 +236,6 @@ class KubeAppController(rest.RestController):
         :param name: application name
         :param directive: either 'apply' (fresh install/update) or 'remove'
         """
-        self._check_environment()
         if directive not in ['apply', 'remove']:
             raise exception.OperationNotPermitted
@@ -397,8 +386,6 @@ class KubeAppController(rest.RestController):
         :param name: application name
         """
-        self._check_environment()
         try:
             db_app = objects.kube_app.get_by_name(pecan.request.context, name)
         except exception.KubeAppNotFound:
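The deleted _check_environment guard raised OperationNotPermitted from each KubeApp REST handler whenever the system was not a Kubernetes config; with Kubernetes unconditionally enabled it became dead code. A rough, self-contained sketch of the removed pattern (kubernetes_enabled stands in for the capability flag the real controller looked up via utils.is_kubernetes_config()):

# Sketch only; the exception name mirrors sysinv's exception.OperationNotPermitted.
class OperationNotPermitted(Exception):
    pass


class KubeAppController(object):
    def __init__(self, kubernetes_enabled=True):
        self._kubernetes_enabled = kubernetes_enabled

    def _check_environment(self):
        # Formerly called at the top of get_all/get_one/post/patch/delete.
        if not self._kubernetes_enabled:
            raise OperationNotPermitted()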

View File

@@ -564,12 +564,6 @@ def _check_host(stor):
         raise wsme.exc.ClientSideError(_("Host %s must be locked." %
                                          ihost['hostname']))
-    # semantic check: only storage nodes are allowed without k8s
-    if (not utils.is_kubernetes_config(pecan.request.dbapi) and
-            ihost['personality'] != constants.STORAGE):
-        msg = ("Host personality must be 'storage' or kubernetes enabled.")
-        raise wsme.exc.ClientSideError(_(msg))
     # semantic check: whether system has a ceph backend
     if not StorageBackendConfig.has_backend_configured(
             pecan.request.dbapi,

View File

@@ -1262,11 +1262,8 @@ def _patch(storceph_uuid, patch):
     # these services depend on will not trigger manifest application.
     fast_config = False
     if not (delta - set(['capabilities']) - set(['services'])):
-        if utils.is_kubernetes_config(pecan.request.dbapi):
-            fast_cfg_services = [constants.SB_SVC_NOVA, constants.SB_SVC_RBD_PROVISIONER,
-                                 constants.SB_SVC_CINDER, constants.SB_SVC_GLANCE]
-        else:
-            fast_cfg_services = [constants.SB_SVC_NOVA, constants.SB_SVC_RBD_PROVISIONER]
+        fast_cfg_services = [constants.SB_SVC_NOVA, constants.SB_SVC_RBD_PROVISIONER,
+                             constants.SB_SVC_CINDER, constants.SB_SVC_GLANCE]
         # Changes to unrelated capabilities?
         storceph_cap = storceph_config.as_dict()['capabilities'].items()
@@ -1356,52 +1353,29 @@ def _patch(storceph_uuid, patch):
         LOG.info("SYS_I orig storage_ceph: %s " % ostorceph.as_dict())
         LOG.info("SYS_I patched storage_ceph: %s " % storceph_config.as_dict())
-        # TODO(CephPoolsDecouple): remove block
-        if not utils.is_kubernetes_config():
-            if _is_quotaconfig_changed(ostorceph, storceph_config):
-                _check_pool_quotas_data(ostorceph, storceph_config.as_dict())
-                _update_pool_quotas(storceph_config.as_dict())
-                # check again after update
-                _check_pool_quotas_data(ostorceph, storceph_config.as_dict())
-            else:
-                LOG.info("Don't check quotas")
+        LOG.info("Don't check quotas")
-        # TODO(CephPoolsDecouple): remove condition
-        if not quota_only_update or utils.is_kubernetes_config():
-            # Execute the common semantic checks for all backends, if backend
-            # is not present this will not return.
-            api_helper.common_checks(constants.SB_API_OP_MODIFY,
-                                     rpc_storceph.as_dict())
+        # Execute the common semantic checks for all backends, if backend
+        # is not present this will not return.
+        api_helper.common_checks(constants.SB_API_OP_MODIFY,
+                                 rpc_storceph.as_dict())
-            # Run the backend specific semantic checks
-            _check_backend_ceph(constants.SB_API_OP_MODIFY,
-                                rpc_storceph.as_dict(),
-                                True)
+        # Run the backend specific semantic checks
+        _check_backend_ceph(constants.SB_API_OP_MODIFY,
+                            rpc_storceph.as_dict(),
+                            True)
-            # TODO (rchurch): In R6, refactor and remove object_gateway
-            # attribute and DB column. This should be driven by if the service
-            # is added to the services list
-            if object_gateway_install:
-                _check_object_gateway_install(pecan.request.dbapi)
+        # TODO (rchurch): In R6, refactor and remove object_gateway
+        # attribute and DB column. This should be driven by if the service
+        # is added to the services list
+        if object_gateway_install:
+            _check_object_gateway_install(pecan.request.dbapi)
         for field in objects.storage_ceph.fields:
             if (field in storceph_config.as_dict() and
                     rpc_storceph[field] != storceph_config.as_dict()[field]):
                 rpc_storceph[field] = storceph_config.as_dict()[field]
-        # TODO(CephPoolsDecouple): remove - on a containerized deployment,
-        # replication is updated through the helm charts.
-        # Update replication on the fly on a single node install.
-        if not utils.is_kubernetes_config():
-            if (replication_only_update and
-                    utils.is_aio_simplex_system(pecan.request.dbapi)):
-                # For single node setups update replication number on the fly.
-                min_replication = new_cap.get(constants.CEPH_BACKEND_MIN_REPLICATION_CAP, None)
-                replication = new_cap.get(constants.CEPH_BACKEND_REPLICATION_CAP, None)
-                pecan.request.rpcapi.configure_osd_pools(
-                    pecan.request.context, rpc_storceph, replication,
-                    min_replication)
         LOG.info("SYS_I new storage_ceph: %s " % rpc_storceph.as_dict())
         try:
             check_and_update_services(rpc_storceph.as_dict())

View File

@@ -387,13 +387,6 @@ def get_distributed_cloud_role(dbapi=None):
     return system.distributed_cloud_role
-def is_kubernetes_config(dbapi=None):
-    if not dbapi:
-        dbapi = pecan.request.dbapi
-    system = dbapi.isystem_get_one()
-    return system.capabilities.get('kubernetes_enabled', False)
 def is_aio_system(dbapi=None):
     if not dbapi:
         dbapi = pecan.request.dbapi
@@ -418,13 +411,6 @@ def is_aio_duplex_system(dbapi=None):
             system.system_mode == constants.SYSTEM_MODE_DUPLEX_DIRECT))
-def is_aio_kubernetes(dbapi=None):
-    if not dbapi:
-        dbapi = pecan.request.dbapi
-    return SystemHelper.get_product_build() == constants.TIS_AIO_BUILD and \
-        is_kubernetes_config(dbapi)
 def get_worker_count(dbapi=None):
     if not dbapi:
         dbapi = pecan.request.dbapi

View File

@@ -611,32 +611,19 @@ class CephApiOperator(object):
         :return True if the data CEPH pools are empty
         :return False if the data CEPH pools are not empty
         """
-        # TODO(CephPoolsDecouple): rework
-        if utils.is_kubernetes_config(db_api):
-            for ceph_pool in pools_usage:
-                # We only need to check data pools.
-                if (constants.CEPH_POOL_OBJECT_GATEWAY_NAME_PART in
-                        ceph_pool['name']):
-                    if not (
-                        ceph_pool['name'].startswith(
-                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL) or
-                        ceph_pool['name'].startswith(
-                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER)):
-                        continue
-                # Ceph pool is not empty.
-                if int(ceph_pool['stats']['bytes_used']) > 0:
-                    return False
-            return True
-        # TODO(CephPoolsDecouple): remove iteration below
         for ceph_pool in pools_usage:
             # We only need to check data pools.
-            if ([pool for pool in constants.ALL_CEPH_POOLS
-                 if ceph_pool['name'].startswith(pool)] and
-                    int(ceph_pool['stats']['bytes_used']) > 0):
-                # Ceph pool is not empty.
+            if (constants.CEPH_POOL_OBJECT_GATEWAY_NAME_PART in
+                    ceph_pool['name']):
+                if not (
+                    ceph_pool['name'].startswith(
+                        constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL) or
+                    ceph_pool['name'].startswith(
+                        constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER)):
+                    continue
+            # Ceph pool is not empty.
+            if int(ceph_pool['stats']['bytes_used']) > 0:
                 return False
         return True
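After the rework, the emptiness check walks every pool, skips object-gateway pools whose names do not match the known Jewel/Hammer RGW pool prefixes, and flags any pool with a non-zero bytes_used. A simplified, self-contained sketch of that logic (the prefix strings are illustrative stand-ins; the real values come from sysinv constants):

# Sketch of the data-pool emptiness check; pools_usage is a list of dicts in
# the shape returned by Ceph ('name' plus 'stats' containing 'bytes_used').
RGW_PART = '.rgw'                              # stand-in for the NAME_PART constant
RGW_PREFIXES = ('default.rgw', '.rgw.root')    # stand-ins for the Jewel/Hammer names


def data_pools_are_empty(pools_usage):
    for ceph_pool in pools_usage:
        # Object-gateway pools are only checked when they match known prefixes.
        if RGW_PART in ceph_pool['name']:
            if not ceph_pool['name'].startswith(RGW_PREFIXES):
                continue
        if int(ceph_pool['stats']['bytes_used']) > 0:
            return False  # pool is not empty
    return True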

View File

@@ -1005,11 +1005,9 @@ class CephOperator(object):
         storage_hosts = self._db_api.ihost_get_by_personality(
             constants.STORAGE)
-        is_aio_kubernetes = (
-            tsc.system_type == constants.TIS_AIO_BUILD and
-            utils.is_kubernetes_config(self._db_api))
+        is_aio = tsc.system_type == constants.TIS_AIO_BUILD
-        if not storage_hosts and is_aio_kubernetes:
+        if not storage_hosts and is_aio:
             storage_hosts = self._db_api.ihost_get_by_personality(
                 constants.CONTROLLER)

View File

@@ -1575,13 +1575,6 @@ class ConductorManager(service.PeriodicService):
                 host.action == constants.FORCE_UNLOCK_ACTION or
                 host.action == constants.UNLOCK_ACTION):
-            # TODO(CephPoolsDecouple): remove
-            # Ensure the OSD pools exists. In the case of a system restore,
-            # the pools must be re-created when the first storage node is
-            # unlocked.
-            if not utils.is_kubernetes_config(self.dbapi):
-                self._ceph.configure_osd_pools()
             # Generate host configuration files
             self._puppet.update_host_config(host)
         else:
else:
@@ -4324,35 +4317,6 @@ class ConductorManager(service.PeriodicService):
         if not availability:
             return
-        kubernetes_config = utils.is_kubernetes_config(self.dbapi)
-        if (cutils.host_has_function(ihost, constants.WORKER) and not
-                kubernetes_config):
-            if availability == constants.VIM_SERVICES_ENABLED:
-                # report to nova the host aggregate groupings now that
-                # the worker node is available
-                LOG.info("AGG iplatform available for ihost= %s imsg= %s" %
-                         (ihost_uuid, imsg_dict))
-                # AGG10 noted 13secs in vbox between nova manifests applied and
-                # reported by inv to conductor and available signal to
-                # nova conductor
-                for attempts in range(1, 10):
-                    try:
-                        if self._openstack.nova_host_available(ihost_uuid):
-                            break
-                        else:
-                            LOG.error(
-                                "AGG iplatform attempt failed for ihost= %s imsg= %s" % (
-                                    ihost_uuid, imsg_dict))
-                    except Exception:
-                        LOG.exception("nova_host_available exception, continuing!")
-                    time.sleep(2)
-            elif availability == constants.AVAILABILITY_OFFLINE:
-                LOG.debug("AGG iplatform not available for ihost= %s imsg= %s" % (ihost_uuid, imsg_dict))
-                self._openstack.nova_host_offline(ihost_uuid)
         if ((ihost.personality == constants.STORAGE and
                 ihost.hostname == constants.STORAGE_0_HOSTNAME) or
                 (ihost.personality == constants.CONTROLLER)):
@@ -4373,41 +4337,6 @@ class ConductorManager(service.PeriodicService):
             self.dbapi.ihost_update(ihost_uuid,
                                     {'capabilities': ihost.capabilities})
-        storage_lvm = StorageBackendConfig.get_configured_backend_conf(
-            self.dbapi,
-            constants.CINDER_BACKEND_LVM
-        )
-        if (storage_lvm and ihost.personality == constants.CONTROLLER and
-                not kubernetes_config):
-            LOG.debug("iplatform monitor check system has lvm backend")
-            cinder_device = cutils._get_cinder_device(self.dbapi, ihost.id)
-            idisks = self.dbapi.idisk_get_by_ihost(ihost_uuid)
-            for idisk in idisks:
-                LOG.debug("checking for cinder disk device_path=%s "
-                          "cinder_device=%s" %
-                          (idisk.device_path, cinder_device))
-                if ((idisk.device_path and
-                        idisk.device_path == cinder_device) or
-                        (idisk.device_node and
-                         idisk.device_node == cinder_device)):
-                    idisk_capabilities = idisk.capabilities
-                    idisk_dict = {'device_function': 'cinder_device'}
-                    idisk_capabilities.update(idisk_dict)
-                    idisk_val = {'capabilities': idisk_capabilities}
-                    LOG.info("SYS_I MATCH host %s device_node %s cinder_device %s idisk.uuid %s val %s" %
-                             (ihost.hostname,
-                              idisk.device_node,
-                              cinder_device,
-                              idisk.uuid,
-                              idisk_val))
-                    self.dbapi.idisk_update(idisk.uuid, idisk_val)
-        if availability == constants.VIM_SERVICES_ENABLED:
-            self._resize_cinder_volumes()
         if availability == constants.AVAILABILITY_AVAILABLE:
             if imsg_dict.get(constants.SYSINV_AGENT_FIRST_REPORT):
                 # This should be run once after a node boot
@@ -5014,8 +4943,7 @@ class ConductorManager(service.PeriodicService):
                 self._audit_ihost_action(host)
     def _audit_kubernetes_labels(self, hosts):
-        if (not utils.is_kubernetes_config(self.dbapi) or
-                not cutils.is_initial_config_complete()):
+        if not cutils.is_initial_config_complete():
             LOG.debug("_audit_kubernetes_labels skip")
             return
@@ -5047,29 +4975,8 @@ class ConductorManager(service.PeriodicService):
     # TODO(CephPoolsDecouple): remove
     @periodic_task.periodic_task(spacing=60)
     def _osd_pool_audit(self, context):
-        if utils.is_kubernetes_config(self.dbapi):
-            LOG.debug("_osd_pool_audit skip")
-            return
-        # Only do the audit if ceph is configured.
-        if not StorageBackendConfig.has_backend(
-            self.dbapi,
-            constants.CINDER_BACKEND_CEPH
-        ):
-            return
-        LOG.debug("_osd_pool_audit")
-        # Only run the pool audit task if we have at least one storage node
-        # available. Pools are created with initial PG num values and quotas
-        # when the first OSD is added. This is done with only controller-0
-        # and controller-1 forming a quorum in the cluster. Trigger the code
-        # that will look to scale the PG num values and validate pool quotas
-        # once a storage host becomes available.
-        if self._ceph.get_ceph_cluster_info_availability():
-            # periodically, perform audit of OSD pool
-            LOG.debug("Sysinv Conductor running periodic OSD pool audit task.")
-            self._ceph.audit_osd_pools_by_tier()
+        LOG.debug("_osd_pool_audit skip")
+        return
     def set_backend_to_err(self, backend):
         """Set backend state to error"""
@@ -7021,7 +6928,6 @@ class ConductorManager(service.PeriodicService):
         system = self.dbapi.isystem_get_one()
         system_dc_role = system.get('distributed_cloud_role', None)
-        kubernetes_config = system.capabilities.get('kubernetes_enabled', False)
         LOG.info("Local Region Name: %s" % system.region_name)
@@ -7222,44 +7128,43 @@
             data['name'], data['logical_volume'], data['size']))
         self.dbapi.controller_fs_create(data)
-        if kubernetes_config:
-            docker_lv_size = constants.KUBERNETES_DOCKER_STOR_SIZE
-            data = {
-                'name': constants.FILESYSTEM_NAME_DOCKER,
-                'size': docker_lv_size,
-                'logical_volume': constants.FILESYSTEM_LV_DICT[
-                    constants.FILESYSTEM_NAME_DOCKER],
-                'replicated': False,
-            }
-            LOG.info("Creating FS:%s:%s %d" % (
-                data['name'], data['logical_volume'], data['size']))
-            self.dbapi.controller_fs_create(data)
-            # ETCD fs added to cgts-lv
-            etcd_lv_size = constants.ETCD_STOR_SIZE
-            data_etcd = {
-                'name': constants.FILESYSTEM_NAME_ETCD,
-                'size': etcd_lv_size,
-                'logical_volume': constants.FILESYSTEM_LV_DICT[
-                    constants.FILESYSTEM_NAME_ETCD],
-                'replicated': True,
-            }
-            LOG.info("Creating FS:%s:%s %d" % (
-                data_etcd['name'], data_etcd['logical_volume'], data_etcd['size']))
-            self.dbapi.controller_fs_create(data_etcd)
-            data = {
-                'name': constants.FILESYSTEM_NAME_DOCKER_DISTRIBUTION,
-                'size': constants.DOCKER_DISTRIBUTION_STOR_SIZE,
-                'logical_volume': constants.FILESYSTEM_LV_DICT[
-                    constants.FILESYSTEM_NAME_DOCKER_DISTRIBUTION],
-                'replicated': True,
-            }
-            LOG.info("Creating FS:%s:%s %d" % (
-                data['name'], data['logical_volume'], data['size']))
-            self.dbapi.controller_fs_create(data)
+        docker_lv_size = constants.KUBERNETES_DOCKER_STOR_SIZE
+        data = {
+            'name': constants.FILESYSTEM_NAME_DOCKER,
+            'size': docker_lv_size,
+            'logical_volume': constants.FILESYSTEM_LV_DICT[
+                constants.FILESYSTEM_NAME_DOCKER],
+            'replicated': False,
+        }
+        LOG.info("Creating FS:%s:%s %d" % (
+            data['name'], data['logical_volume'], data['size']))
+        self.dbapi.controller_fs_create(data)
+        # ETCD fs added to cgts-lv
+        etcd_lv_size = constants.ETCD_STOR_SIZE
+        data_etcd = {
+            'name': constants.FILESYSTEM_NAME_ETCD,
+            'size': etcd_lv_size,
+            'logical_volume': constants.FILESYSTEM_LV_DICT[
+                constants.FILESYSTEM_NAME_ETCD],
+            'replicated': True,
+        }
+        LOG.info("Creating FS:%s:%s %d" % (
+            data_etcd['name'], data_etcd['logical_volume'], data_etcd['size']))
+        self.dbapi.controller_fs_create(data_etcd)
+        data = {
+            'name': constants.FILESYSTEM_NAME_DOCKER_DISTRIBUTION,
+            'size': constants.DOCKER_DISTRIBUTION_STOR_SIZE,
+            'logical_volume': constants.FILESYSTEM_LV_DICT[
+                constants.FILESYSTEM_NAME_DOCKER_DISTRIBUTION],
+            'replicated': True,
+        }
+        LOG.info("Creating FS:%s:%s %d" % (
+            data['name'], data['logical_volume'], data['size']))
+        self.dbapi.controller_fs_create(data)
         if (system_dc_role == constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER and
                 tsc.system_type != constants.TIS_AIO_BUILD):
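With the kubernetes_config guard gone, the conductor always creates the docker, etcd and docker-distribution controller filesystems. A condensed sketch of the resulting entries (filesystem names and sizes below are illustrative placeholders, not the sysinv constants; the dict layout mirrors what is passed to controller_fs_create):

# Sketch of the filesystems now created unconditionally; the helper stands in
# for the sequence of self.dbapi.controller_fs_create() calls in the hunk.
def build_kube_controller_fs_entries(docker_size, etcd_size, registry_size):
    return [
        {'name': 'docker', 'size': docker_size, 'replicated': False},
        {'name': 'etcd', 'size': etcd_size, 'replicated': True},
        {'name': 'docker-distribution', 'size': registry_size, 'replicated': True},
    ]


# Placeholder sizes in GiB, purely for illustration.
for fs in build_kube_controller_fs_entries(docker_size=30, etcd_size=5, registry_size=16):
    # In sysinv each entry also carries the backing logical volume name.
    print("would create controller fs %(name)s (%(size)d GiB)" % fs)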

View File

@@ -90,13 +90,6 @@ class BasePuppet(object):
         system = self._get_system()
         return system.capabilities.get('sdn_enabled', False)
-    def _kubernetes_enabled(self):
-        if self.dbapi is None:
-            return False
-        system = self._get_system()
-        return system.capabilities.get('kubernetes_enabled', False)
     def _https_enabled(self):
         if self.dbapi is None:
             return False

View File

@@ -594,8 +594,7 @@ class CinderPuppet(openstack.OpenstackBasePuppet):
                 enabled_backends))
         # Disable cinder services if kubernetes is enabled
-        if self._kubernetes_enabled():
-            is_service_enabled = False
+        is_service_enabled = False
         config.update({
             'openstack::cinder::params::service_enabled': is_service_enabled,

View File

@@ -31,50 +31,48 @@ class KubernetesPuppet(base.BasePuppet):
     def get_system_config(self):
         config = {}
-        if self._kubernetes_enabled():
-            config.update(
-                {'platform::kubernetes::params::enabled': True,
-                 'platform::kubernetes::params::pod_network_cidr':
-                     self._get_pod_network_cidr(),
-                 'platform::kubernetes::params::pod_network_ipversion':
-                     self._get_pod_network_ipversion(),
-                 'platform::kubernetes::params::service_network_cidr':
-                     self._get_cluster_service_subnet(),
-                 'platform::kubernetes::params::apiserver_advertise_address':
-                     self._get_cluster_host_address(),
-                 'platform::kubernetes::params::etcd_endpoint':
-                     self._get_etcd_endpoint(),
-                 'platform::kubernetes::params::service_domain':
-                     self._get_dns_service_domain(),
-                 'platform::kubernetes::params::dns_service_ip':
-                     self._get_dns_service_ip(),
-                 })
+        config.update(
+            {'platform::kubernetes::params::enabled': True,
+             'platform::kubernetes::params::pod_network_cidr':
+                 self._get_pod_network_cidr(),
+             'platform::kubernetes::params::pod_network_ipversion':
+                 self._get_pod_network_ipversion(),
+             'platform::kubernetes::params::service_network_cidr':
+                 self._get_cluster_service_subnet(),
+             'platform::kubernetes::params::apiserver_advertise_address':
+                 self._get_cluster_host_address(),
+             'platform::kubernetes::params::etcd_endpoint':
+                 self._get_etcd_endpoint(),
+             'platform::kubernetes::params::service_domain':
+                 self._get_dns_service_domain(),
+             'platform::kubernetes::params::dns_service_ip':
+                 self._get_dns_service_ip(),
+             })
         return config
     def get_secure_system_config(self):
         config = {}
-        if self._kubernetes_enabled():
-            # This is retrieving the certificates that 'kubeadm init'
-            # generated. We will want to change this to generate the
-            # certificates ourselves, store in hiera and then feed those
-            # back into 'kubeadm init'.
-            if os.path.exists('/etc/kubernetes/pki/ca.crt'):
-                # Store required certificates in configuration.
-                with open('/etc/kubernetes/pki/ca.crt', 'r') as f:
-                    ca_crt = f.read()
-                with open('/etc/kubernetes/pki/ca.key', 'r') as f:
-                    ca_key = f.read()
-                with open('/etc/kubernetes/pki/sa.key', 'r') as f:
-                    sa_key = f.read()
-                with open('/etc/kubernetes/pki/sa.pub', 'r') as f:
-                    sa_pub = f.read()
-                config.update(
-                    {'platform::kubernetes::params::ca_crt': ca_crt,
-                     'platform::kubernetes::params::ca_key': ca_key,
-                     'platform::kubernetes::params::sa_key': sa_key,
-                     'platform::kubernetes::params::sa_pub': sa_pub,
-                     })
+        # This is retrieving the certificates that 'kubeadm init'
+        # generated. We will want to change this to generate the
+        # certificates ourselves, store in hiera and then feed those
+        # back into 'kubeadm init'.
+        if os.path.exists('/etc/kubernetes/pki/ca.crt'):
+            # Store required certificates in configuration.
+            with open('/etc/kubernetes/pki/ca.crt', 'r') as f:
+                ca_crt = f.read()
+            with open('/etc/kubernetes/pki/ca.key', 'r') as f:
+                ca_key = f.read()
+            with open('/etc/kubernetes/pki/sa.key', 'r') as f:
+                sa_key = f.read()
+            with open('/etc/kubernetes/pki/sa.pub', 'r') as f:
+                sa_pub = f.read()
+            config.update(
+                {'platform::kubernetes::params::ca_crt': ca_crt,
+                 'platform::kubernetes::params::ca_key': ca_key,
+                 'platform::kubernetes::params::sa_key': sa_key,
+                 'platform::kubernetes::params::sa_pub': sa_pub,
+                 })
         return config
     def get_host_config(self, host):
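KubernetesPuppet now emits its hiera parameters unconditionally, and get_secure_system_config keeps feeding the certificates generated by 'kubeadm init' into hiera whenever they exist on disk. A trimmed sketch of that read-if-present pattern (paths as in the hunk above; only one of the config keys is shown):

# Sketch of the secure-config collection; only the CA certificate is shown.
import os


def get_secure_system_config():
    config = {}
    if os.path.exists('/etc/kubernetes/pki/ca.crt'):
        with open('/etc/kubernetes/pki/ca.crt', 'r') as f:
            config['platform::kubernetes::params::ca_crt'] = f.read()
    return config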

View File

@@ -637,7 +637,4 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
         return url
     def _enable_nova_compute(self):
-        if self._kubernetes_enabled():
-            return False
-        else:
-            return True
+        return False