Remove optionality of kubernetes from manifests

Anywhere the kubernetes::params::enabled flag
was checked, it is now always considered to be true.

This leaves some manifest code that is never invoked,
so a subsequent commit will clean up those dead code blocks.

The openstack manifests are not included in this change
because most of those will be removed in a later commit.

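As an illustration only (the class names below are made up), the pattern
applied throughout this change is to drop the always-true guard and leave
the else branch behind as dead code:

    # Before: behaviour gated on the kubernetes enabled flag
    class platform::example
      inherits ::platform::example::params {

      include ::platform::kubernetes::params

      if $::platform::kubernetes::params::enabled {
        include ::platform::example::k8s
      } else {
        include ::platform::example::legacy
      }
    }

    # After: the flag is always true, so only the first branch remains;
    # the legacy branch is now unreachable and is removed in a follow-up
    class platform::example
      inherits ::platform::example::params {

      include ::platform::example::k8s
    }
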
Story: 2004764
Task: 29821
Change-Id: Id47c69d6da2d243c607bcb3db0836073321e4146
Signed-off-by: Al Bailey <Al.Bailey@windriver.com>
Al Bailey 2019-03-04 10:19:20 -06:00
parent 1b22b5313d
commit 384f0a23de
13 changed files with 431 additions and 637 deletions


@@ -164,10 +164,8 @@ class platform::ceph::post
class platform::ceph::monitor
inherits ::platform::ceph::params {
include ::platform::kubernetes::params
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
$k8s_enabled = $::platform::kubernetes::params::enabled
if $service_enabled {
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
@@ -191,7 +189,7 @@ class platform::ceph::monitor
$configure_ceph_mon = false
}
if $::personality == 'worker' and ! $configure_ceph_mon and $k8s_enabled {
if $::personality == 'worker' and ! $configure_ceph_mon {
# Reserve space for ceph-mon on all worker nodes.
include ::platform::filesystem::params
logical_volume { $mon_lv_name:
@@ -222,7 +220,7 @@ class platform::ceph::monitor
fs_options => $mon_fs_options,
} -> Class['::ceph']
if $k8s_enabled and $::personality == 'worker' {
if $::personality == 'worker' {
Platform::Filesystem[$mon_lv_name] -> Class['platform::filesystem::docker']
}


@@ -35,17 +35,11 @@ class platform::config::file {
include ::platform::network::infra::params
include ::platform::network::oam::params
include ::platform::network::cluster_host::params
include ::platform::kubernetes::params
include ::openstack::horizon::params
$kubernetes_enabled = $::platform::kubernetes::params::enabled
# dependent template variables
$management_interface = $::platform::network::mgmt::params::interface_name
if $kubernetes_enabled {
$infrastructure_interface = $::platform::network::cluster_host::params::interface_name
} else {
$infrastructure_interface = $::platform::network::infra::params::interface_name
}
$infrastructure_interface = $::platform::network::cluster_host::params::interface_name
$oam_interface = $::platform::network::oam::params::interface_name
$platform_conf = '/etc/platform/platform.conf'


@@ -64,14 +64,8 @@ class platform::dns::dnsmasq {
}
include ::platform::kubernetes::params
$kubernetes_enabled = $::platform::kubernetes::params::enabled
if $kubernetes_enabled {
$service_domain = $::platform::kubernetes::params::service_domain
$dns_service_ip = $::platform::kubernetes::params::dns_service_ip
} else {
$service_domain = undef
$dns_service_ip = undef
}
$service_domain = $::platform::kubernetes::params::service_domain
$dns_service_ip = $::platform::kubernetes::params::dns_service_ip
file { '/etc/dnsmasq.conf':
ensure => 'present',


@@ -13,38 +13,33 @@ class platform::docker::params (
class platform::docker::config
inherits ::platform::docker::params {
include ::platform::kubernetes::params
if $::platform::kubernetes::params::enabled {
if $http_proxy or $https_proxy {
file { '/etc/systemd/system/docker.service.d':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { '/etc/systemd/system/docker.service.d/http-proxy.conf':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/dockerproxy.conf.erb'),
}
if $http_proxy or $https_proxy {
file { '/etc/systemd/system/docker.service.d':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
Class['::platform::filesystem::docker'] ~> Class[$name]
service { 'docker':
ensure => 'running',
name => 'docker',
enable => true,
require => Package['docker']
}
-> exec { 'enable-docker':
command => '/usr/bin/systemctl enable docker.service',
-> file { '/etc/systemd/system/docker.service.d/http-proxy.conf':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/dockerproxy.conf.erb'),
}
}
Class['::platform::filesystem::docker'] ~> Class[$name]
service { 'docker':
ensure => 'running',
name => 'docker',
enable => true,
require => Package['docker']
}
-> exec { 'enable-docker':
command => '/usr/bin/systemctl enable docker.service',
}
}
class platform::docker::install


@@ -3,53 +3,50 @@ class platform::dockerdistribution::params (
class platform::dockerdistribution::config
inherits ::platform::dockerdistribution::params {
$enabled = $::platform::kubernetes::params::enabled
if $enabled {
include ::platform::network::mgmt::params
include ::platform::docker::params
include ::platform::network::mgmt::params
include ::platform::docker::params
$docker_registry_ip = $::platform::network::mgmt::params::controller_address
$docker_registry_ip = $::platform::network::mgmt::params::controller_address
# check insecure registries
if $::platform::docker::params::insecure_registry {
# insecure registry is true means unified registry was set
$insecure_registries = "\"${::platform::docker::params::k8s_registry}\", \"${docker_registry_ip}:9001\""
} else {
$insecure_registries = "\"${docker_registry_ip}:9001\""
}
# check insecure registries
if $::platform::docker::params::insecure_registry {
# insecure registry is true means unified registry was set
$insecure_registries = "\"${::platform::docker::params::k8s_registry}\", \"${docker_registry_ip}:9001\""
} else {
$insecure_registries = "\"${docker_registry_ip}:9001\""
}
# currently docker registry is running insecure mode
# when proper authentication is implemented, this would go away
file { '/etc/docker':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/docker/daemon.json':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/insecuredockerregistry.conf.erb'),
}
# currently docker registry is running insecure mode
# when proper authentication is implemented, this would go away
file { '/etc/docker':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/docker/daemon.json':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/insecuredockerregistry.conf.erb'),
}
-> file { '/etc/docker-distribution/registry/config.yml':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/dockerdistribution.conf.erb'),
}
-> file { '/etc/docker-distribution/registry/config.yml':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/dockerdistribution.conf.erb'),
}
# copy the startup script to where it is supposed to be
file {'docker_distribution_initd_script':
ensure => 'present',
path => '/etc/init.d/docker-distribution',
mode => '0755',
source => "puppet:///modules/${module_name}/docker-distribution"
}
# copy the startup script to where it is supposed to be
file {'docker_distribution_initd_script':
ensure => 'present',
path => '/etc/init.d/docker-distribution',
mode => '0755',
source => "puppet:///modules/${module_name}/docker-distribution"
}
}
@@ -57,47 +54,40 @@ class platform::dockerdistribution::config
# the registry. This will go away when proper authentication is implemented
class platform::dockerdistribution::compute
inherits ::platform::dockerdistribution::params {
include ::platform::kubernetes::params
$enabled = $::platform::kubernetes::params::enabled
if $enabled {
include ::platform::network::mgmt::params
include ::platform::docker::params
include ::platform::network::mgmt::params
include ::platform::docker::params
$docker_registry_ip = $::platform::network::mgmt::params::controller_address
$docker_registry_ip = $::platform::network::mgmt::params::controller_address
# check insecure registries
if $::platform::docker::params::insecure_registry {
# insecure registry is true means unified registry was set
$insecure_registries = "\"${::platform::docker::params::k8s_registry}\", \"${docker_registry_ip}:9001\""
} else {
$insecure_registries = "\"${docker_registry_ip}:9001\""
}
# check insecure registries
if $::platform::docker::params::insecure_registry {
# insecure registry is true means unified registry was set
$insecure_registries = "\"${::platform::docker::params::k8s_registry}\", \"${docker_registry_ip}:9001\""
} else {
$insecure_registries = "\"${docker_registry_ip}:9001\""
}
# currently docker registry is running insecure mode
# when proper authentication is implemented, this would go away
file { '/etc/docker':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/docker/daemon.json':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/insecuredockerregistry.conf.erb'),
}
# currently docker registry is running insecure mode
# when proper authentication is implemented, this would go away
file { '/etc/docker':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/docker/daemon.json':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/insecuredockerregistry.conf.erb'),
}
}
class platform::dockerdistribution
inherits ::platform::dockerdistribution::params {
$enabled = $::platform::kubernetes::params::enabled
if $enabled {
include platform::dockerdistribution::config
include platform::dockerdistribution::config
Class['::platform::docker::config'] -> Class[$name]
}
Class['::platform::docker::config'] -> Class[$name]
}


@@ -319,8 +319,6 @@ class platform::drbd::etcd::params (
class platform::drbd::etcd (
) inherits ::platform::drbd::etcd::params {
include ::platform::kubernetes::params
if str2bool($::is_initial_config_primary) {
$drbd_primary = true
$drbd_initial = true
@@ -333,20 +331,18 @@ class platform::drbd::etcd (
$drbd_manage = undef
}
if $::platform::kubernetes::params::enabled {
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => $drbd_manage,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
}
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => $drbd_manage,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
}
}
@@ -363,8 +359,6 @@ class platform::drbd::dockerdistribution::params (
class platform::drbd::dockerdistribution ()
inherits ::platform::drbd::dockerdistribution::params {
include ::platform::kubernetes::params
if str2bool($::is_initial_config_primary) {
$drbd_primary = true
$drbd_initial = true
@@ -377,20 +371,18 @@ class platform::drbd::dockerdistribution ()
$drbd_manage = undef
}
if $::platform::kubernetes::params::enabled {
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => $drbd_manage,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
}
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => $drbd_manage,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
}
}


@@ -67,20 +67,15 @@ class platform::etcd::init
class platform::etcd
inherits ::platform::etcd::params {
include ::platform::kubernetes::params
Class['::platform::drbd::etcd'] -> Class[$name]
if $::platform::kubernetes::params::enabled {
include ::platform::etcd::datadir
include ::platform::etcd::setup
include ::platform::etcd::init
include ::platform::etcd::datadir
include ::platform::etcd::setup
include ::platform::etcd::init
Class['::platform::etcd::datadir']
-> Class['::platform::etcd::setup']
-> Class['::platform::etcd::init']
}
Class['::platform::etcd::datadir']
-> Class['::platform::etcd::setup']
-> Class['::platform::etcd::init']
}
class platform::etcd::datadir


@@ -178,18 +178,14 @@ class platform::filesystem::docker::params (
class platform::filesystem::docker
inherits ::platform::filesystem::docker::params {
include ::platform::kubernetes::params
if $::platform::kubernetes::params::enabled {
platform::filesystem { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
mountpoint => $mountpoint,
fs_type => $fs_type,
fs_options => $fs_options,
fs_use_all => $fs_use_all,
mode => '0711',
}
platform::filesystem { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
mountpoint => $mountpoint,
fs_type => $fs_type,
fs_options => $fs_options,
fs_use_all => $fs_use_all,
mode => '0711',
}
}
@@ -219,33 +215,25 @@ class platform::filesystem::img_conversions
class platform::filesystem::storage {
include ::platform::kubernetes::params
if $::platform::kubernetes::params::enabled {
class {'platform::filesystem::docker::params' :
lv_size => 30
}
-> class {'platform::filesystem::docker' :
}
Class['::platform::lvm::vg::cgts_vg'] -> Class['::platform::filesystem::docker']
class {'platform::filesystem::docker::params' :
lv_size => 30
}
-> class {'platform::filesystem::docker' :
}
Class['::platform::lvm::vg::cgts_vg'] -> Class['::platform::filesystem::docker']
}
class platform::filesystem::compute {
include ::platform::kubernetes::params
if $::platform::kubernetes::params::enabled {
class {'platform::filesystem::docker::params' :
lv_size => 30
}
-> class {'platform::filesystem::docker' :
}
Class['::platform::lvm::vg::cgts_vg'] -> Class['::platform::filesystem::docker']
class {'platform::filesystem::docker::params' :
lv_size => 30
}
-> class {'platform::filesystem::docker' :
}
Class['::platform::lvm::vg::cgts_vg'] -> Class['::platform::filesystem::docker']
}
class platform::filesystem::controller {


@@ -143,9 +143,6 @@ class platform::haproxy::runtime {
}
include ::openstack::keystone::haproxy
include ::openstack::neutron::haproxy
if $::platform::kubernetes::params::enabled != true {
include ::openstack::nova::haproxy
}
include ::openstack::glance::haproxy
include ::openstack::cinder::haproxy
include ::openstack::heat::haproxy


@@ -6,143 +6,118 @@ class platform::helm::repository::params(
class platform::helm
inherits ::platform::helm::repository::params {
include ::platform::kubernetes::params
include ::platform::docker::params
if $::platform::kubernetes::params::enabled {
file {$source_helm_repo_dir:
ensure => directory,
path => $source_helm_repo_dir,
owner => 'www',
require => User['www']
}
file {$source_helm_repo_dir:
ensure => directory,
path => $source_helm_repo_dir,
owner => 'www',
require => User['www']
}
-> file {$target_helm_repo_dir:
ensure => directory,
path => $target_helm_repo_dir,
owner => 'www',
require => User['www']
}
-> file {$target_helm_repo_dir:
ensure => directory,
path => $target_helm_repo_dir,
owner => 'www',
require => User['www']
}
if (str2bool($::is_initial_config) and $::personality == 'controller') {
if (str2bool($::is_initial_config) and $::personality == 'controller') {
if str2bool($::is_initial_config_primary) {
if $::platform::docker::params::gcr_registry {
$gcr_registry = $::platform::docker::params::gcr_registry
} else {
$gcr_registry = 'gcr.io'
}
if $::platform::docker::params::quay_registry {
$quay_registry = $::platform::docker::params::quay_registry
} else {
$quay_registry = 'quay.io'
}
Class['::platform::kubernetes::master']
# TODO(jrichard): Upversion tiller image to v2.11.1 once released.
-> exec { 'load tiller docker image':
command => "docker image pull ${gcr_registry}/kubernetes-helm/tiller:v2.12.1",
logoutput => true,
}
# TODO(tngo): If and when tiller image is upversioned, please ensure armada compatibility as part of the test
-> exec { 'load armada docker image':
command => "docker image pull ${quay_registry}/airshipit/armada:f807c3a1ec727c883c772ffc618f084d960ed5c9",
logoutput => true,
}
-> exec { 'create service account for tiller':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create serviceaccount --namespace kube-system tiller',
logoutput => true,
}
-> exec { 'create cluster role binding for tiller service account':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller', # lint:ignore:140chars
logoutput => true,
}
# TODO(jrichard): Upversion tiller image to v2.11.1 once released.
-> exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/wrsroot' ],
command => "helm init --skip-refresh --service-account tiller --node-selectors \"node-role.kubernetes.io/master\"=\"\" --tiller-image=${gcr_registry}/kubernetes-helm/tiller:v2.12.1", # lint:ignore:140chars
logoutput => true,
user => 'wrsroot',
group => 'wrs',
require => User['wrsroot']
}
exec { "bind mount ${target_helm_repo_dir}":
command => "mount -o bind -t ext4 ${source_helm_repo_dir} ${target_helm_repo_dir}",
require => Exec['add local starlingx helm repo']
}
# it needs to create the index file after the bind mount, otherwise
# helm repo could not be updated until application-upload adds index
-> exec { 'generate helm repo index on source':
command => "helm repo index ${source_helm_repo_dir}",
logoutput => true,
user => 'www',
group => 'www',
require => User['www']
}
if str2bool($::is_initial_config_primary) {
if $::platform::docker::params::gcr_registry {
$gcr_registry = $::platform::docker::params::gcr_registry
} else {
Class['::platform::kubernetes::master']
-> exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/wrsroot' ],
command => 'helm init --skip-refresh --client-only',
logoutput => true,
user => 'wrsroot',
group => 'wrs',
require => User['wrsroot']
}
$gcr_registry = 'gcr.io'
}
include ::openstack::horizon::params
$port = $::openstack::horizon::params::http_port
exec { 'restart lighttpd for helm':
require => [File['/etc/lighttpd/lighttpd.conf', $target_helm_repo_dir], Exec['initialize helm']],
command => 'systemctl restart lighttpd.service',
if $::platform::docker::params::quay_registry {
$quay_registry = $::platform::docker::params::quay_registry
} else {
$quay_registry = 'quay.io'
}
Class['::platform::kubernetes::master']
# TODO(jrichard): Upversion tiller image to v2.11.1 once released.
-> exec { 'load tiller docker image':
command => "docker image pull ${gcr_registry}/kubernetes-helm/tiller:v2.12.1",
logoutput => true,
}
-> exec { 'generate helm repo index on target':
command => "helm repo index ${target_helm_repo_dir}",
# TODO(tngo): If and when tiller image is upversioned, please ensure armada compatibility as part of the test
-> exec { 'load armada docker image':
command => "docker image pull ${quay_registry}/airshipit/armada:f807c3a1ec727c883c772ffc618f084d960ed5c9",
logoutput => true,
}
-> exec { 'create service account for tiller':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create serviceaccount --namespace kube-system tiller',
logoutput => true,
}
-> exec { 'create cluster role binding for tiller service account':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller', # lint:ignore:140chars
logoutput => true,
}
# TODO(jrichard): Upversion tiller image to v2.11.1 once released.
-> exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/wrsroot' ],
command => "helm init --skip-refresh --service-account tiller --node-selectors \"node-role.kubernetes.io/master\"=\"\" --tiller-image=${gcr_registry}/kubernetes-helm/tiller:v2.12.1", # lint:ignore:140chars
logoutput => true,
user => 'wrsroot',
group => 'wrs',
require => User['wrsroot']
}
exec { "bind mount ${target_helm_repo_dir}":
command => "mount -o bind -t ext4 ${source_helm_repo_dir} ${target_helm_repo_dir}",
require => Exec['add local starlingx helm repo']
}
# it needs to create the index file after the bind mount, otherwise
# helm repo could not be updated until application-upload adds index
-> exec { 'generate helm repo index on source':
command => "helm repo index ${source_helm_repo_dir}",
logoutput => true,
user => 'www',
group => 'www',
require => User['www']
}
-> exec { 'add local starlingx helm repo':
before => Exec['Stop lighttpd'],
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf' , 'HOME=/home/wrsroot'],
command => "helm repo add starlingx http://127.0.0.1:${port}/helm_charts",
} else {
Class['::platform::kubernetes::master']
-> exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/wrsroot' ],
command => 'helm init --skip-refresh --client-only',
logoutput => true,
user => 'wrsroot',
group => 'wrs',
require => User['wrsroot']
}
}
}
}
class platform::helm::runtime
{
include ::platform::kubernetes::params
if $::platform::kubernetes::params::enabled {
include ::platform::users
include ::openstack::horizon::params
$port = $::openstack::horizon::params::http_port
exec { 'restart lighttpd for helm':
require => [File['/etc/lighttpd/lighttpd.conf', $target_helm_repo_dir], Exec['initialize helm']],
command => 'systemctl restart lighttpd.service',
logoutput => true,
}
exec { 'update local starlingx helm repo':
-> exec { 'generate helm repo index on target':
command => "helm repo index ${target_helm_repo_dir}",
logoutput => true,
user => 'www',
group => 'www',
require => User['www']
}
-> exec { 'add local starlingx helm repo':
before => Exec['Stop lighttpd'],
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf' , 'HOME=/home/wrsroot'],
command => "helm repo add starlingx http://127.0.0.1:${port}/helm_charts",
logoutput => true,
@@ -152,3 +127,20 @@ class platform::helm::runtime
}
}
}
class platform::helm::runtime
{
include ::platform::users
include ::openstack::horizon::params
$port = $::openstack::horizon::params::http_port
exec { 'update local starlingx helm repo':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf' , 'HOME=/home/wrsroot'],
command => "helm repo add starlingx http://127.0.0.1:${port}/helm_charts",
logoutput => true,
user => 'wrsroot',
group => 'wrs',
require => User['wrsroot']
}
}


@@ -1,5 +1,5 @@
class platform::kubernetes::params (
$enabled = false,
$enabled = true,
$pod_network_cidr = undef,
$service_network_cidr = undef,
$apiserver_advertise_address = undef,
@@ -293,20 +293,18 @@ class platform::kubernetes::master::init
class platform::kubernetes::master
inherits ::platform::kubernetes::params {
if $enabled {
contain ::platform::kubernetes::kubeadm
contain ::platform::kubernetes::master::init
contain ::platform::kubernetes::firewall
contain ::platform::kubernetes::kubeadm
contain ::platform::kubernetes::master::init
contain ::platform::kubernetes::firewall
Class['::platform::etcd'] -> Class[$name]
Class['::platform::docker::config'] -> Class[$name]
# Ensure DNS is configured as name resolution is required when
# kubeadm init is run.
Class['::platform::dns'] -> Class[$name]
Class['::platform::kubernetes::kubeadm']
-> Class['::platform::kubernetes::master::init']
-> Class['::platform::kubernetes::firewall']
}
Class['::platform::etcd'] -> Class[$name]
Class['::platform::docker::config'] -> Class[$name]
# Ensure DNS is configured as name resolution is required when
# kubeadm init is run.
Class['::platform::dns'] -> Class[$name]
Class['::platform::kubernetes::kubeadm']
-> Class['::platform::kubernetes::master::init']
-> Class['::platform::kubernetes::firewall']
}
class platform::kubernetes::worker::params (
@@ -356,7 +354,7 @@ class platform::kubernetes::worker
# Worker configuration is not required on AIO hosts, since the master
# will already be configured and includes support for running pods.
if $enabled and $::personality != 'controller' {
if $::personality != 'controller' {
contain ::platform::kubernetes::kubeadm
contain ::platform::kubernetes::worker::init
@@ -364,18 +362,16 @@ class platform::kubernetes::worker
-> Class['::platform::kubernetes::worker::init']
}
if $enabled {
file { '/var/run/.disable_worker_services':
ensure => file,
replace => no,
}
# TODO: The following exec is a workaround. Once kubernetes becomes the
# default installation, /etc/pmon.d/libvirtd.conf needs to be removed from
# the load.
exec { 'Update PMON libvirtd.conf':
command => "/bin/sed -i 's#mode = passive#mode = ignore #' /etc/pmon.d/libvirtd.conf",
onlyif => '/usr/bin/test -e /etc/pmon.d/libvirtd.conf'
}
file { '/var/run/.disable_worker_services':
ensure => file,
replace => no,
}
# TODO: The following exec is a workaround. Once kubernetes becomes the
# default installation, /etc/pmon.d/libvirtd.conf needs to be removed from
# the load.
exec { 'Update PMON libvirtd.conf':
command => "/bin/sed -i 's#mode = passive#mode = ignore #' /etc/pmon.d/libvirtd.conf",
onlyif => '/usr/bin/test -e /etc/pmon.d/libvirtd.conf'
}
}


@@ -8,7 +8,6 @@ class platform::nfv::params (
class platform::nfv {
include ::platform::params
include ::platform::amqp::params
include ::platform::kubernetes::params
group { 'nfv':
ensure => 'present',
@@ -35,17 +34,7 @@ class platform::nfv {
include ::nfv
include ::nfv::vim
if !$::platform::kubernetes::params::enabled {
class { '::nfv::nfvi':
rabbit_host => $::platform::amqp::params::host,
rabbit_port => $::platform::amqp::params::port,
rabbit_userid => $::platform::amqp::params::auth_user,
rabbit_password => $::platform::amqp::params::auth_password,
}
} else {
include ::nfv::nfvi
}
include ::nfv::nfvi
}


@@ -15,9 +15,6 @@ class platform::sm
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
include ::platform::kubernetes::params
$kubernetes_enabled = $::platform::kubernetes::params::enabled
include ::platform::network::pxeboot::params
if $::platform::network::pxeboot::params::interface_name {
$pxeboot_ip_interface = $::platform::network::pxeboot::params::interface_name
@@ -33,15 +30,11 @@ class platform::sm
$mgmt_ip_param_ip = $::platform::network::mgmt::params::controller_address
$mgmt_ip_param_mask = $::platform::network::mgmt::params::subnet_prefixlen
if $kubernetes_enabled {
# Repurposing the infra interface for cluster-host interface
include ::platform::network::cluster_host::params
$infra_ip_interface = $::platform::network::cluster_host::params::interface_name
$cluster_host_ip_param_ip = $::platform::network::cluster_host::params::controller_address
$cluster_host_ip_param_mask = $::platform::network::cluster_host::params::subnet_prefixlen
} else {
$infra_ip_interface = $::platform::network::infra::params::interface_name
}
# Repurposing the infra interface for cluster-host interface
include ::platform::network::cluster_host::params
$infra_ip_interface = $::platform::network::cluster_host::params::interface_name
$cluster_host_ip_param_ip = $::platform::network::cluster_host::params::controller_address
$cluster_host_ip_param_mask = $::platform::network::cluster_host::params::subnet_prefixlen
include ::platform::network::oam::params
$oam_ip_interface = $::platform::network::oam::params::interface_name
@@ -140,36 +133,16 @@ class platform::sm
$platform_nfs_ip_network_url = $::platform::network::mgmt::params::subnet_network_url
# CGCS NFS network is over the infrastructure network if configured
if $kubernetes_enabled {
$cgcs_nfs_ip_interface = $::platform::network::mgmt::params::interface_name
$cgcs_nfs_ip_param_ip = $::platform::network::mgmt::params::cgcs_nfs_address
$cgcs_nfs_ip_network_url = $::platform::network::mgmt::params::subnet_network_url
$cgcs_nfs_ip_param_mask = $::platform::network::mgmt::params::subnet_prefixlen
$cgcs_nfs_ip_interface = $::platform::network::mgmt::params::interface_name
$cgcs_nfs_ip_param_ip = $::platform::network::mgmt::params::cgcs_nfs_address
$cgcs_nfs_ip_network_url = $::platform::network::mgmt::params::subnet_network_url
$cgcs_nfs_ip_param_mask = $::platform::network::mgmt::params::subnet_prefixlen
# Re-using cinder-ip for cluster-host-ip for now
# This will be changed when the cluster-host-ip resource is added to SM
$cinder_ip_interface = $::platform::network::cluster_host::params::interface_name
$cinder_ip_param_ip = $::platform::network::cluster_host::params::controller_address
$cinder_ip_param_mask = $::platform::network::cluster_host::params::subnet_prefixlen
} else {
if $infra_ip_interface {
$cgcs_nfs_ip_interface = $::platform::network::infra::params::interface_name
$cgcs_nfs_ip_param_ip = $::platform::network::infra::params::cgcs_nfs_address
$cgcs_nfs_ip_network_url = $::platform::network::infra::params::subnet_network_url
$cgcs_nfs_ip_param_mask = $::platform::network::infra::params::subnet_prefixlen
$cinder_ip_interface = $::platform::network::infra::params::interface_name
$cinder_ip_param_mask = $::platform::network::infra::params::subnet_prefixlen
} else {
$cgcs_nfs_ip_interface = $::platform::network::mgmt::params::interface_name
$cgcs_nfs_ip_param_ip = $::platform::network::mgmt::params::cgcs_nfs_address
$cgcs_nfs_ip_network_url = $::platform::network::mgmt::params::subnet_network_url
$cgcs_nfs_ip_param_mask = $::platform::network::mgmt::params::subnet_prefixlen
$cinder_ip_interface = $::platform::network::mgmt::params::interface_name
$cinder_ip_param_mask = $::platform::network::mgmt::params::subnet_prefixlen
}
}
# Re-using cinder-ip for cluster-host-ip for now
# This will be changed when the cluster-host-ip resource is added to SM
$cinder_ip_interface = $::platform::network::cluster_host::params::interface_name
$cinder_ip_param_ip = $::platform::network::cluster_host::params::controller_address
$cinder_ip_param_mask = $::platform::network::cluster_host::params::subnet_prefixlen
$platform_nfs_subnet_url = "${platform_nfs_ip_network_url}/${platform_nfs_ip_param_mask}"
$cgcs_nfs_subnet_url = "${cgcs_nfs_ip_network_url}/${cgcs_nfs_ip_param_mask}"
@@ -229,9 +202,6 @@ class platform::sm
include ::openstack::cinder::params
$cinder_service_enabled = $::openstack::cinder::params::service_enabled
$cinder_region_name = $::openstack::cinder::params::region_name
if $kubernetes_enabled != true {
$cinder_ip_param_ip = $::openstack::cinder::params::cinder_address
}
$cinder_backends = $::openstack::cinder::params::enabled_backends
$cinder_drbd_resource = $::openstack::cinder::params::drbd_resource
$cinder_vg_name = $::openstack::cinder::params::cinder_vg_name
@@ -270,10 +240,8 @@ class platform::sm
$hostunit = '0'
$management_my_unit_ip = $::platform::network::mgmt::params::controller0_address
$oam_my_unit_ip = $::platform::network::oam::params::controller_address
if $kubernetes_enabled {
# Repurposing the infra interface for cluster-host interface
$infra_my_unit_ip = $::platform::network::cluster_host::params::controller_address
}
# Repurposing the infra interface for cluster-host interface
$infra_my_unit_ip = $::platform::network::cluster_host::params::controller_address
} else {
case $::hostname {
$controller_0_hostname: {
@@ -282,14 +250,9 @@ class platform::sm
$management_peer_unit_ip = $::platform::network::mgmt::params::controller1_address
$oam_my_unit_ip = $::platform::network::oam::params::controller0_address
$oam_peer_unit_ip = $::platform::network::oam::params::controller1_address
if $kubernetes_enabled {
# Repurposing the infra interface for cluster-host interface
$infra_my_unit_ip = $::platform::network::cluster_host::params::controller0_address
$infra_peer_unit_ip = $::platform::network::cluster_host::params::controller1_address
} else {
$infra_my_unit_ip = $::platform::network::infra::params::controller0_address
$infra_peer_unit_ip = $::platform::network::infra::params::controller1_address
}
# Repurposing the infra interface for cluster-host interface
$infra_my_unit_ip = $::platform::network::cluster_host::params::controller0_address
$infra_peer_unit_ip = $::platform::network::cluster_host::params::controller1_address
}
$controller_1_hostname: {
$hostunit = '1'
@@ -297,14 +260,9 @@ class platform::sm
$management_peer_unit_ip = $::platform::network::mgmt::params::controller0_address
$oam_my_unit_ip = $::platform::network::oam::params::controller1_address
$oam_peer_unit_ip = $::platform::network::oam::params::controller0_address
if $kubernetes_enabled {
# Repurposing the infra interface for cluster-host interface
$infra_my_unit_ip = $::platform::network::cluster_host::params::controller1_address
$infra_peer_unit_ip = $::platform::network::cluster_host::params::controller0_address
} else {
$infra_my_unit_ip = $::platform::network::infra::params::controller1_address
$infra_peer_unit_ip = $::platform::network::infra::params::controller0_address
}
# Repurposing the infra interface for cluster-host interface
$infra_my_unit_ip = $::platform::network::cluster_host::params::controller1_address
$infra_peer_unit_ip = $::platform::network::cluster_host::params::controller0_address
}
default: {
$hostunit = '2'
@@ -327,21 +285,12 @@ class platform::sm
# Workaround for the time being to prevent SM from enabling the openstack
# services when kubernetes is enabled to avoid making changes to individual
# openstack manifests
if $kubernetes_enabled {
$heat_service_enabled = false
$murano_configured = false
$ironic_configured = false
$magnum_configured = false
$gnocchi_enabled = false
$panko_enabled = false
} else {
$heat_service_enabled = $::openstack::heat::params::service_enabled
$murano_configured = $::openstack::murano::params::service_enabled
$ironic_configured = $::openstack::ironic::params::service_enabled
$magnum_configured = $::openstack::magnum::params::service_enabled
$gnocchi_enabled = $::openstack::gnocchi::params::service_enabled
$panko_enabled = $::openstack::panko::params::service_enabled
}
$heat_service_enabled = false
$murano_configured = false
$ironic_configured = false
$magnum_configured = false
$gnocchi_enabled = false
$panko_enabled = false
# lint:ignore:140chars
@@ -367,10 +316,8 @@ class platform::sm
command => "sm-configure interface controller management-interface ${mgmt_ip_multicast} ${management_my_unit_ip} 2222 2223 \"\" 2222 2223",
}
if $kubernetes_enabled {
exec { 'Configure Cluster Host Interface':
command => "sm-configure interface controller infrastructure-interface \"\" ${infra_my_unit_ip} 2222 2223 \"\" 2222 2223",
}
exec { 'Configure Cluster Host Interface':
command => "sm-configure interface controller infrastructure-interface \"\" ${infra_my_unit_ip} 2222 2223 \"\" 2222 2223",
}
} else {
@@ -380,10 +327,8 @@ class platform::sm
exec { 'Configure Management Interface':
command => "sm-configure interface controller management-interface ${mgmt_ip_multicast} ${management_my_unit_ip} 2222 2223 ${management_peer_unit_ip} 2222 2223",
}
if $kubernetes_enabled or $infra_ip_interface {
exec { 'Configure Infrastructure Interface':
command => "sm-configure interface controller infrastructure-interface ${infra_ip_multicast} ${infra_my_unit_ip} 2222 2223 ${infra_peer_unit_ip} 2222 2223",
}
exec { 'Configure Infrastructure Interface':
command => "sm-configure interface controller infrastructure-interface ${infra_ip_multicast} ${infra_my_unit_ip} 2222 2223 ${infra_peer_unit_ip} 2222 2223",
}
}
@@ -449,38 +394,23 @@ class platform::sm
command => "sm-configure service_instance rabbit rabbit \"server=${rabbitmq_server},ctl=${rabbitmqctl},pid_file=${rabbit_pid},nodename=${rabbit_node_name},mnesia_base=${rabbit_mnesia_base},ip=${mgmt_ip_param_ip}\"",
}
if $kubernetes_enabled {
exec { 'Provision Docker Distribution FS in SM (service-group-member dockerdistribution-fs)':
command => 'sm-provision service-group-member controller-services dockerdistribution-fs',
}
-> exec { 'Provision Docker Distribution FS in SM (service dockerdistribution-fs)':
command => 'sm-provision service dockerdistribution-fs',
}
-> exec { 'Provision Docker Distribution DRBD in SM (service-group-member drbd-dockerdistribution)':
command => 'sm-provision service-group-member controller-services drbd-dockerdistribution',
}
-> exec { 'Provision Docker Distribution DRBD in SM (service drbd-dockerdistribution)':
command => 'sm-provision service drbd-dockerdistribution',
}
-> exec { 'Configure Docker Distribution DRBD':
command => "sm-configure service_instance drbd-dockerdistribution drbd-dockerdistribution:${hostunit} \"drbd_resource=${dockerdistribution_drbd_resource}\"",
}
-> exec { 'Configure Docker Distribution FileSystem':
command => "sm-configure service_instance dockerdistribution-fs dockerdistribution-fs \"device=${dockerdistribution_fs_device},directory=${dockerdistribution_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
}
} else {
exec { 'Deprovision Docker Distribution FS in SM (service-group-member dockerdistribution-fs)':
command => 'sm-deprovision service-group-member controller-services dockerdistribution-fs',
}
-> exec { 'Deprovision Docker Distribution FS in SM (service dockerdistribution-fs)':
command => 'sm-deprovision service dockerdistribution-fs',
}
-> exec { 'Deprovision Docker Distribution DRBD in SM (service-group-member drbd-dockerdistribution)':
command => 'sm-deprovision service-group-member controller-services drbd-dockerdistribution',
}
-> exec { 'Deprovision Docker Distribution DRBD in SM (service drbd-dockerdistribution)':
command => 'sm-deprovision service drbd-dockerdistribution',
}
exec { 'Provision Docker Distribution FS in SM (service-group-member dockerdistribution-fs)':
command => 'sm-provision service-group-member controller-services dockerdistribution-fs',
}
-> exec { 'Provision Docker Distribution FS in SM (service dockerdistribution-fs)':
command => 'sm-provision service dockerdistribution-fs',
}
-> exec { 'Provision Docker Distribution DRBD in SM (service-group-member drbd-dockerdistribution)':
command => 'sm-provision service-group-member controller-services drbd-dockerdistribution',
}
-> exec { 'Provision Docker Distribution DRBD in SM (service drbd-dockerdistribution)':
command => 'sm-provision service drbd-dockerdistribution',
}
-> exec { 'Configure Docker Distribution DRBD':
command => "sm-configure service_instance drbd-dockerdistribution drbd-dockerdistribution:${hostunit} \"drbd_resource=${dockerdistribution_drbd_resource}\"",
}
-> exec { 'Configure Docker Distribution FileSystem':
command => "sm-configure service_instance dockerdistribution-fs dockerdistribution-fs \"device=${dockerdistribution_fs_device},directory=${dockerdistribution_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
}
exec { 'Configure CGCS DRBD':
@@ -518,33 +448,22 @@ class platform::sm
}
# Configure helm chart repository
if $kubernetes_enabled {
exec { 'Provision Helm Chart Repository FS in SM (service-group-member helmrepository-fs)':
command => 'sm-provision service-group-member controller-services helmrepository-fs',
}
-> exec { 'Provision Helm Chart Repository FS in SM (service helmrepository-fs)':
command => 'sm-provision service helmrepository-fs',
}
-> exec { 'Configure Helm Chart Repository FileSystem':
command => "sm-configure service_instance helmrepository-fs helmrepository-fs \"rmon_rsc_name=helm-charts-storage,device=${helmrepo_fs_source_dir},directory=${helmrepo_fs_target_dir},options=bind,noatime,nodiratime,fstype=ext4,check_level=20\"",
}
} else {
exec { 'Deprovision Helm Chart Repository FS in SM (service-group-member helmrepository-fs)':
command => 'sm-deprovision service-group-member controller-services helmrepository-fs',
}
-> exec { 'Deprovision Helm Chart Repository FS in SM (service helmrepository-fs)':
command => 'sm-deprovision service helmrepository-fs',
}
exec { 'Provision Helm Chart Repository FS in SM (service-group-member helmrepository-fs)':
command => 'sm-provision service-group-member controller-services helmrepository-fs',
}
-> exec { 'Provision Helm Chart Repository FS in SM (service helmrepository-fs)':
command => 'sm-provision service helmrepository-fs',
}
-> exec { 'Configure Helm Chart Repository FileSystem':
command => "sm-configure service_instance helmrepository-fs helmrepository-fs \"rmon_rsc_name=helm-charts-storage,device=${helmrepo_fs_source_dir},directory=${helmrepo_fs_target_dir},options=bind,noatime,nodiratime,fstype=ext4,check_level=20\"",
}
if $kubernetes_enabled {
exec { 'Configure ETCD DRBD':
command => "sm-configure service_instance drbd-etcd drbd-etcd:${hostunit} drbd_resource=${etcd_drbd_resource}",
}
exec { 'Configure ETCD DRBD':
command => "sm-configure service_instance drbd-etcd drbd-etcd:${hostunit} drbd_resource=${etcd_drbd_resource}",
}
exec { 'Configure ETCD DRBD FileSystem':
command => "sm-configure service_instance etcd-fs etcd-fs \"device=${etcd_fs_device},directory=${etcd_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
}
exec { 'Configure ETCD DRBD FileSystem':
command => "sm-configure service_instance etcd-fs etcd-fs \"device=${etcd_fs_device},directory=${etcd_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
}
if $system_mode == 'duplex-direct' or $system_mode == 'simplex' {
@@ -557,6 +476,7 @@ class platform::sm
}
}
# TODO: region code needs to be revisited
if $region_config {
# In a default Multi-Region configuration, Keystone is running as a
# shared service in the Primary Region so need to deprovision that
@@ -610,12 +530,9 @@ class platform::sm
}
}
}
} elsif $kubernetes_enabled {
$configure_keystone = true
$configure_glance = false
} else {
$configure_keystone = true
$configure_glance = true
$configure_glance = false
}
if $configure_keystone {
@@ -808,28 +725,27 @@ class platform::sm
}
}
if $kubernetes_enabled {
# Re-using cinder-ip for cluster-host-ip for now
# This will be changed when the cluster-host-ip resource is added to SM
exec { 'Configure Cinder IP in SM (service-group-member cinder-ip)':
command =>
'sm-provision service-group-member controller-services cinder-ip',
}
-> exec { 'Configure Cinder IP in SM (service cinder-ip)':
command => 'sm-provision service cinder-ip',
}
# Re-using cinder-ip for cluster-host-ip for now
# This will be changed when the cluster-host-ip resource is added to SM
exec { 'Configure Cinder IP in SM (service-group-member cinder-ip)':
command =>
'sm-provision service-group-member controller-services cinder-ip',
}
-> exec { 'Configure Cinder IP in SM (service cinder-ip)':
command => 'sm-provision service cinder-ip',
}
if $system_mode == 'duplex-direct' or $system_mode == 'simplex' {
exec { 'Configure Cinder IP service instance':
command => "sm-configure service_instance cinder-ip cinder-ip \"ip=${cinder_ip_param_ip},cidr_netmask=${cinder_ip_param_mask},nic=${cinder_ip_interface},arp_count=7,dc=yes\"",
}
} else {
exec { 'Configure Cinder IP service instance':
command => "sm-configure service_instance cinder-ip cinder-ip \"ip=${cinder_ip_param_ip},cidr_netmask=${cinder_ip_param_mask},nic=${cinder_ip_interface},arp_count=7\"",
}
if $system_mode == 'duplex-direct' or $system_mode == 'simplex' {
exec { 'Configure Cinder IP service instance':
command => "sm-configure service_instance cinder-ip cinder-ip \"ip=${cinder_ip_param_ip},cidr_netmask=${cinder_ip_param_mask},nic=${cinder_ip_interface},arp_count=7,dc=yes\"",
}
} else {
exec { 'Configure Cinder IP service instance':
command => "sm-configure service_instance cinder-ip cinder-ip \"ip=${cinder_ip_param_ip},cidr_netmask=${cinder_ip_param_mask},nic=${cinder_ip_interface},arp_count=7\"",
}
}
# TODO: revisit region mode
if $region_config {
if $neutron_region_name != $region_2_name {
$configure_neturon = false
@@ -843,10 +759,8 @@ class platform::sm
} else {
$configure_neturon = true
}
} elsif $kubernetes_enabled {
$configure_neturon = false
} else {
$configure_neturon = true
$configure_neturon = false
}
if $configure_neturon {
@@ -855,103 +769,75 @@ class platform::sm
}
}
if $kubernetes_enabled != true {
# TODO: this entire section needs to be removed from SM.
# After these are removed from SM, this entire section of
# deprovision calls will not be needed
# Deprovision Openstack services if Kubernetes Config is enabled
exec { 'Configure OpenStack - Nova API':
command => "sm-configure service_instance nova-api nova-api \"config=/etc/nova/nova.conf,user=root,os_username=${os_username},os_project_name=${os_project_name},os_user_domain_name=${os_user_domain_name},os_project_domain_name=${os_project_domain_name},keystone_get_token_url=${os_auth_url}/tokens\"",
}
# Deprovision Nova Services
exec { 'Deprovision OpenStack - Nova API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-api',
}
-> exec { 'Deprovision OpenStack - Nova API(service)':
command => 'sm-deprovision service nova-api',
}
exec { 'Configure OpenStack - Nova Placement API':
command => "sm-configure service_instance nova-placement-api nova-placement-api \"config=/etc/nova/nova.conf,user=root,os_username=${os_username},os_project_name=${os_project_name},os_user_domain_name=${os_user_domain_name},os_project_domain_name=${os_project_domain_name},keystone_get_token_url=${os_auth_url}/tokens,host=${mgmt_ip_param_ip}\"",
}
exec { 'Deprovision OpenStack - Nova API Proxy (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-api-proxy',
}
-> exec { 'Deprovision OpenStack - Nova API Proxy(service)':
command => 'sm-deprovision service nova-api-proxy',
}
exec { 'Configure OpenStack - Nova Scheduler':
command => "sm-configure service_instance nova-scheduler nova-scheduler \"config=/etc/nova/nova.conf,database_server_port=${db_server_port},amqp_server_port=${amqp_server_port}\"",
}
exec { 'Deprovision OpenStack - Nova Placement API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-placement-api',
}
-> exec { 'Deprovision OpenStack - Nova Placement API(service)':
command => 'sm-deprovision service nova-placement-api',
}
exec { 'Configure OpenStack - Nova Conductor':
command => "sm-configure service_instance nova-conductor nova-conductor \"config=/etc/nova/nova.conf,database_server_port=${db_server_port},amqp_server_port=${amqp_server_port}\"",
}
exec { 'Deprovision OpenStack - Nova Scheduler (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-scheduler',
}
-> exec { 'Deprovision OpenStack - Nova Scheduler(service)':
command => 'sm-deprovision service nova-scheduler',
}
exec { 'Configure OpenStack - Nova Console Authorization':
command => "sm-configure service_instance nova-console-auth nova-console-auth \"config=/etc/nova/nova.conf,user=root,database_server_port=${db_server_port},amqp_server_port=${amqp_server_port}\"",
}
exec { 'Deprovision OpenStack - Nova Conductor (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-conductor',
}
-> exec { 'Deprovision OpenStack - Nova Conductor(service)':
command => 'sm-deprovision service nova-conductor',
}
exec { 'Configure OpenStack - Nova NoVNC':
command => "sm-configure service_instance nova-novnc nova-novnc \"config=/etc/nova/nova.conf,user=root,console_port=${novnc_console_port}\"",
}
exec { 'Deprovision OpenStack - Nova Console Auth (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-console-auth',
}
-> exec { 'Deprovision OpenStack - Nova Console Auth(service)':
command => 'sm-deprovision service nova-console-auth',
}
exec { 'Configure OpenStack - Ceilometer Agent Notification':
command => "sm-configure service_instance ceilometer-agent-notification ceilometer-agent-notification \"config=/etc/ceilometer/ceilometer.conf\"",
}
} else {
# Deprovision Openstack services if Kubernetes Config is enabled
exec { 'Deprovision OpenStack - Nova NoVNC (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-novnc',
}
-> exec { 'Deprovision OpenStack - Nova NoVNC(service)':
command => 'sm-deprovision service nova-novnc',
}
# Deprovision Nova Services
exec { 'Deprovision OpenStack - Nova API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-api',
}
-> exec { 'Deprovision OpenStack - Nova API(service)':
command => 'sm-deprovision service nova-api',
}
# Deprovision Ceilometer
exec { 'Deprovision OpenStack - Ceilometer Agent Notification (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services ceilometer-agent-notification',
}
-> exec { 'Deprovision OpenStack - Ceilometer Agent Notification(service)':
command => 'sm-deprovision service ceilometer-agent-notification',
}
exec { 'Deprovision OpenStack - Nova API Proxy (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-api-proxy',
}
-> exec { 'Deprovision OpenStack - Nova API Proxy(service)':
command => 'sm-deprovision service nova-api-proxy',
}
exec { 'Deprovision OpenStack - Nova Placement API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-placement-api',
}
-> exec { 'Deprovision OpenStack - Nova Placement API(service)':
command => 'sm-deprovision service nova-placement-api',
}
exec { 'Deprovision OpenStack - Nova Scheduler (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-scheduler',
}
-> exec { 'Deprovision OpenStack - Nova Scheduler(service)':
command => 'sm-deprovision service nova-scheduler',
}
exec { 'Deprovision OpenStack - Nova Conductor (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-conductor',
}
-> exec { 'Deprovision OpenStack - Nova Conductor(service)':
command => 'sm-deprovision service nova-conductor',
}
exec { 'Deprovision OpenStack - Nova Console Auth (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-console-auth',
}
-> exec { 'Deprovision OpenStack - Nova Console Auth(service)':
command => 'sm-deprovision service nova-console-auth',
}
exec { 'Deprovision OpenStack - Nova NoVNC (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-novnc',
}
-> exec { 'Deprovision OpenStack - Nova NoVNC(service)':
command => 'sm-deprovision service nova-novnc',
}
# Deprovision Ceilometer
exec { 'Deprovision OpenStack - Ceilometer Agent Notification (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services ceilometer-agent-notification',
}
-> exec { 'Deprovision OpenStack - Ceilometer Agent Notification(service)':
command => 'sm-deprovision service ceilometer-agent-notification',
}
# Deprovision Neutron Server
exec { 'Deprovision OpenStack - Neutron Server (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services neutron-server',
}
-> exec { 'Deprovision OpenStack - Neutron Server (service)':
command => 'sm-deprovision service neutron-server',
}
# Deprovision Neutron Server
exec { 'Deprovision OpenStack - Neutron Server (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services neutron-server',
}
-> exec { 'Deprovision OpenStack - Neutron Server (service)':
command => 'sm-deprovision service neutron-server',
}
if $heat_service_enabled {
@@ -1275,43 +1161,31 @@ class platform::sm
}
# Configure ETCD for Kubernetes
if $kubernetes_enabled {
exec { 'Provision etcd-fs (service-group-member)':
command => 'sm-provision service-group-member controller-services etcd-fs',
}
-> exec { 'Provision etcd-fs (service)':
command => 'sm-provision service etcd-fs',
}
-> exec { 'Provision drbd-etcd (service-group-member)':
command => 'sm-provision service-group-member controller-services drbd-etcd',
}
-> exec { 'Provision drbd-etcd (service)':
command => 'sm-provision service drbd-etcd',
}
-> exec { 'Provision ETCD (service-group-member)':
command => 'sm-provision service-group-member controller-services etcd',
}
-> exec { 'Provision ETCD (service)':
command => 'sm-provision service etcd',
}
exec { 'Provision etcd-fs (service-group-member)':
command => 'sm-provision service-group-member controller-services etcd-fs',
}
else {
exec { 'Deprovision ETCD (service-group-member)':
command => 'sm-deprovision service-group-member controller-services etcd',
}
-> exec { 'Deprovision ETCD (service)':
command => 'sm-deprovision service etcd',
}
-> exec { 'Provision etcd-fs (service)':
command => 'sm-provision service etcd-fs',
}
-> exec { 'Provision drbd-etcd (service-group-member)':
command => 'sm-provision service-group-member controller-services drbd-etcd',
}
-> exec { 'Provision drbd-etcd (service)':
command => 'sm-provision service drbd-etcd',
}
-> exec { 'Provision ETCD (service-group-member)':
command => 'sm-provision service-group-member controller-services etcd',
}
-> exec { 'Provision ETCD (service)':
command => 'sm-provision service etcd',
}
# Configure Docker Distribution
if $kubernetes_enabled {
exec { 'Provision Docker Distribution (service-group-member)':
command => 'sm-provision service-group-member controller-services docker-distribution',
}
-> exec { 'Provision Docker Distribution (service)':
command => 'sm-provision service docker-distribution',
}
exec { 'Provision Docker Distribution (service-group-member)':
command => 'sm-provision service-group-member controller-services docker-distribution',
}
-> exec { 'Provision Docker Distribution (service)':
command => 'sm-provision service docker-distribution',
}
# Barbican