Merge remote-tracking branch starlingx/master into HEAD

Change-Id: Ib4e64858022a39a6143d51985363513ee6019a6d
Signed-off-by: Scott Little <scott.little@windriver.com>
Author: Scott Little <scott.little@windriver.com>
Date: 2019-02-20 12:02:23 -05:00
Commit: b09d0898b6
19 changed files with 721 additions and 257 deletions

View File

@@ -180,7 +180,8 @@
 # fm-rest-api: true
 # fm-mgr: true
 sysinv-agent: true
-sysinv-api: true
+# Skip sysinv-api for now, needs more attention
+# sysinv-api: true
 sysinv-cond: true
 mysql: false
 postgresql: true

View File

@@ -162,15 +162,17 @@ function install_cgtsclient {
 }

 function install_configutilities {
-    pushd $STXCONFIG_CONFUTILS
-    sudo python setup.py install --root=/ --install-lib=$PYTHON_SITE_DIR --prefix=/usr --install-data=/usr/share --single-version-externally-managed
-    popd
+    # We can't use setup_develop as there is no setup.cfg file present for configutilities
+    setup_package $STXCONFIG_CONFUTILS -e
 }

 function install_controllerconfig {
-    pushd $STXCONFIG_CONTROL
-    sudo python setup.py install --root=/ --install-lib=$PYTHON_SITE_DIR --prefix=/usr --install-data=/usr/share --single-version-externally-managed
-    popd
+    # This is a hack to work around the lack of proper global-requirements
+    # setup in these packages
+    pip_install pycrypto
+
+    # We can't use setup_develop as there is no setup.cfg file present for controllerconfig
+    setup_package $STXCONFIG_CONTROL -e
 }

 function install_sysinv {

View File

@@ -7,8 +7,13 @@
 STX_CONFIG_NAME=stx-config

 ######### Plugin Specific ##########
-enable_service $STX_CONFIG_NAME sysinv sysinv-api sysinv-cond
+enable_service $STX_CONFIG_NAME
+#define_plugin sysinv
+# This must not use any variables to work properly in OpenStack's DevStack playbook
+define_plugin stx-config
+# This works for Zuul jobs using OpenStack's DevStack roles
+plugin_requires stx-config stx-integ
+plugin_requires stx-config stx-update

 # Handle STX pre-reqs
 # stx-integ

View File

@@ -48,6 +48,10 @@ data:
         replicas:
           error_page: 2
           ingress: 2
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/ingress-0.1.0.tgz
@@ -89,6 +93,10 @@ data:
         replicas:
           error_page: 2
           ingress: 2
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/ingress-0.1.0.tgz
@@ -118,6 +126,12 @@ data:
         - type: job
           labels:
             app: rbd-provisioner
+    values:
+      pod:
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/rbd-provisioner-0.1.0.tgz
@@ -187,6 +201,11 @@ data:
         prometheus_mysql_exporter:
           node_selector_key: openstack-control-plane
           node_selector_value: enabled
+      pod:
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/mariadb-0.1.0.tgz
@@ -360,6 +379,10 @@ data:
       pod:
         replicas:
           api: 2
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/keystone-0.1.0.tgz
@@ -793,6 +816,9 @@ data:
       pod:
         replicas:
           server: 2
+        user:
+          neutron:
+            uid: 0
         affinity:
           anti:
             type:
@@ -800,13 +826,13 @@ data:
       labels:
         agent:
           dhcp:
-            node_selector_key: openstack-control-plane
+            node_selector_key: openstack-compute-node
             node_selector_value: enabled
           l3:
-            node_selector_key: openstack-control-plane
+            node_selector_key: openstack-compute-node
             node_selector_value: enabled
           metadata:
-            node_selector_key: openstack-control-plane
+            node_selector_key: openstack-compute-node
             node_selector_value: enabled
         job:
           node_selector_key: openstack-control-plane
@@ -829,19 +855,73 @@ data:
           node_selector_value: enabled
       network:
         interface:
-          tunnel: enp0s3
+          tunnel: docker0
+        backend:
+          - openvswitch
+          - sriov
       conf:
         neutron:
           DEFAULT:
-            l3_ha: true
-            min_l3_agents_per_router: 2
-            max_l3_agents_per_router: 5
+            l3_ha: false
+            min_l3_agents_per_router: 1
+            max_l3_agents_per_router: 1
             l3_ha_network_type: vxlan
-            dhcp_agents_per_network: 2
+            dhcp_agents_per_network: 1
+            max_overflow: 64
+            max_pool_size: 1
+            idle_timeout: 60
+            router_status_managed: true
+            vlan_transparent: true
+            wsgi_default_pool_size: 100
+            notify_nova_on_port_data_changes: true
+            notify_nova_on_port_status_changes: true
+            control_exchange: neutron
+            core_plugin: neutron.plugins.ml2.plugin.Ml2Plugin
+            state_path: /var/run/neutron
+            syslog_log_facility: local2
+            use_syslog: true
+            pnet_audit_enabled: false
+            driver: messagingv2
+            enable_proxy_headers_parsing: true
+            lock_path: /var/run/neutron/lock
+            log_format: '[%(name)s] %(message)s'
+            policy_file: /etc/neutron/policy.json
+            service_plugins: router,network_segment_range
+            dns_domain: openstacklocal
+            enable_new_agents: false
+            allow_automatic_dhcp_failover: true
+            allow_automatic_l3agent_failover: true
+          agent:
+            root_helper: sudo
+          vhost:
+            vhost_user_enabled: true
+        dhcp_agent:
+          DEFAULT:
+            enable_isolated_metadata: true
+            enable_metadata_network: false
+            interface_driver: openvswitch
+            resync_interval: 30
+        l3_agent:
+          DEFAULT:
+            agent_mode: dvr_snat
+            interface_driver: openvswitch
+            metadata_port: 80
        plugins:
          ml2_conf:
            ml2_type_flat:
              flat_networks: public
+            ml2:
+              mechanism_drivers: openvswitch,sriovnicswitch,l2population
+              path_mtu: 0
+              tenant_network_types: vlan,vxlan
+              type_drivers: flat,vlan,vxlan
+            ml2_type_vxlan:
+              vni_ranges: ''
+              vxlan_group: ''
+            ovs_driver:
+              vhost_user_enabled: true
+            securitygroup:
+              firewall_driver: noop
        openvswitch_agent:
          agent:
            tunnel_types: vxlan
@@ -953,6 +1033,10 @@ data:
       user:
         aodh:
           uid: 0
+      affinity:
+        anti:
+          type:
+            default: requiredDuringSchedulingIgnoredDuringExecution
       jobs:
         alarms_cleaner:
           # daily at the 35 minute mark
@@ -1050,6 +1134,11 @@ data:
         app:healthcheck:
           use: egg:oslo.middleware#healthcheck
           oslo_config_project: gnocchi
+      pod:
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/gnocchi-0.1.0.tgz
@@ -1090,6 +1179,10 @@ data:
       user:
         panko:
           uid: 0
+      affinity:
+        anti:
+          type:
+            default: requiredDuringSchedulingIgnoredDuringExecution
       jobs:
         events_cleaner:
           # hourly at the 10 minute mark
@@ -1602,6 +1695,11 @@ data:
           attributes:
             controller: resource_metadata.controller
             switch: resource_metadata.switch
+      pod:
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/ceilometer-0.1.0.tgz

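The affinity/anti override added throughout this manifest (and the identical one below) maps onto a standard Kubernetes pod anti-affinity rule once openstack-helm renders the chart, forcing replicas onto distinct nodes. A minimal sketch of the resulting structure using the Python kubernetes client; the match label and topology key here are illustrative assumptions, not values from this manifest:

    from kubernetes import client

    # Hedged sketch of what pod.affinity.anti.type.default:
    # requiredDuringSchedulingIgnoredDuringExecution becomes in a pod spec.
    anti_affinity = client.V1PodAntiAffinity(
        required_during_scheduling_ignored_during_execution=[
            client.V1PodAffinityTerm(
                label_selector=client.V1LabelSelector(
                    match_labels={'application': 'mariadb'}),  # assumed label
                topology_key='kubernetes.io/hostname')         # assumed key
        ])

    pod_spec = client.V1PodSpec(
        containers=[client.V1Container(name='example')],
        affinity=client.V1Affinity(pod_anti_affinity=anti_affinity))

With the "required" variant, the scheduler refuses to co-locate two matching replicas, which is why the same hunk also bumps replica counts to 2 only where two controllers exist.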
View File

@@ -48,6 +48,10 @@ data:
         replicas:
           error_page: 2
           ingress: 2
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/ingress-0.1.0.tgz
@@ -89,6 +93,10 @@ data:
         replicas:
           error_page: 2
           ingress: 2
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/ingress-0.1.0.tgz
@@ -118,6 +126,12 @@ data:
         - type: job
           labels:
             app: rbd-provisioner
+    values:
+      pod:
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/rbd-provisioner-0.1.0.tgz
@@ -187,6 +201,11 @@ data:
         prometheus_mysql_exporter:
           node_selector_key: openstack-control-plane
           node_selector_value: enabled
+      pod:
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/mariadb-0.1.0.tgz
@@ -360,6 +379,10 @@ data:
       pod:
         replicas:
           api: 2
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/keystone-0.1.0.tgz
@@ -793,6 +816,9 @@ data:
       pod:
         replicas:
           server: 2
+        user:
+          neutron:
+            uid: 0
         affinity:
           anti:
             type:
@@ -800,13 +826,13 @@ data:
       labels:
         agent:
           dhcp:
-            node_selector_key: openstack-control-plane
+            node_selector_key: openstack-compute-node
             node_selector_value: enabled
           l3:
-            node_selector_key: openstack-control-plane
+            node_selector_key: openstack-compute-node
             node_selector_value: enabled
           metadata:
-            node_selector_key: openstack-control-plane
+            node_selector_key: openstack-compute-node
             node_selector_value: enabled
         job:
           node_selector_key: openstack-control-plane
@@ -829,19 +855,73 @@ data:
           node_selector_value: enabled
       network:
         interface:
-          tunnel: enp0s3
+          tunnel: docker0
+        backend:
+          - openvswitch
+          - sriov
       conf:
         neutron:
           DEFAULT:
-            l3_ha: true
-            min_l3_agents_per_router: 2
-            max_l3_agents_per_router: 5
+            l3_ha: false
+            min_l3_agents_per_router: 1
+            max_l3_agents_per_router: 1
            l3_ha_network_type: vxlan
-            dhcp_agents_per_network: 2
+            dhcp_agents_per_network: 1
+            max_overflow: 64
+            max_pool_size: 1
+            idle_timeout: 60
+            router_status_managed: true
+            vlan_transparent: true
+            wsgi_default_pool_size: 100
+            notify_nova_on_port_data_changes: true
+            notify_nova_on_port_status_changes: true
+            control_exchange: neutron
+            core_plugin: neutron.plugins.ml2.plugin.Ml2Plugin
+            state_path: /var/run/neutron
+            syslog_log_facility: local2
+            use_syslog: true
+            pnet_audit_enabled: false
+            driver: messagingv2
+            enable_proxy_headers_parsing: true
+            lock_path: /var/run/neutron/lock
+            log_format: '[%(name)s] %(message)s'
+            policy_file: /etc/neutron/policy.json
+            service_plugins: router,network_segment_range
+            dns_domain: openstacklocal
+            enable_new_agents: false
+            allow_automatic_dhcp_failover: true
+            allow_automatic_l3agent_failover: true
+          agent:
+            root_helper: sudo
+          vhost:
+            vhost_user_enabled: true
+        dhcp_agent:
+          DEFAULT:
+            enable_isolated_metadata: true
+            enable_metadata_network: false
+            interface_driver: openvswitch
+            resync_interval: 30
+        l3_agent:
+          DEFAULT:
+            agent_mode: dvr_snat
+            interface_driver: openvswitch
+            metadata_port: 80
        plugins:
          ml2_conf:
            ml2_type_flat:
              flat_networks: public
+            ml2:
+              mechanism_drivers: openvswitch,sriovnicswitch,l2population
+              path_mtu: 0
+              tenant_network_types: vlan,vxlan
+              type_drivers: flat,vlan,vxlan
+            ml2_type_vxlan:
+              vni_ranges: ''
+              vxlan_group: ''
+            ovs_driver:
+              vhost_user_enabled: true
+            securitygroup:
+              firewall_driver: noop
        openvswitch_agent:
          agent:
            tunnel_types: vxlan
@@ -953,6 +1033,10 @@ data:
       user:
         aodh:
           uid: 0
+      affinity:
+        anti:
+          type:
+            default: requiredDuringSchedulingIgnoredDuringExecution
       jobs:
         alarms_cleaner:
           # daily at the 35 minute mark
@@ -1050,6 +1134,11 @@ data:
         app:healthcheck:
           use: egg:oslo.middleware#healthcheck
           oslo_config_project: gnocchi
+      pod:
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/gnocchi-0.1.0.tgz
@@ -1090,6 +1179,10 @@ data:
       user:
         panko:
           uid: 0
+      affinity:
+        anti:
+          type:
+            default: requiredDuringSchedulingIgnoredDuringExecution
       jobs:
         events_cleaner:
           # hourly at the 10 minute mark
@@ -1602,6 +1695,11 @@ data:
           attributes:
             controller: resource_metadata.controller
             switch: resource_metadata.switch
+      pod:
+        affinity:
+          anti:
+            type:
+              default: requiredDuringSchedulingIgnoredDuringExecution
     source:
       type: tar
       location: http://172.17.0.1/helm_charts/ceilometer-0.1.0.tgz

View File

@@ -12,4 +12,6 @@ kind: ServiceAccount
 metadata:
   name: {{ .Values.rbac.serviceAccount }}
   namespace: {{ .Release.Namespace }}
+imagePullSecrets:
+  - name: default-registry-key
 {{- end }}

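The default-registry-key referenced here is the kubernetes.io/dockerconfigjson secret that sysinv creates later in this commit (see _create_local_registry_secrets below). For context, a minimal sketch of how such a payload is assembled, assuming Python 2 string semantics as in sysinv at the time; the server address and credentials are placeholder examples:

    import base64
    import json

    # Hedged sketch of a dockerconfigjson payload like 'default-registry-key'.
    server = '192.168.204.2:9001'            # assumed local registry address
    auth = base64.b64encode('admin:secret')  # '<user>:<password>', Python 2 str
    token = json.dumps({'auths': {server: {'auth': auth}}})

    secret = {
        'type': 'kubernetes.io/dockerconfigjson',
        'metadata': {'name': 'default-registry-key', 'namespace': 'openstack'},
        'data': {'.dockerconfigjson': base64.b64encode(token)},
    }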
View File

@@ -76,9 +76,12 @@ class platform::helm
     }
   } else {
-    exec { 'initialize helm':
+    Class['::platform::kubernetes::master']
+    -> exec { 'initialize helm':
       environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/wrsroot' ],
-      command     => 'helm init --client-only',
+      command     => 'helm init --skip-refresh --client-only',
       logoutput   => true,
       user        => 'wrsroot',
       group       => 'wrs',

View File

@@ -16,10 +16,14 @@ class platform::kubernetes::kubeadm {
   $iptables_file = "net.bridge.bridge-nf-call-ip6tables = 1
     net.bridge.bridge-nf-call-iptables = 1"

+  # Ensure DNS is configured as name resolution is required when
+  # kubeadm init is run.
+  Class['::platform::dns']
+
   # Update iptables config. This is required based on:
   # https://kubernetes.io/docs/tasks/tools/install-kubeadm
   # This probably belongs somewhere else - initscripts package?
-  file { '/etc/sysctl.d/k8s.conf':
+  -> file { '/etc/sysctl.d/k8s.conf':
     ensure  => file,
     content => $iptables_file,
     owner   => 'root',

View File

@@ -192,6 +192,10 @@ class platform::sm
     $os_region_name = $keystone_region
   }

+  # Barbican
+  include ::openstack::barbican::params
+  $barbican_enabled = $::openstack::barbican::params::service_enabled
+
   $ost_cl_ctrl_host = $::platform::network::mgmt::params::controller_address_url

   include ::platform::client::params
@@ -262,9 +266,6 @@ class platform::sm
   # Panko
   include ::openstack::panko::params

-  # Barbican
-  include ::openstack::barbican::params
-
   if $system_mode == 'simplex' {
     $hostunit = '0'
     $management_my_unit_ip = $::platform::network::mgmt::params::controller0_address
@@ -333,7 +334,6 @@ class platform::sm
     $magnum_configured = false
     $gnocchi_enabled = false
     $panko_enabled = false
-    $barbican_enabled = false
   } else {
     $heat_service_enabled = $::openstack::heat::params::service_enabled
     $murano_configured = $::openstack::murano::params::service_enabled
@@ -341,7 +341,6 @@ class platform::sm
     $magnum_configured = $::openstack::magnum::params::service_enabled
     $gnocchi_enabled = $::openstack::gnocchi::params::service_enabled
     $panko_enabled = $::openstack::panko::params::service_enabled
-    $barbican_enabled = $::openstack::barbican::params::service_enabled
   }

   # lint:ignore:140chars
@@ -619,6 +618,21 @@ class platform::sm
     }
   }

+  # Barbican
+  if $barbican_enabled {
+    exec { 'Configure OpenStack - Barbican API':
+      command => "sm-configure service_instance barbican-api barbican-api \"config=/etc/barbican/barbican.conf\"",
+    }
+
+    exec { 'Configure OpenStack - Barbican Keystone Listener':
+      command => "sm-configure service_instance barbican-keystone-listener barbican-keystone-listener \"config=/etc/barbican/barbican.conf\"",
+    }
+
+    exec { 'Configure OpenStack - Barbican Worker':
+      command => "sm-configure service_instance barbican-worker barbican-worker \"config=/etc/barbican/barbican.conf\"",
+    }
+  }
+
   if $configure_glance {
     if !$glance_cached {
       exec { 'Configure OpenStack - Glance Registry':
@@ -1098,49 +1112,6 @@ class platform::sm
       command => "sm-configure service_instance ironic-conductor ironic-conductor \"config=/etc/ironic/ironic.conf,tftproot=${ironic_tftproot}\"",
     }

-    # Barbican
-    if $barbican_enabled {
-      exec { 'Configure OpenStack - Barbican API':
-        command => "sm-configure service_instance barbican-api barbican-api \"config=/etc/barbican/barbican.conf\"",
-      }
-      exec { 'Configure OpenStack - Barbican Keystone Listener':
-        command => "sm-configure service_instance barbican-keystone-listener barbican-keystone-listener \"config=/etc/barbican/barbican.conf\"",
-      }
-      exec { 'Configure OpenStack - Barbican Worker':
-        command => "sm-configure service_instance barbican-worker barbican-worker \"config=/etc/barbican/barbican.conf\"",
-      }
-    } else {
-      exec { 'Deprovision OpenStack - Barbican API (service-group-member)':
-        path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
-        command => 'sm-deprovision service-group-member cloud-services barbican-api',
-      }
-      -> exec { 'Deprovision OpenStack - Barbican API (service)':
-        path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
-        command => 'sm-deprovision service barbican-api',
-      }
-      exec { 'Deprovision OpenStack - Barbican Keystone Listener (service-group-member)':
-        path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
-        command => 'sm-deprovision service-group-member cloud-services barbican-keystone-listener',
-      }
-      -> exec { 'Deprovision OpenStack - Barbican Keystone Listener (service)':
-        path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
-        command => 'sm-deprovision service barbican-keystone-listener',
-      }
-      exec { 'Deprovision OpenStack - Barbican Worker (service-group-member)':
-        path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
-        command => 'sm-deprovision service-group-member cloud-services barbican-worker',
-      }
-      -> exec { 'Deprovision OpenStack - Barbican Worker (service)':
-        path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
-        command => 'sm-deprovision service barbican-worker',
-      }
-    }
-
     exec { 'Configure OpenStack - Nova Compute':
       command => "sm-configure service_instance nova-compute nova-compute \"config=/etc/nova/nova-ironic.conf\"",
     }
@@ -1337,6 +1308,55 @@ class platform::sm
     }
   }

+  # Barbican
+  if $barbican_enabled {
+    exec { 'Provision OpenStack - Barbican API (service-group-member)':
+      command => 'sm-provision service-group-member cloud-services barbican-api',
+    }
+    -> exec { 'Provision OpenStack - Barbican API (service)':
+      command => 'sm-provision service barbican-api',
+    }
+    -> exec { 'Provision OpenStack - Barbican Keystone Listener (service-group-member)':
+      command => 'sm-provision service-group-member cloud-services barbican-keystone-listener',
+    }
+    -> exec { 'Provision OpenStack - Barbican Keystone Listener (service)':
+      command => 'sm-provision service barbican-keystone-listener',
+    }
+    -> exec { 'Provision OpenStack - Barbican Worker (service-group-member)':
+      command => 'sm-provision service-group-member cloud-services barbican-worker',
+    }
+    -> exec { 'Provision OpenStack - Barbican Worker (service)':
+      command => 'sm-provision service barbican-worker',
+    }
+  } else {
+    exec { 'Deprovision OpenStack - Barbican API (service-group-member)':
+      path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
+      command => 'sm-deprovision service-group-member cloud-services barbican-api',
+    }
+    -> exec { 'Deprovision OpenStack - Barbican API (service)':
+      path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
+      command => 'sm-deprovision service barbican-api',
+    }
+
+    exec { 'Deprovision OpenStack - Barbican Keystone Listener (service-group-member)':
+      path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
+      command => 'sm-deprovision service-group-member cloud-services barbican-keystone-listener',
+    }
+    -> exec { 'Deprovision OpenStack - Barbican Keystone Listener (service)':
+      path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
+      command => 'sm-deprovision service barbican-keystone-listener',
+    }
+
+    exec { 'Deprovision OpenStack - Barbican Worker (service-group-member)':
+      path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
+      command => 'sm-deprovision service-group-member cloud-services barbican-worker',
+    }
+    -> exec { 'Deprovision OpenStack - Barbican Worker (service)':
+      path    => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
+      command => 'sm-deprovision service barbican-worker',
+    }
+  }
+
   exec { 'Configure Murano Rabbit':
     command => "sm-configure service_instance murano-rabbit murano-rabbit \"server=${rabbitmq_server},ctl=${rabbitmqctl},nodename=${murano_rabbit_node_name},mnesia_base=${murano_rabbit_mnesia_base},ip=${oam_ip_param_ip},config_file=${murano_rabbit_config_file},env_config_file=${murano_rabbit_env_config_file},pid_file=${murano_rabbit_pid},dist_port=${murano_rabbit_dist_port}\"",
   }

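The `->` arrows in the Puppet block above enforce ordering: each service-group-member is provisioned before its corresponding service, and each step only runs if the previous exec succeeded. The same sequence, expressed as a plain ordered script; a hedged sketch, not code from this commit, with a hypothetical helper name but the exact sm-provision arguments from the diff:

    import subprocess

    # Hedged sketch of the ordered provision sequence the '->' chain encodes.
    def provision_barbican():
        steps = [
            ['sm-provision', 'service-group-member', 'cloud-services', 'barbican-api'],
            ['sm-provision', 'service', 'barbican-api'],
            ['sm-provision', 'service-group-member', 'cloud-services',
             'barbican-keystone-listener'],
            ['sm-provision', 'service', 'barbican-keystone-listener'],
            ['sm-provision', 'service-group-member', 'cloud-services', 'barbican-worker'],
            ['sm-provision', 'service', 'barbican-worker'],
        ]
        for step in steps:
            subprocess.check_call(step)  # stop at the first failure, like a failed exec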
View File

@@ -1,2 +1,2 @@
 SRC_DIR="sysinv"
-TIS_PATCH_VER=301
+TIS_PATCH_VER=304

View File

@@ -923,6 +923,11 @@ class KubeAppNotFound(NotFound):
     message = _("No application with name %(name)s.")


+class DockerRegistryCredentialNotFound(NotFound):
+    message = _("Credentials to access local docker registry "
+                "for user %(name)s could not be found.")
+
+
 class SDNNotEnabled(SysinvException):
     message = _("SDN configuration is not enabled.")
@@ -1055,6 +1060,10 @@ class KubeAppProgressMonitorTimeout(SysinvException):
     message = "Armada execution progress monitor timed out."

+class K8sNamespaceDeleteTimeout(SysinvException):
+    message = "Namespace %(name)s deletion timeout."
+
+
 class InvalidEndpoint(SysinvException):
     message = "The provided endpoint is invalid"

View File

@@ -67,3 +67,113 @@ class KubeOperator(object):
         except Exception as e:
             LOG.error("Kubernetes exception in kube_get_nodes: %s" % e)
             raise
+
+    def kube_create_namespace(self, namespace):
+        body = {'metadata': {'name': namespace}}
+
+        c = self._get_kubernetesclient()
+        try:
+            c.create_namespace(body)
+        except ApiException as e:
+            if e.status == httplib.CONFLICT:
+                # Already exists
+                LOG.warn("Namespace %s already exists." % namespace)
+            else:
+                LOG.error("Failed to create Namespace %s: %s" % (namespace, e.body))
+                raise
+        except Exception as e:
+            LOG.error("Kubernetes exception in "
+                      "_kube_create_namespace %s: %s" % (namespace, e))
+            raise
+
+    def kube_get_namespace(self, namespace):
+        c = self._get_kubernetesclient()
+        try:
+            c.read_namespace(namespace)
+            return True
+        except ApiException as e:
+            if e.status == httplib.NOT_FOUND:
+                return False
+            else:
+                LOG.error("Failed to get Namespace %s: %s" % (namespace, e.body))
+                raise
+        except Exception as e:
+            LOG.error("Kubernetes exception in "
+                      "kube_get_namespace %s: %s" % (namespace, e))
+            raise
+
+    def kube_get_secret(self, name, namespace):
+        c = self._get_kubernetesclient()
+        try:
+            c.read_namespaced_secret(name, namespace)
+            return True
+        except ApiException as e:
+            if e.status == httplib.NOT_FOUND:
+                return False
+            else:
+                LOG.error("Failed to get Secret %s under "
+                          "Namespace %s: %s" % (name, namespace, e.body))
+                raise
+        except Exception as e:
+            LOG.error("Kubernetes exception in kube_get_secret: %s" % e)
+            raise
+
+    def kube_create_secret(self, namespace, body):
+        c = self._get_kubernetesclient()
+        try:
+            c.create_namespaced_secret(namespace, body)
+        except Exception as e:
+            LOG.error("Failed to create Secret %s under Namespace %s: "
+                      "%s" % (body['metadata']['name'], namespace, e))
+            raise
+
+    def kube_delete_persistent_volume_claim(self, namespace, **kwargs):
+        c = self._get_kubernetesclient()
+        try:
+            c.delete_collection_namespaced_persistent_volume_claim(
+                namespace, **kwargs)
+        except Exception as e:
+            LOG.error("Failed to delete Persistent Volume Claim "
+                      "under Namespace %s: %s" % (namespace, e))
+            raise
+
+    def kube_delete_secret(self, name, namespace, **kwargs):
+        body = {}
+
+        if kwargs:
+            body.update(kwargs)
+
+        c = self._get_kubernetesclient()
+        try:
+            c.delete_namespaced_secret(name, namespace, body)
+        except ApiException as e:
+            if e.status == httplib.NOT_FOUND:
+                LOG.warn("Secret %s under Namespace %s "
+                         "not found." % (name, namespace))
+            else:
+                LOG.error("Failed to clean up Secret %s under "
+                          "Namespace %s: %s" % (name, namespace, e.body))
+                raise
+        except Exception as e:
+            LOG.error("Kubernetes exception in kube_delete_secret: %s" % e)
+            raise
+
+    def kube_delete_namespace(self, namespace, **kwargs):
+        body = {}
+
+        if kwargs:
+            body.update(kwargs)
+
+        c = self._get_kubernetesclient()
+        try:
+            c.delete_namespace(namespace, body)
+        except ApiException as e:
+            if e.status == httplib.NOT_FOUND:
+                LOG.warn("Namespace %s not found." % namespace)
+            else:
+                LOG.error("Failed to clean up Namespace %s: "
+                          "%s" % (namespace, e.body))
+                raise
+        except Exception as e:
+            LOG.error("Kubernetes exception in kube_delete_namespace: %s" % e)
+            raise

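The new helpers lean on HTTP status codes to stay idempotent: 409 on create means the object already exists, 404 on read or delete means it is already gone. A standalone sketch of the same pattern against the kubernetes Python client (Python 2 httplib, as sysinv used at the time; the kubeconfig path matches the rest of the commit):

    import httplib

    from kubernetes import client, config
    from kubernetes.client.rest import ApiException

    def ensure_namespace(v1, name):
        # Create the namespace, treating "already exists" (409) as success.
        try:
            v1.create_namespace({'metadata': {'name': name}})
        except ApiException as e:
            if e.status != httplib.CONFLICT:
                raise

    def namespace_exists(v1, name):
        # Read the namespace, treating "not found" (404) as a clean False.
        try:
            v1.read_namespace(name)
            return True
        except ApiException as e:
            if e.status == httplib.NOT_FOUND:
                return False
            raise

    config.load_kube_config('/etc/kubernetes/admin.conf')
    v1 = client.CoreV1Api()
    ensure_namespace(v1, 'openstack')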
View File

@@ -9,8 +9,10 @@

 """ System Inventory Kubernetes Application Operator."""

+import base64
 import docker
 import grp
+import keyring
 import os
 import pwd
 import re
@@ -57,6 +59,9 @@ INSTALLATION_TIMEOUT = 3600
 MAX_DOWNLOAD_THREAD = 20
 TARFILE_DOWNLOAD_CONNECTION_TIMEOUT = 60
 TARFILE_TRANSFER_CHUNK_SIZE = 1024 * 512
+DOCKER_REGISTRY_USER = 'admin'
+DOCKER_REGISTRY_SERVICE = 'CGCS'
+DOCKER_REGISTRY_SECRET = 'default-registry-key'

 # Helper functions
@@ -97,6 +102,17 @@ def get_app_install_root_path_ownership():
     return (uid, gid)


+def get_local_docker_registry_auth():
+    registry_password = keyring.get_password(
+        DOCKER_REGISTRY_SERVICE, DOCKER_REGISTRY_USER)
+    if not registry_password:
+        raise exception.DockerRegistryCredentialNotFound(
+            name=DOCKER_REGISTRY_USER)
+
+    return dict(username=DOCKER_REGISTRY_USER,
+                password=registry_password)
+
+
 Chart = namedtuple('Chart', 'name namespace')
@@ -105,7 +121,7 @@ class AppOperator(object):

     def __init__(self, dbapi):
         self._dbapi = dbapi
-        self._docker = DockerHelper()
+        self._docker = DockerHelper(self._dbapi)
         self._helm = helm.HelmOperator(self._dbapi)
         self._kube = kubernetes.KubeOperator(self._dbapi)
         self._lock = threading.Lock()
@@ -653,6 +669,111 @@ class AppOperator(object):
         self._remove_host_labels(controller_hosts, controller_labels_set)
         self._remove_host_labels(compute_hosts, compute_labels_set)

+    def _create_local_registry_secrets(self, app_name):
+        # Temporary function to create the default registry secret
+        # which is used by kubernetes to pull images from the
+        # local registry.
+        # This should be removed after OSH supports deployment
+        # with a registry that has authentication turned on.
+        # https://blueprints.launchpad.net/openstack-helm/+spec/
+        # support-docker-registry-with-authentication-turned-on
+        body = {
+            'type': 'kubernetes.io/dockerconfigjson',
+            'metadata': {},
+            'data': {}
+        }
+
+        app_ns = self._helm.get_helm_application_namespaces(app_name)
+        namespaces = \
+            list(set([ns for ns_list in app_ns.values() for ns in ns_list]))
+        for ns in namespaces:
+            if (ns == common.HELM_NS_HELM_TOOLKIT or
+                    self._kube.kube_get_secret(DOCKER_REGISTRY_SECRET, ns)):
+                # Secret already exists
+                continue
+
+            try:
+                local_registry_server = self._docker.get_local_docker_registry_server()
+                local_registry_auth = get_local_docker_registry_auth()
+
+                auth = '{0}:{1}'.format(local_registry_auth['username'],
+                                        local_registry_auth['password'])
+                token = '{{\"auths\": {{\"{0}\": {{\"auth\": \"{1}\"}}}}}}'.format(
+                    local_registry_server, base64.b64encode(auth))
+
+                body['data'].update({'.dockerconfigjson': base64.b64encode(token)})
+                body['metadata'].update({'name': DOCKER_REGISTRY_SECRET,
+                                         'namespace': ns})
+
+                if not self._kube.kube_get_namespace(ns):
+                    self._kube.kube_create_namespace(ns)
+                self._kube.kube_create_secret(ns, body)
+                LOG.info("Secret %s created under Namespace %s." % (DOCKER_REGISTRY_SECRET, ns))
+            except Exception as e:
+                LOG.error(e)
+                raise
+
+    def _delete_local_registry_secrets(self, app_name):
+        # Temporary function to delete the default registry secrets
+        # which were created during stx-openstack app apply.
+        # This should be removed after OSH supports deployment
+        # with a registry that has authentication turned on.
+        # https://blueprints.launchpad.net/openstack-helm/+spec/
+        # support-docker-registry-with-authentication-turned-on
+        app_ns = self._helm.get_helm_application_namespaces(app_name)
+        namespaces = \
+            list(set([ns for ns_list in app_ns.values() for ns in ns_list]))
+
+        for ns in namespaces:
+            if ns == common.HELM_NS_HELM_TOOLKIT:
+                continue
+
+            try:
+                LOG.info("Deleting Secret %s under Namespace "
+                         "%s ..." % (DOCKER_REGISTRY_SECRET, ns))
+                self._kube.kube_delete_secret(
+                    DOCKER_REGISTRY_SECRET, ns, grace_period_seconds=0)
+                LOG.info("Secret %s under Namespace %s delete "
+                         "completed." % (DOCKER_REGISTRY_SECRET, ns))
+            except Exception as e:
+                LOG.error(e)
+                raise
+
+    def _delete_namespace(self, namespace):
+        loop_timeout = 1
+        timeout = 300
+        try:
+            LOG.info("Deleting Namespace %s ..." % namespace)
+            self._kube.kube_delete_namespace(namespace,
+                                             grace_period_seconds=0)
+
+            # Namespace termination timeout: 5 minutes
+            while(loop_timeout <= timeout):
+                if not self._kube.kube_get_namespace(namespace):
+                    # Namespace has been terminated
+                    break
+                loop_timeout += 1
+                time.sleep(1)
+
+            if loop_timeout > timeout:
+                raise exception.K8sNamespaceDeleteTimeout(name=namespace)
+            LOG.info("Namespace %s delete completed." % namespace)
+        except Exception as e:
+            LOG.error(e)
+            raise
+
+    def _delete_persistent_volume_claim(self, namespace):
+        try:
+            LOG.info("Deleting Persistent Volume Claim "
+                     "under Namespace %s ..." % namespace)
+            self._kube.kube_delete_persistent_volume_claim(namespace,
+                                                           timeout_seconds=10)
+            LOG.info("Persistent Volume Claim delete completed.")
+        except Exception as e:
+            LOG.error(e)
+            raise
+
     def _get_list_of_charts(self, manifest_file):
         charts = []
         with open(manifest_file, 'r') as f:
@@ -893,6 +1014,7 @@ class AppOperator(object):
         try:
             app.charts = self._get_list_of_charts(app.armada_mfile_abs)
             if app.system_app:
+                self._create_local_registry_secrets(app.name)
                 self._update_app_status(
                     app, new_progress=constants.APP_PROGRESS_GENERATE_OVERRIDES)
                 LOG.info("Generating application overrides...")
@@ -956,59 +1078,14 @@ class AppOperator(object):
         if self._make_armada_request_with_monitor(app, constants.APP_DELETE_OP):
             if app.system_app:
-                # TODO convert these kubectl commands to use the k8s api
-                p1 = subprocess.Popen(
-                    ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
-                     'get', 'pvc', '--no-headers', '-n', 'openstack'],
-                    stdout=subprocess.PIPE)
-                p2 = subprocess.Popen(['awk', '{print $3}'],
-                                      stdin=p1.stdout,
-                                      stdout=subprocess.PIPE)
-                p3 = subprocess.Popen(
-                    ['xargs', '-i', 'kubectl',
-                     '--kubeconfig=/etc/kubernetes/admin.conf', 'delete',
-                     'pv', '{}', '--wait=false'],
-                    stdin=p2.stdout,
-                    stdout=subprocess.PIPE,
-                    stderr=subprocess.PIPE)
-
-                timer = threading.Timer(10, p3.kill)
                 try:
-                    timer.start()
-                    p1.stdout.close()
-                    p2.stdout.close()
-                    out, err = p3.communicate()
-                    if out and not err:
-                        LOG.info("Persistent Volumes marked for deletion.")
-                    else:
-                        self._abort_operation(app, constants.APP_REMOVE_OP)
-                        LOG.error("Failed to clean up PVs after app removal.")
+                    self._delete_local_registry_secrets(app.name)
+                    self._delete_persistent_volume_claim(common.HELM_NS_OPENSTACK)
+                    self._delete_namespace(common.HELM_NS_OPENSTACK)
                 except Exception as e:
                     self._abort_operation(app, constants.APP_REMOVE_OP)
-                    LOG.exception("Failed to clean up PVs after app "
-                                  "removal: %s" % e)
-                finally:
-                    timer.cancel()
-
-                p4 = subprocess.Popen(
-                    ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
-                     'delete', 'namespace', 'openstack'],
-                    stdout=subprocess.PIPE)
-                timer2 = threading.Timer(10, p4.kill)
-                try:
-                    timer2.start()
-                    out, err = p4.communicate()
-                    if out and not err:
-                        LOG.info("Openstack namespace delete completed.")
-                    else:
-                        self._abort_operation(app, constants.APP_REMOVE_OP)
-                        LOG.error("Failed to clean up openstack namespace"
-                                  " after app removal.")
-                except Exception as e:
-                    self._abort_operation(app, constants.APP_REMOVE_OP)
-                    LOG.exception("Failed to clean up openstack namespace "
-                                  "after app removal: %s" % e)
-                finally:
-                    timer2.cancel()
+                    LOG.exception(e)
+                    return False

             self._update_app_status(app, constants.APP_UPLOAD_SUCCESS)
             LOG.info("Application (%s) remove completed." % app.name)
@@ -1104,6 +1181,9 @@
 class DockerHelper(object):
     """ Utility class to encapsulate Docker related operations """

+    def __init__(self, dbapi):
+        self._dbapi = dbapi
+
     def _start_armada_service(self, client):
         try:
             container = client.containers.get(ARMADA_CONTAINER_NAME)
@@ -1229,34 +1309,60 @@
                       (request, manifest_file, e))
         return rc

-    def download_an_image(self, loc_img_tag):
+    def get_local_docker_registry_server(self):
+        registry_ip = self._dbapi.address_get_by_name(
+            cutils.format_address_name(constants.CONTROLLER_HOSTNAME,
+                                       constants.NETWORK_TYPE_MGMT)
+        ).address
+        registry_server = '{}:{}'.format(registry_ip, common.REGISTRY_PORT)
+        return registry_server
+
+    def download_an_image(self, img_tag):

         rc = True

+        local_registry_server = self.get_local_docker_registry_server()
+
         start = time.time()
-        try:
-            # Pull image from local docker registry
-            LOG.info("Image %s download started from local registry" % loc_img_tag)
-            client = docker.APIClient(timeout=INSTALLATION_TIMEOUT)
-            client.pull(loc_img_tag)
-        except docker.errors.NotFound:
-            try:
-                # Image is not available in local docker registry, get the image
-                # from the public registry and push to the local registry
-                LOG.info("Image %s is not available in local registry, "
-                         "download started from public registry" % loc_img_tag)
-                pub_img_tag = loc_img_tag[1 + loc_img_tag.find('/'):]
-                client.pull(pub_img_tag)
-                client.tag(pub_img_tag, loc_img_tag)
-                client.push(loc_img_tag)
+        if img_tag.startswith(local_registry_server):
+            try:
+                LOG.info("Image %s download started from local registry" % img_tag)
+                local_registry_auth = get_local_docker_registry_auth()
+                client = docker.APIClient(timeout=INSTALLATION_TIMEOUT)
+                client.pull(img_tag, auth_config=local_registry_auth)
+            except docker.errors.NotFound:
+                try:
+                    # Pull the image from the public registry
+                    LOG.info("Image %s is not available in local registry, "
+                             "download started from public registry" % img_tag)
+                    pub_img_tag = img_tag.replace(local_registry_server + "/", "")
+                    client.pull(pub_img_tag)
+                except Exception as e:
+                    rc = False
+                    LOG.error("Image %s download failed from public registry: %s" % (pub_img_tag, e))
+                    return img_tag, rc
+
+                try:
+                    # Tag and push the image to the local registry
+                    client.tag(pub_img_tag, img_tag)
+                    client.push(img_tag, auth_config=local_registry_auth)
+                except Exception as e:
+                    rc = False
+                    LOG.error("Image %s push failed to local registry: %s" % (img_tag, e))
             except Exception as e:
                 rc = False
-                LOG.error("Image %s download failed from public registry: %s" % (pub_img_tag, e))
-        except Exception as e:
-            rc = False
-            LOG.error("Image %s download failed from local registry: %s" % (loc_img_tag, e))
+                LOG.error("Image %s download failed from local registry: %s" % (img_tag, e))
+        else:
+            try:
+                LOG.info("Image %s download started from public registry" % img_tag)
+                client = docker.APIClient(timeout=INSTALLATION_TIMEOUT)
+                client.pull(img_tag)
+            except Exception as e:
+                rc = False
+                LOG.error("Image %s download failed from public registry: %s" % (img_tag, e))

         elapsed_time = time.time() - start
         if rc:
             LOG.info("Image %s download succeeded in %d seconds" %
-                     (loc_img_tag, elapsed_time))
-        return loc_img_tag, rc
+                     (img_tag, elapsed_time))
+        return img_tag, rc

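The reworked download_an_image() implements a pull-through pattern: try the authenticated local registry first, fall back to the public registry on NotFound, then tag and push the result back into the local registry. The core of that flow as a self-contained sketch; the registry address, credentials, and image name are placeholders, while the docker-py calls mirror those in the diff:

    import docker

    def mirror_image(img_tag, registry, auth):
        # Hedged sketch of the pull / fallback / tag / push sequence above.
        client = docker.APIClient(timeout=3600)
        try:
            client.pull(img_tag, auth_config=auth)          # authenticated local pull
        except docker.errors.NotFound:
            pub_img_tag = img_tag.replace(registry + '/', '')
            client.pull(pub_img_tag)                        # public fallback
            client.tag(pub_img_tag, img_tag)
            client.push(img_tag, auth_config=auth)          # seed the local registry
        return img_tag

    # Example values; the real code derives these from sysinv's DB and keyring.
    mirror_image('192.168.204.2:9001/starlingx/example:latest',
                 '192.168.204.2:9001',
                 {'username': 'admin', 'password': 'secret'})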
View File

@@ -24,6 +24,7 @@ class AodhHelm(openstack.OpenstackBaseHelm):
     def get_overrides(self, namespace=None):
         overrides = {
             common.HELM_NS_OPENSTACK: {
+                'pod': self._get_pod_overrides(),
                 'images': self._get_images_overrides(),
                 'conf': self._get_conf_overrides(),
                 'endpoints': self._get_endpoints_overrides()
@@ -38,6 +39,17 @@ class AodhHelm(openstack.OpenstackBaseHelm):
         else:
             return overrides

+    def _get_pod_overrides(self):
+        overrides = {
+            'replicas': {
+                'api': self._num_controllers(),
+                'evaluator': self._num_controllers(),
+                'listener': self._num_controllers(),
+                'notifier': self._num_controllers()
+            }
+        }
+        return overrides
+
     def _get_images_overrides(self):
         heat_image = self._operator.chart_operators[
             constants.HELM_CHART_HEAT].docker_image

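The pattern here, repeated for panko and rbd-provisioner below, sizes control-plane replica counts to the number of controllers. An illustration of what the new override would evaluate to on a standard duplex system, assuming self._num_controllers() returns 2:

    # Hedged illustration of AodhHelm._get_pod_overrides() on a two-controller
    # system (assuming self._num_controllers() == 2).
    {
        'replicas': {
            'api': 2,
            'evaluator': 2,
            'listener': 2,
            'notifier': 2
        }
    }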
View File

@@ -32,7 +32,8 @@ class MariadbHelm(openstack.OpenstackBaseHelm):
             common.HELM_NS_OPENSTACK: {
                 'pod': {
                     'replicas': {
-                        'server': self._num_server_replicas()
+                        'server': self._num_server_replicas(),
+                        'ingress': self._num_controllers()
                     }
                 },
                 'images': self._get_images_overrides(),

View File

@@ -32,40 +32,13 @@ class NeutronHelm(openstack.OpenstackBaseHelm):
         overrides = {
             common.HELM_NS_OPENSTACK: {
                 'pod': {
-                    'user': {
-                        'neutron': {
-                            'uid': 0
-                        }
-                    },
                     'replicas': {
                         'server': self._num_controllers()
                     },
                 },
-                'network': {
-                    'interface': {
-                        'tunnel': 'docker0'
-                    },
-                    'backend': ['openvswitch', 'sriov'],
-                },
                 'conf': {
-                    'neutron': self._get_neutron_config(),
                     'plugins': {
-                        'ml2_conf': self._get_neutron_ml2_config(),
+                        'ml2_conf': self._get_neutron_ml2_config()
                     },
-                    'dhcp_agent': {
-                        'DEFAULT': {
-                            'resync_interval': 30,
-                            'enable_isolated_metadata': True,
-                            'enable_metadata_network': False,
-                            'interface_driver': 'openvswitch',
-                        },
-                    },
-                    'l3_agent': {
-                        'DEFAULT': {
-                            'interface_driver': 'openvswitch',
-                            'agent_mode': 'dvr_snat',
-                            'metadata_port': 80,
-                        },
-                    },
                     'overrides': {
                         'neutron_ovs-agent': {
@@ -91,7 +64,6 @@ class NeutronHelm(openstack.OpenstackBaseHelm):
                         },
                     },
                 },
-                'labels': self._get_labels_overrides(),
                 'endpoints': self._get_endpoints_overrides(),
                 'images': self._get_images_overrides(),
             }
@@ -122,30 +94,92 @@ class NeutronHelm(openstack.OpenstackBaseHelm):
     def update_dynamic_options(self, overrides):
         if utils.is_virtual():
-            overrides['plugins']['ml2_conf']['ovs_driver']['vhost_user_enabled'] = False
+            overrides.update({
+                'plugins': {
+                    'ml2_conf': {
+                        'ovs_driver': {
+                            'vhost_user_enabled': False
+                        }
+                    }
+                }
+            })

     def update_from_service_parameters(self, overrides):
         service_parameters = self._get_service_parameters(service=constants.SERVICE_TYPE_NETWORK)
         for param in service_parameters:
             if param.section == constants.SERVICE_PARAM_SECTION_NETWORK_DEFAULT:
                 if param.name == constants.SERVICE_PARAM_NAME_DEFAULT_SERVICE_PLUGINS:
-                    overrides['neutron']['DEFAULT']['service_plugins'] = str(param.value)
+                    overrides.update({
+                        'neutron': {
+                            'DEFAULT': {
+                                'service_plugins': str(param.value)
+                            }
+                        }
+                    })
                 if param.name == constants.SERVICE_PARAM_NAME_DEFAULT_DNS_DOMAIN:
-                    overrides['neutron']['DEFAULT']['dns_domain'] = str(param.value)
+                    overrides.update({
+                        'neutron': {
+                            'DEFAULT': {
+                                'dns_domain': str(param.value)
+                            }
+                        }
+                    })
                 if param.name == constants.SERVICE_PARAM_NAME_BASE_MAC:
-                    overrides['neutron']['DEFAULT']['base_mac'] = str(param.value)
+                    overrides.update({
+                        'neutron': {
+                            'DEFAULT': {
+                                'base_mac': str(param.value)
+                            }
+                        }
+                    })
                 if param.name == constants.SERVICE_PARAM_NAME_DVR_BASE_MAC:
-                    overrides['neutron']['DEFAULT']['dvr_base_mac'] = str(param.value)
+                    overrides.update({
+                        'neutron': {
+                            'DEFAULT': {
+                                'dvr_base_mac': str(param.value)
+                            }
+                        }
+                    })
             elif param.section == constants.SERVICE_PARAM_SECTION_NETWORK_ML2:
                 if param.name == constants.SERVICE_PARAM_NAME_ML2_MECHANISM_DRIVERS:
-                    overrides['plugins']['ml2_conf']['ml2']['mechanism_drivers'] = str(param.value)
+                    overrides.update({
+                        'plugins': {
+                            'ml2_conf': {
+                                'ml2': {
+                                    'mechanism_drivers': str(param.value)
+                                }
+                            }
+                        }
+                    })
                 if param.name == constants.SERVICE_PARAM_NAME_ML2_EXTENSION_DRIVERS:
-                    overrides['plugins']['ml2_conf']['ml2']['extension_drivers'] = str(param.value)
+                    overrides.update({
+                        'plugins': {
+                            'ml2_conf': {
+                                'ml2': {
+                                    'extension_drivers': str(param.value)
+                                }
+                            }
+                        }
+                    })
                 if param.name == constants.SERVICE_PARAM_NAME_ML2_TENANT_NETWORK_TYPES:
-                    overrides['plugins']['ml2_conf']['ml2']['tenant_network_types'] = str(param.value)
+                    overrides.update({
+                        'plugins': {
+                            'ml2_conf': {
+                                'ml2': {
+                                    'tenant_network_types': str(param.value)
+                                }
+                            }
+                        }
+                    })
             elif param.section == constants.SERVICE_PARAM_SECTION_NETWORK_DHCP:
                 if param.name == constants.SERVICE_PARAM_NAME_DHCP_FORCE_METADATA:
-                    overrides['dhcp_agent']['DEFAULT']['force_metadata'] = str(param.value)
+                    overrides.update({
+                        'dhcp_agent': {
+                            'DEFAULT': {
+                                'force_metadata': str(param.value)
+                            }
+                        }
+                    })

     def _get_per_host_overrides(self):
         host_list = []
@@ -252,50 +286,17 @@ class NeutronHelm(openstack.OpenstackBaseHelm):
             'securitygroup': {
                 'firewall_driver': 'noop',
             },
+            # Mitigate host OS memory leak of cgroup session-*scope files
+            # and kernel slab resources. The leak is triggered using 'sudo'
+            # which utilizes the host dbus-daemon. The sriov agent frequently
+            # polls devices via 'ip link show' using run_as_root=True, but
+            # does not actually require 'sudo'.
+            'agent': {
+                'root_helper': '',
+            },
             'sriov_nic': sriov_nic,
         }

-    def _get_neutron_config(self):
-        neutron_config = {
-            'DEFAULT': {
-                'l3_ha': False,
-                'min_l3_agents_per_router': 1,
-                'max_l3_agents_per_router': 1,
-                'l3_ha_network_type': 'vxlan',
-                'dhcp_agents_per_network': 1,
-                'max_overflow': 64,
-                'max_pool_size': 1,
-                'idle_timeout': 60,
-                'router_status_managed': True,
-                'vlan_transparent': True,
-                'wsgi_default_pool_size': 100,
-                'notify_nova_on_port_data_changes': True,
-                'notify_nova_on_port_status_changes': True,
-                'control_exchange': 'neutron',
-                'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin',
-                'state_path': '/var/run/neutron',
-                'syslog_log_facility': 'local2',
-                'use_syslog': True,
-                'pnet_audit_enabled': False,
-                'driver': 'messagingv2',
-                'enable_proxy_headers_parsing': True,
-                'lock_path': '/var/run/neutron/lock',
-                'log_format': '[%(name)s] %(message)s',
-                'policy_file': '/etc/neutron/policy.json',
-                'service_plugins':
-                    'router' + ',' + constants.NEUTRON_PLUGIN_NETWORK_SEGMENT_RANGE,
-                'dns_domain': 'openstacklocal',
-                'enable_new_agents': False,
-                'allow_automatic_dhcp_failover': True,
-                'allow_automatic_l3agent_failover': True,
-            },
-            'agent': {
-                'root_helper': 'sudo',
-            },
-        }
-        return neutron_config
-
     def _get_ml2_physical_network_mtus(self):
         ml2_physical_network_mtus = []
         datanetworks = self.dbapi.datanetworks_get_all()
@@ -308,22 +309,7 @@ class NeutronHelm(openstack.OpenstackBaseHelm):
     def _get_neutron_ml2_config(self):
         ml2_config = {
             'ml2': {
-                'type_drivers': 'flat,vlan,vxlan',
-                'tenant_network_types': 'vlan,vxlan',
-                'mechanism_drivers': 'openvswitch,sriovnicswitch,l2population',
-                'path_mtu': 0,
                 'physical_network_mtus': self._get_ml2_physical_network_mtus()
             },
-            'ovs_driver': {
-                'vhost_user_enabled': True,
-            },
-            'securitygroup': {
-                'firewall_driver': 'noop',
-            },
-            'ml2_type_vxlan': {
-                'vni_ranges': '',
-                'vxlan_group': '',
-            },
         }
         LOG.info("_get_neutron_ml2_config=%s" % ml2_config)
@@ -426,16 +412,5 @@ class NeutronHelm(openstack.OpenstackBaseHelm):
         return overrides

-    def _get_labels_overrides(self):
-        overrides = {
-            'agent': {
-                'dhcp': {'node_selector_key': 'openvswitch'},
-                'l3': {'node_selector_key': 'openvswitch'},
-                'metadata': {'node_selector_key': 'openvswitch'},
-            },
-        }
-        return overrides
-
     def get_region_name(self):
         return self._get_service_region_name(self.SERVICE_NAME)

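One subtlety in the rewritten update_* methods: dict.update() is a shallow merge, so each overrides.update({...}) call replaces the entire top-level key ('neutron', 'plugins', 'dhcp_agent', ...) rather than merging into it. That is only safe if the surrounding helm framework deep-merges these dicts into the chart overrides afterwards, which this diff does not show. A plain-Python demonstration of the shallow behaviour (not code from the commit):

    # The second update replaces the whole 'neutron' subtree rather than
    # adding 'dns_domain' next to 'service_plugins'.
    overrides = {'neutron': {'DEFAULT': {'service_plugins': 'router'}}}
    overrides.update({'neutron': {'DEFAULT': {'dns_domain': 'openstacklocal'}}})
    assert overrides == {'neutron': {'DEFAULT': {'dns_domain': 'openstacklocal'}}}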
View File

@@ -69,7 +69,7 @@ class NovaHelm(openstack.OpenstackBaseHelm):
                     'conductor': self._num_controllers(),
                     'consoleauth': self._num_controllers(),
                     'scheduler': self._num_controllers(),
-                    # set replicas for novncproxy once it's validated.
+                    'novncproxy': self._num_controllers()
                 }
             },
             'conf': {

View File

@@ -24,6 +24,7 @@ class PankoHelm(openstack.OpenstackBaseHelm):
     def get_overrides(self, namespace=None):
         overrides = {
             common.HELM_NS_OPENSTACK: {
+                'pod': self._get_pod_overrides(),
                 'images': self._get_images_overrides(),
                 'endpoints': self._get_endpoints_overrides()
             }
@@ -37,6 +38,14 @@ class PankoHelm(openstack.OpenstackBaseHelm):
         else:
             return overrides

+    def _get_pod_overrides(self):
+        overrides = {
+            'replicas': {
+                'api': self._num_controllers()
+            }
+        }
+        return overrides
+
     def _get_images_overrides(self):
         heat_image = self._operator.chart_operators[
             constants.HELM_CHART_HEAT].docker_image

View File

@@ -104,6 +104,7 @@ class RbdProvisionerHelm(base.BaseHelm):
                 "classes": classes,
                 "ephemeral_pools": ephemeral_pools,
                 "images": self._get_images_overrides(),
+                "pods": self._get_pod_overrides()
             }
         }
@@ -115,6 +116,14 @@ class RbdProvisionerHelm(base.BaseHelm):
         else:
             return overrides

+    def _get_pod_overrides(self):
+        overrides = {
+            'replicas': {
+                'rbd-provisioner': self._num_controllers()
+            }
+        }
+        return overrides
+
     def _get_images_overrides(self):
         # TODO: Remove after ceph upgrade
         # Format the name of the stx specific ceph config helper