Merge remote-tracking branch 'starlingx/f/stein' into HEAD

Change-Id: I34c1f30c81b3a05a2d97c6aa7d83c38c68b0c055
Signed-off-by: Scott Little <scott.little@windriver.com>
Author: Scott Little
Date:   2019-03-01 12:42:42 -05:00
Commit: 1b22b5313d

30 changed files with 621 additions and 454 deletions

View File

@ -326,8 +326,7 @@ class ConfigAssistant():
"""
self.labmode = labmode
# Temporary flag to be removed once kubernetes installs are the default
self.kubernetes = kubernetes
self.kubernetes = True
self.config_uuid = "install"
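The hunk above forces Kubernetes configuration on regardless of the flag passed in, per the "temporary flag" comment. A minimal illustrative sketch of the resulting behaviour (the surrounding class is a simplified, hypothetical stand-in; only the override itself comes from the change):

class ConfigAssistant:
    """Simplified stand-in for the real configuration assistant."""

    def __init__(self, labmode=False, kubernetes=False):
        self.labmode = labmode
        # Temporary override from the diff: the caller's value is ignored
        # until Kubernetes installs become the default.
        self.kubernetes = True
        self.config_uuid = "install"


assistant = ConfigAssistant(kubernetes=False)
assert assistant.kubernetes is True  # the flag is forced on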

View File

@ -39,6 +39,15 @@ CONTROLLER_INFRASTRUCTURE_HOSTNAME_SUFFIX=-infra
INFRASTRUCTURE_START_ADDRESS=192.168.205.2
INFRASTRUCTURE_END_ADDRESS=192.168.205.254
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
@ -52,6 +61,12 @@ EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
[cDNS]
# DNS Configuration
NAMESERVER_1=8.8.8.8
NAMESERVER_2=8.8.4.4
NAMESERVER_3=NC
[cNETWORK]
# Data Network Configuration
VSWITCH_TYPE=ovs-dpdk
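The new [cCLUSTER] block is an ordinary section of the config_controller answer file. As an illustration only (the in-memory sample and parser settings here are assumptions, not part of the change), it can be read with Python's standard configparser:

import configparser
import io

# Hypothetical excerpt of an answer file containing the new section.
sample = """
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
"""

parser = configparser.ConfigParser()
parser.optionxform = str  # keep keys upper-case as written
parser.read_file(io.StringIO(sample))

cluster = parser["cCLUSTER"]
print(cluster["CLUSTER_SUBNET"])   # 192.168.206.0/24
print(cluster["CLUSTER_MTU"])      # 1500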

View File

@ -45,6 +45,15 @@ CONTROLLER_INFRASTRUCTURE_HOSTNAME_SUFFIX=NC
INFRASTRUCTURE_START_ADDRESS=NC
INFRASTRUCTURE_END_ADDRESS=NC
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
@ -58,6 +67,12 @@ EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
[cDNS]
# DNS Configuration
NAMESERVER_1=8.8.8.8
NAMESERVER_2=8.8.4.4
NAMESERVER_3=NC
[cNETWORK]
# Data Network Configuration
VSWITCH_TYPE=ovs-dpdk

View File

@ -45,6 +45,15 @@ CONTROLLER_INFRASTRUCTURE_HOSTNAME_SUFFIX=NC
INFRASTRUCTURE_START_ADDRESS=NC
INFRASTRUCTURE_END_ADDRESS=NC
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
@ -58,6 +67,12 @@ EXTERNAL_OAM_FLOATING_ADDRESS=abcd::2
EXTERNAL_OAM_0_ADDRESS=abcd::3
EXTERNAL_OAM_1_ADDRESS=abcd::4
[cDNS]
# DNS Configuration
NAMESERVER_1=8.8.8.8
NAMESERVER_2=8.8.4.4
NAMESERVER_3=NC
[cNETWORK]
# Data Network Configuration
VSWITCH_TYPE=ovs-dpdk

View File

@ -47,6 +47,15 @@ CONTROLLER_INFRASTRUCTURE_HOSTNAME_SUFFIX=NC
INFRASTRUCTURE_START_ADDRESS=NC
INFRASTRUCTURE_END_ADDRESS=NC
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
@ -60,6 +69,12 @@ EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
[cDNS]
# DNS Configuration
NAMESERVER_1=8.8.8.8
NAMESERVER_2=8.8.4.4
NAMESERVER_3=NC
[cNETWORK]
# Data Network Configuration
VSWITCH_TYPE=ovs-dpdk

View File

@ -47,6 +47,15 @@ CONTROLLER_INFRASTRUCTURE_HOSTNAME_SUFFIX=NC
INFRASTRUCTURE_START_ADDRESS=NC
INFRASTRUCTURE_END_ADDRESS=NC
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
@ -60,6 +69,12 @@ EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
[cDNS]
# DNS Configuration
NAMESERVER_1=8.8.8.8
NAMESERVER_2=8.8.4.4
NAMESERVER_3=NC
[cNETWORK]
# Data Network Configuration
VSWITCH_TYPE=nuage_vrs

View File

@ -1,3 +1,3 @@
SRC_DIR="stx-openstack-helm"
COPY_LIST_TO_TAR="$PKG_BASE/../../../helm-charts/rbd-provisioner $PKG_BASE/../../../helm-charts/garbd"
COPY_LIST_TO_TAR="$PKG_BASE/../../../helm-charts/rbd-provisioner $PKG_BASE/../../../helm-charts/garbd $PKG_BASE/../../../helm-charts/ceph-pools-audit"
TIS_PATCH_VER=6

View File

@ -58,6 +58,7 @@ helm repo add local http://localhost:8879/charts
make nova-api-proxy
make rbd-provisioner
make garbd
make ceph-pools-audit
# terminate helm server (the last backgrounded task)
kill %1

View File

@ -141,6 +141,35 @@ data:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: openstack-ceph-pools-audit
data:
chart_name: ceph-pools-audit
release: openstack-ceph-pools-audit
namespace: openstack
wait:
timeout: 1800
labels:
app: ceph-pools-audit
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
app: ceph-pools-audit
source:
type: tar
location: http://172.17.0.1/helm_charts/ceph-pools-audit-0.1.0.tgz
subpath: ceph-pools-audit
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: openstack-mariadb
@ -698,9 +727,13 @@ data:
upgrade_levels: None
metrics:
required: false
workarounds:
enable_numa_live_migration: True
network:
sshd:
enabled: true
console:
address_search_enabled: false
source:
type: tar
@ -753,10 +786,10 @@ data:
chart_name: neutron
release: openstack-neutron
namespace: openstack
install:
no_hooks: false
test:
enabled: false
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
@ -855,7 +888,7 @@ data:
lock_path: /var/run/neutron/lock
log_format: '[%(name)s] %(message)s'
policy_file: /etc/neutron/policy.json
service_plugins: router
service_plugins: router,network_segment_range
dns_domain: openstacklocal
enable_new_agents: false
allow_automatic_dhcp_failover: true
@ -863,7 +896,7 @@ data:
agent:
root_helper: sudo
vhost:
vhost_user_enabled: false
vhost_user_enabled: true
dhcp_agent:
DEFAULT:
enable_isolated_metadata: true
@ -883,7 +916,12 @@ data:
mechanism_drivers: openvswitch,sriovnicswitch,l2population
path_mtu: 0
tenant_network_types: vlan,vxlan
type_drivers: managed_flat,managed_vlan,managed_vxlan
type_drivers: flat,vlan,vxlan
ml2_type_vxlan:
vni_ranges: ''
vxlan_group: ''
ovs_driver:
vhost_user_enabled: true
securitygroup:
firewall_driver: noop
openvswitch_agent:
@ -947,6 +985,10 @@ data:
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
conf:
policy:
stacks:global_index: rule:context_is_admin
software_configs:global_index: rule:context_is_admin
pod:
replicas:
api: 2
@ -1086,11 +1128,11 @@ data:
/v1: gnocchiv1+noauth
/healthcheck: healthcheck
pipeline:gnocchiv1+noauth:
pipeline: http_proxy_to_wsgi gnocchiv1
pipeline: gnocchiv1
pipeline:gnocchiv1+keystone:
pipeline: http_proxy_to_wsgi keystone_authtoken gnocchiv1
pipeline: keystone_authtoken gnocchiv1
pipeline:gnocchiversions_pipeline:
pipeline: http_proxy_to_wsgi gnocchiversions
pipeline: gnocchiversions
app:gnocchiversions:
paste.app_factory: gnocchi.rest.app:app_factory
root: gnocchi.rest.api.VersionsController
@ -1100,9 +1142,6 @@ data:
filter:keystone_authtoken:
use: egg:keystonemiddleware#auth_token
oslo_config_project: gnocchi
filter:http_proxy_to_wsgi:
use: egg:oslo.middleware#http_proxy_to_wsgi
oslo_config_project: gnocchi
app:healthcheck:
use: egg:oslo.middleware#healthcheck
oslo_config_project: gnocchi
@ -1292,14 +1331,6 @@ data:
public: 80
conf:
ceilometer:
DEFAULT:
csv_location: /var/lib/ceilometer/
csv_location_strict: true
shuffle_time_before_polling_task: 30
batch_polled_samples: true
dispatcher_gnocchi:
archive_policy: ''
filter_project: ''
cache:
expiration_time: 86400
compute:
@ -1308,8 +1339,6 @@ data:
oslo_messaging_notifications:
topics:
- notifications
notification:
batch_size: 100
pipeline:
sources:
- name: meter_source
@ -1317,105 +1346,10 @@ data:
- "*"
sinks:
- meter_sink
- csv_sink
- name: cpu_source
meters:
- "cpu"
sinks:
- cpu_sink
- cpu_delta_sink
- vcpu_sink
- name: disk_source
meters:
- "disk.read.bytes"
- "disk.read.requests"
- "disk.write.bytes"
- "disk.write.requests"
- "disk.device.read.bytes"
- "disk.device.read.requests"
- "disk.device.write.bytes"
- "disk.device.write.requests"
sinks:
- disk_sink
- name: network_source
meters:
- "network.incoming.bytes"
- "network.incoming.packets"
- "network.outgoing.bytes"
- "network.outgoing.packets"
sinks:
- network_sink
sinks:
- name: meter_sink
transformers:
publishers:
- gnocchi://
- name: cpu_sink
transformers:
- name: "rate_of_change"
parameters:
target:
name: "cpu_util"
unit: "%"
type: "gauge"
max: 100
scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
publishers:
- gnocchi://
- name: cpu_delta_sink
transformers:
- name: "delta"
parameters:
target:
name: "cpu.delta"
growth_only: true
publishers:
- gnocchi://
- name: vcpu_sink
transformers:
- name: "rate_of_change"
parameters:
target:
name: "vcpu_util"
unit: "%"
type: "gauge"
max: 100
scale: "100.0 / (10**9 * (resource_metadata.vcpu_number or 1))"
publishers:
- gnocchi://
- name: disk_sink
transformers:
- name: "rate_of_change"
parameters:
source:
map_from:
name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
unit: "(B|request)"
target:
map_to:
name: "\\1.\\2.\\3.rate"
unit: "\\1/s"
type: "gauge"
publishers:
- gnocchi://
- name: network_sink
transformers:
- name: "rate_of_change"
parameters:
source:
map_from:
name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
unit: "(B|packet)"
target:
map_to:
name: "network.\\1.\\2.rate"
unit: "\\1/s"
type: "gauge"
publishers:
- gnocchi://
- name: csv_sink
publishers:
- csvfile:///var/lib/ceilometer/pm.csv?max_bytes=10000000&backup_count=5&compress=True&enabled=True
event_pipeline:
sources:
- name: event_source
@ -1425,26 +1359,11 @@ data:
- event_sink
sinks:
- name: event_sink
transformers:
publishers:
- panko://
- gnocchi://
polling:
sources:
- name: instance_pollster
interval: 600
meters:
- disk.read.bytes
- disk.read.bytes.rate
- disk.read.requests
- disk.read.requests.rate
- disk.write.bytes
- disk.write.bytes.rate
- disk.write.requests
- disk.write.requests.rate
- disk.capacity
- disk.allocation
- disk.usage
- name: instance_cpu_pollster
interval: 30
meters:
@ -1452,14 +1371,13 @@ data:
- name: instance_disk_pollster
interval: 600
meters:
- disk.capacity
- disk.allocation
- disk.usage
- disk.device.read.requests
- disk.device.read.requests.rate
- disk.device.write.requests
- disk.device.write.requests.rate
- disk.device.read.bytes
- disk.device.read.bytes.rate
- disk.device.write.bytes
- disk.device.write.bytes.rate
- disk.device.capacity
- disk.device.allocation
- disk.device.usage
@ -1556,26 +1474,11 @@ data:
memory.bandwidth.local:
vcpus:
archive_policy_name: ceilometer-low-rate
vcpu_util:
cpu:
archive_policy_name: ceilometer-low-rate
cpu.delta:
cpu_util:
cpu_l3_cache:
disk.root.size:
disk.ephemeral.size:
disk.read.requests:
archive_policy_name: ceilometer-low-rate
disk.read.requests.rate:
disk.write.requests:
archive_policy_name: ceilometer-low-rate
disk.write.requests.rate:
disk.read.bytes:
archive_policy_name: ceilometer-low-rate
disk.read.bytes.rate:
disk.write.bytes:
archive_policy_name: ceilometer-low-rate
disk.write.bytes.rate:
disk.latency:
disk.iops:
disk.capacity:
@ -1589,6 +1492,9 @@ data:
attributes:
host: resource_metadata.(instance_host|host)
image_ref: resource_metadata.image_ref
launched_at: resource_metadata.launched_at
created_at: resource_metadata.created_at
deleted_at: resource_metadata.deleted_at
display_name: resource_metadata.display_name
flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id)
flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name)
@ -1601,8 +1507,6 @@ data:
instance_disk: '{"=": {"instance_id": "%s"}}'
- resource_type: instance_network_interface
metrics:
network.outgoing.packets.rate:
network.incoming.packets.rate:
network.outgoing.packets:
archive_policy_name: ceilometer-low-rate
network.incoming.packets:
@ -1615,8 +1519,6 @@ data:
archive_policy_name: ceilometer-low-rate
network.incoming.packets.error:
archive_policy_name: ceilometer-low-rate
network.outgoing.bytes.rate:
network.incoming.bytes.rate:
network.outgoing.bytes:
archive_policy_name: ceilometer-low-rate
network.incoming.bytes:
@ -1628,16 +1530,12 @@ data:
metrics:
disk.device.read.requests:
archive_policy_name: ceilometer-low-rate
disk.device.read.requests.rate:
disk.device.write.requests:
archive_policy_name: ceilometer-low-rate
disk.device.write.requests.rate:
disk.device.read.bytes:
archive_policy_name: ceilometer-low-rate
disk.device.read.bytes.rate:
disk.device.write.bytes:
archive_policy_name: ceilometer-low-rate
disk.device.write.bytes.rate:
disk.device.latency:
disk.device.read.latency:
disk.device.write.latency:
@ -1674,10 +1572,14 @@ data:
hardware.ipmi.node.cpu_util:
hardware.ipmi.node.mem_util:
hardware.ipmi.node.io_util:
hardware.ipmi.temperature:
hardware.ipmi.voltage:
hardware.ipmi.current:
- resource_type: ipmi_sensor
metrics:
hardware.ipmi.fan:
hardware.ipmi.temperature:
hardware.ipmi.current:
hardware.ipmi.voltage:
attributes:
node: resource_metadata.node
- resource_type: network
metrics:
bandwidth:
@ -1696,7 +1598,6 @@ data:
metrics:
storage.objects.incoming.bytes:
storage.objects.outgoing.bytes:
storage.api.request:
storage.objects.size:
storage.objects:
storage.objects.containers:
@ -1712,6 +1613,8 @@ data:
attributes:
display_name: resource_metadata.(display_name|name)
volume_type: resource_metadata.volume_type
image_id: resource_metadata.image_id
instance_id: resource_metadata.instance_id
event_delete: volume.delete.start
event_attributes:
id: resource_id
@ -2718,6 +2621,16 @@ data:
- openstack-rbd-provisioner
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ceph-pools-audit
data:
description: "Ceph pools audit"
sequenced: false
chart_group:
- openstack-ceph-pools-audit
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: openstack-mariadb
@ -2834,6 +2747,7 @@ data:
- kube-system-ingress
- openstack-ingress
- provisioner
- ceph-pools-audit
- openstack-mariadb
- openstack-memcached
- openstack-rabbitmq
@ -2843,3 +2757,4 @@ data:
- openstack-heat
- openstack-horizon
- openstack-cinder
- openstack-telemetry
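The manifest changes above register an openstack-ceph-pools-audit chart, wrap it in a ceph-pools-audit chart group, and add that group (plus openstack-telemetry) to the application's chart_groups list. A minimal sketch of checking a manifest for the new group, assuming PyYAML is available and using trimmed, hypothetical stand-in documents rather than the full manifest:

import yaml  # PyYAML, assumed available

# Trimmed, hypothetical stand-ins for two of the documents in the manifest.
manifest_yaml = """
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: ceph-pools-audit
data:
  description: "Ceph pools audit"
  sequenced: false
  chart_group:
    - openstack-ceph-pools-audit
---
schema: armada/Manifest/v1
metadata:
  schema: metadata/Document/v1
  name: armada-manifest
data:
  chart_groups:
    - provisioner
    - ceph-pools-audit
    - openstack-mariadb
"""

docs = list(yaml.safe_load_all(manifest_yaml))
manifest = next(d for d in docs
                if d and d.get("schema") == "armada/Manifest/v1")
assert "ceph-pools-audit" in manifest["data"]["chart_groups"]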

View File

@ -141,6 +141,35 @@ data:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: openstack-ceph-pools-audit
data:
chart_name: ceph-pools-audit
release: openstack-ceph-pools-audit
namespace: openstack
wait:
timeout: 1800
labels:
app: ceph-pools-audit
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
app: ceph-pools-audit
source:
type: tar
location: http://172.17.0.1/helm_charts/ceph-pools-audit-0.1.0.tgz
subpath: ceph-pools-audit
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: openstack-mariadb
@ -698,9 +727,13 @@ data:
upgrade_levels: None
metrics:
required: false
workarounds:
enable_numa_live_migration: True
network:
sshd:
enabled: true
console:
address_search_enabled: false
source:
type: tar
@ -855,7 +888,7 @@ data:
lock_path: /var/run/neutron/lock
log_format: '[%(name)s] %(message)s'
policy_file: /etc/neutron/policy.json
service_plugins: router
service_plugins: router,network_segment_range
dns_domain: openstacklocal
enable_new_agents: false
allow_automatic_dhcp_failover: true
@ -863,7 +896,7 @@ data:
agent:
root_helper: sudo
vhost:
vhost_user_enabled: false
vhost_user_enabled: true
dhcp_agent:
DEFAULT:
enable_isolated_metadata: true
@ -883,7 +916,12 @@ data:
mechanism_drivers: openvswitch,sriovnicswitch,l2population
path_mtu: 0
tenant_network_types: vlan,vxlan
type_drivers: managed_flat,managed_vlan,managed_vxlan
type_drivers: flat,vlan,vxlan
ml2_type_vxlan:
vni_ranges: ''
vxlan_group: ''
ovs_driver:
vhost_user_enabled: true
securitygroup:
firewall_driver: noop
openvswitch_agent:
@ -947,6 +985,10 @@ data:
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
conf:
policy:
stacks:global_index: rule:context_is_admin
software_configs:global_index: rule:context_is_admin
pod:
replicas:
api: 2
@ -1086,11 +1128,11 @@ data:
/v1: gnocchiv1+noauth
/healthcheck: healthcheck
pipeline:gnocchiv1+noauth:
pipeline: http_proxy_to_wsgi gnocchiv1
pipeline: gnocchiv1
pipeline:gnocchiv1+keystone:
pipeline: http_proxy_to_wsgi keystone_authtoken gnocchiv1
pipeline: keystone_authtoken gnocchiv1
pipeline:gnocchiversions_pipeline:
pipeline: http_proxy_to_wsgi gnocchiversions
pipeline: gnocchiversions
app:gnocchiversions:
paste.app_factory: gnocchi.rest.app:app_factory
root: gnocchi.rest.api.VersionsController
@ -1100,9 +1142,6 @@ data:
filter:keystone_authtoken:
use: egg:keystonemiddleware#auth_token
oslo_config_project: gnocchi
filter:http_proxy_to_wsgi:
use: egg:oslo.middleware#http_proxy_to_wsgi
oslo_config_project: gnocchi
app:healthcheck:
use: egg:oslo.middleware#healthcheck
oslo_config_project: gnocchi
@ -1292,14 +1331,6 @@ data:
public: 80
conf:
ceilometer:
DEFAULT:
csv_location: /var/lib/ceilometer/
csv_location_strict: true
shuffle_time_before_polling_task: 30
batch_polled_samples: true
dispatcher_gnocchi:
archive_policy: ''
filter_project: ''
cache:
expiration_time: 86400
compute:
@ -1308,8 +1339,6 @@ data:
oslo_messaging_notifications:
topics:
- notifications
notification:
batch_size: 100
pipeline:
sources:
- name: meter_source
@ -1317,105 +1346,10 @@ data:
- "*"
sinks:
- meter_sink
- csv_sink
- name: cpu_source
meters:
- "cpu"
sinks:
- cpu_sink
- cpu_delta_sink
- vcpu_sink
- name: disk_source
meters:
- "disk.read.bytes"
- "disk.read.requests"
- "disk.write.bytes"
- "disk.write.requests"
- "disk.device.read.bytes"
- "disk.device.read.requests"
- "disk.device.write.bytes"
- "disk.device.write.requests"
sinks:
- disk_sink
- name: network_source
meters:
- "network.incoming.bytes"
- "network.incoming.packets"
- "network.outgoing.bytes"
- "network.outgoing.packets"
sinks:
- network_sink
sinks:
- name: meter_sink
transformers:
publishers:
- gnocchi://
- name: cpu_sink
transformers:
- name: "rate_of_change"
parameters:
target:
name: "cpu_util"
unit: "%"
type: "gauge"
max: 100
scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
publishers:
- gnocchi://
- name: cpu_delta_sink
transformers:
- name: "delta"
parameters:
target:
name: "cpu.delta"
growth_only: true
publishers:
- gnocchi://
- name: vcpu_sink
transformers:
- name: "rate_of_change"
parameters:
target:
name: "vcpu_util"
unit: "%"
type: "gauge"
max: 100
scale: "100.0 / (10**9 * (resource_metadata.vcpu_number or 1))"
publishers:
- gnocchi://
- name: disk_sink
transformers:
- name: "rate_of_change"
parameters:
source:
map_from:
name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
unit: "(B|request)"
target:
map_to:
name: "\\1.\\2.\\3.rate"
unit: "\\1/s"
type: "gauge"
publishers:
- gnocchi://
- name: network_sink
transformers:
- name: "rate_of_change"
parameters:
source:
map_from:
name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
unit: "(B|packet)"
target:
map_to:
name: "network.\\1.\\2.rate"
unit: "\\1/s"
type: "gauge"
publishers:
- gnocchi://
- name: csv_sink
publishers:
- csvfile:///var/lib/ceilometer/pm.csv?max_bytes=10000000&backup_count=5&compress=True&enabled=True
event_pipeline:
sources:
- name: event_source
@ -1425,26 +1359,11 @@ data:
- event_sink
sinks:
- name: event_sink
transformers:
publishers:
- panko://
- gnocchi://
polling:
sources:
- name: instance_pollster
interval: 600
meters:
- disk.read.bytes
- disk.read.bytes.rate
- disk.read.requests
- disk.read.requests.rate
- disk.write.bytes
- disk.write.bytes.rate
- disk.write.requests
- disk.write.requests.rate
- disk.capacity
- disk.allocation
- disk.usage
- name: instance_cpu_pollster
interval: 30
meters:
@ -1452,14 +1371,13 @@ data:
- name: instance_disk_pollster
interval: 600
meters:
- disk.capacity
- disk.allocation
- disk.usage
- disk.device.read.requests
- disk.device.read.requests.rate
- disk.device.write.requests
- disk.device.write.requests.rate
- disk.device.read.bytes
- disk.device.read.bytes.rate
- disk.device.write.bytes
- disk.device.write.bytes.rate
- disk.device.capacity
- disk.device.allocation
- disk.device.usage
@ -1556,26 +1474,11 @@ data:
memory.bandwidth.local:
vcpus:
archive_policy_name: ceilometer-low-rate
vcpu_util:
cpu:
archive_policy_name: ceilometer-low-rate
cpu.delta:
cpu_util:
cpu_l3_cache:
disk.root.size:
disk.ephemeral.size:
disk.read.requests:
archive_policy_name: ceilometer-low-rate
disk.read.requests.rate:
disk.write.requests:
archive_policy_name: ceilometer-low-rate
disk.write.requests.rate:
disk.read.bytes:
archive_policy_name: ceilometer-low-rate
disk.read.bytes.rate:
disk.write.bytes:
archive_policy_name: ceilometer-low-rate
disk.write.bytes.rate:
disk.latency:
disk.iops:
disk.capacity:
@ -1589,6 +1492,9 @@ data:
attributes:
host: resource_metadata.(instance_host|host)
image_ref: resource_metadata.image_ref
launched_at: resource_metadata.launched_at
created_at: resource_metadata.created_at
deleted_at: resource_metadata.deleted_at
display_name: resource_metadata.display_name
flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id)
flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name)
@ -1601,8 +1507,6 @@ data:
instance_disk: '{"=": {"instance_id": "%s"}}'
- resource_type: instance_network_interface
metrics:
network.outgoing.packets.rate:
network.incoming.packets.rate:
network.outgoing.packets:
archive_policy_name: ceilometer-low-rate
network.incoming.packets:
@ -1615,8 +1519,6 @@ data:
archive_policy_name: ceilometer-low-rate
network.incoming.packets.error:
archive_policy_name: ceilometer-low-rate
network.outgoing.bytes.rate:
network.incoming.bytes.rate:
network.outgoing.bytes:
archive_policy_name: ceilometer-low-rate
network.incoming.bytes:
@ -1628,16 +1530,12 @@ data:
metrics:
disk.device.read.requests:
archive_policy_name: ceilometer-low-rate
disk.device.read.requests.rate:
disk.device.write.requests:
archive_policy_name: ceilometer-low-rate
disk.device.write.requests.rate:
disk.device.read.bytes:
archive_policy_name: ceilometer-low-rate
disk.device.read.bytes.rate:
disk.device.write.bytes:
archive_policy_name: ceilometer-low-rate
disk.device.write.bytes.rate:
disk.device.latency:
disk.device.read.latency:
disk.device.write.latency:
@ -1674,10 +1572,14 @@ data:
hardware.ipmi.node.cpu_util:
hardware.ipmi.node.mem_util:
hardware.ipmi.node.io_util:
hardware.ipmi.temperature:
hardware.ipmi.voltage:
hardware.ipmi.current:
- resource_type: ipmi_sensor
metrics:
hardware.ipmi.fan:
hardware.ipmi.temperature:
hardware.ipmi.current:
hardware.ipmi.voltage:
attributes:
node: resource_metadata.node
- resource_type: network
metrics:
bandwidth:
@ -1696,7 +1598,6 @@ data:
metrics:
storage.objects.incoming.bytes:
storage.objects.outgoing.bytes:
storage.api.request:
storage.objects.size:
storage.objects:
storage.objects.containers:
@ -1712,6 +1613,8 @@ data:
attributes:
display_name: resource_metadata.(display_name|name)
volume_type: resource_metadata.volume_type
image_id: resource_metadata.image_id
instance_id: resource_metadata.instance_id
event_delete: volume.delete.start
event_attributes:
id: resource_id
@ -2718,6 +2621,16 @@ data:
- openstack-rbd-provisioner
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ceph-pools-audit
data:
description: "Ceph pools audit"
sequenced: false
chart_group:
- openstack-ceph-pools-audit
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: openstack-mariadb
@ -2834,6 +2747,7 @@ data:
- kube-system-ingress
- openstack-ingress
- provisioner
- ceph-pools-audit
- openstack-mariadb
- openstack-memcached
- openstack-rabbitmq
@ -2843,3 +2757,4 @@ data:
- openstack-heat
- openstack-horizon
- openstack-cinder
- openstack-telemetry

View File

@ -0,0 +1,10 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
appVersion: "1.0"
description: Ceph RBD pool replication monitor chart
name: ceph-pools-audit
version: 0.1.0

View File

@ -0,0 +1,9 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
dependencies:
  - name: helm-toolkit
    repository: http://localhost:8879/charts
    version: 0.1.0

View File

@ -0,0 +1,64 @@
#!/bin/bash
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
ceph -s
ret=$?
if [ $ret -ne 0 ]; then
    msg="Error: Ceph cluster is not accessible, check Pod logs for details."
    echo "$msg"
    exit $ret
fi
touch /etc/ceph/ceph.client.admin.keyring
echo "RBD_POOL_CRUSH_RULESET: $RBD_POOL_CRUSH_RULESET"
if [ -z $RBD_POOL_CRUSH_RULESET ]; then
    msg="No Ceph crush ruleset specified"
    echo "$msg"
    exit 1
fi
ruleset=$(ceph osd crush rule dump $RBD_POOL_CRUSH_RULESET | grep \"ruleset\" | awk '{print $2}' | grep -Eo '[0-9]+')
ret=$?
if [ $ret -ne 0 ]; then
    msg="Ceph crush ruleset $RBD_POOL_CRUSH_RULESET not found, exit"
    echo "$msg"
    exit $ret
fi
echo "ruleset: $ruleset"
set -ex
POOLS=( $(ceph osd pool ls) )
for pool_name in "${POOLS[@]}"
do
    echo "Check for pool name: $pool_name"
    pool_crush_ruleset=$(ceph osd pool get $pool_name crush_ruleset | awk '{print $2}')
    echo "pool_crush_ruleset: $pool_crush_ruleset"
    if [ "$pool_crush_ruleset" != "$ruleset" ]; then
        continue
    fi
    pool_size=$(ceph osd pool get $pool_name size | awk '{print $2}')
    pool_min_size=$(ceph osd pool get $pool_name min_size | awk '{print $2}')
    echo "===> pool_size: $pool_size pool_min_size: $pool_min_size"
    if [ $pool_size != $RBD_POOL_REPLICATION ]; then
        echo "set replication for pool $pool_name at $RBD_POOL_REPLICATION"
        ceph osd pool set $pool_name size $RBD_POOL_REPLICATION
    fi
    if [ $pool_min_size != $RBD_POOL_MIN_REPLICATION ]; then
        echo "set min replication for pool $pool_name at $RBD_POOL_MIN_REPLICATION"
        ceph osd pool set $pool_name min_size $RBD_POOL_MIN_REPLICATION
    fi
done
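For readers less familiar with the Ceph CLI, here is a purely illustrative Python translation of the per-tier decision logic above (pool data is an in-memory list here; the real script queries `ceph osd pool`):

# Illustrative only: mirrors the decision logic of ceph-pools-audit.sh
# using an in-memory pool list instead of real `ceph osd pool` calls.

def audit_pools(pools, ruleset, target_size, target_min_size):
    """Return the (pool, setting, value) changes the audit would apply."""
    changes = []
    for pool in pools:
        if pool["crush_ruleset"] != ruleset:
            continue  # pool belongs to a different tier, skip it
        if pool["size"] != target_size:
            changes.append((pool["name"], "size", target_size))
        if pool["min_size"] != target_min_size:
            changes.append((pool["name"], "min_size", target_min_size))
    return changes

# Hypothetical pools as the audit might see them for one storage tier.
pools = [
    {"name": "cinder-volumes", "crush_ruleset": 0, "size": 1, "min_size": 1},
    {"name": "images", "crush_ruleset": 0, "size": 2, "min_size": 1},
    {"name": "other-tier-pool", "crush_ruleset": 1, "size": 3, "min_size": 2},
]

print(audit_pools(pools, ruleset=0, target_size=2, target_min_size=1))
# [('cinder-volumes', 'size', 2)]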

View File

@ -0,0 +1,19 @@
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.manifests.configmap_bin }}
{{- $envAll := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-pools-bin
data:
  ceph-pools-audit.sh: |
{{ tuple "bin/_ceph-pools-audit.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}

View File

@ -0,0 +1,82 @@
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.manifests.job_ceph_pools_audit }}
{{- $envAll := . }}
---
#
# The CronJob makes sure all the Ceph pools have the right replication,
# as present in the attributes of the Ceph backends.
# This is needed for:
# - charts that don't manage pool configuration
# - pools created dynamically by services that may not have the current
# pool configuration uploaded (ex: swift)
# - when replication is changed and we don't want to reinstall all the
# charts that created Ceph pools
#
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: ceph-pools-audit
spec:
  schedule: {{ .Values.jobs.job_ceph_pools_audit.cron | quote }}
  successfulJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.success }}
  failedJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.failed }}
  concurrencyPolicy: Forbid
  jobTemplate:
    metadata:
      name: "{{$envAll.Release.Name}}"
      namespace: {{ $envAll.Release.namespace }}
      labels:
        app: ceph-pools-audit
    spec:
      template:
        metadata:
          labels:
            app: ceph-pools-audit
        spec:
          restartPolicy: OnFailure
          nodeSelector:
            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
          volumes:
          - name: ceph-pools-bin
            configMap:
              name: ceph-pools-bin
              defaultMode: 0555
          - name: etcceph
            emptyDir: {}
          - name: ceph-etc
            configMap:
              name: {{ $envAll.Values.ceph_client.configmap }}
              defaultMode: 0444
          containers:
          {{- range $tierConfig := $envAll.Values.conf.ceph.storage_tiers }}
          - name: ceph-pools-audit-{{- $tierConfig.name }}
            image: {{ $envAll.Values.images.tags.ceph_config_helper | quote }}
            env:
            - name: RBD_POOL_REPLICATION
              value: {{ $tierConfig.replication | quote }}
            - name: RBD_POOL_MIN_REPLICATION
              value: {{ $tierConfig.min_replication | quote }}
            - name: RBD_POOL_CRUSH_RULESET
              value: {{ $tierConfig.crush_ruleset | quote }}
            command:
            - /tmp/ceph-pools-audit.sh
            volumeMounts:
            - name: ceph-pools-bin
              mountPath: /tmp/ceph-pools-audit.sh
              subPath: ceph-pools-audit.sh
              readOnly: true
            - name: etcceph
              mountPath: /etc/ceph
            - name: ceph-etc
              mountPath: /etc/ceph/ceph.conf
              subPath: ceph.conf
              readOnly: true
          {{- end }}
{{- end }}
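To connect this template to the values file that follows, here is a small illustrative sketch (plain Python, not Helm) of how each entry in conf.ceph.storage_tiers becomes one audit container with its three environment variables:

# Illustrative only: mimics the {{- range }} loop in the CronJob template.
storage_tiers = [
    {"name": "ceph-store", "replication": 2,
     "min_replication": 1, "crush_ruleset": "storage_tier_ruleset"},
]

containers = [
    {
        "name": "ceph-pools-audit-%s" % tier["name"],
        "env": {
            "RBD_POOL_REPLICATION": str(tier["replication"]),
            "RBD_POOL_MIN_REPLICATION": str(tier["min_replication"]),
            "RBD_POOL_CRUSH_RULESET": tier["crush_ruleset"],
        },
        "command": ["/tmp/ceph-pools-audit.sh"],
    }
    for tier in storage_tiers
]

print(containers[0]["name"])  # ceph-pools-audit-ceph-store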

View File

@ -0,0 +1,49 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
replicaCount: 1

labels:
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

name: ceph-pools-audit

ceph_client:
  configmap: ceph-etc

conf:
  ceph:
    storage_tiers:
      - name: ceph-store
        replication: 2
        min_replication: 1
        crush_ruleset: storage_tier_ruleset
    monitors: []

images:
  tags:
    ceph_config_helper: docker.io/port/ceph-config-helper:v1.10.3
  pullPolicy: "IfNotPresent"

jobs:
  job_ceph_pools_audit:
    cron: "*/5 * * * *"
    history:
      success: 3
      failed: 1

resources: {}

nodeSelector: { node-role.kubernetes.io/master: "" }

tolerations: []

affinity: {}

manifests:
  job_ceph_pools_audit: true
  configmap_bin: true

View File

@ -103,9 +103,6 @@ include ::openstack::ceilometer
include ::openstack::ceilometer::agent::notification
include ::openstack::ceilometer::polling
include ::openstack::aodh
include ::openstack::aodh::api
include ::openstack::panko
include ::openstack::panko::api

View File

@ -339,11 +339,6 @@ class openstack::keystone::endpoint::runtime {
include ::nfv::keystone::auth
include ::fm::keystone::auth
include ::openstack::aodh::params
if $::openstack::aodh::params::service_enabled {
include ::aodh::keystone::auth
}
include ::ceilometer::keystone::auth
include ::openstack::heat::params
@ -391,7 +386,7 @@ class openstack::keystone::endpoint::runtime {
}
include ::platform::ceph::params
if $::platform::ceph::params::rgw_enabled {
if $::platform::ceph::params::service_enabled {
include ::platform::ceph::rgw::keystone::auth
}

View File

@ -21,7 +21,6 @@ class platform::ceph::params(
$mon_2_host = undef,
$mon_2_ip = undef,
$mon_2_addr = undef,
$rgw_enabled = false,
$rgw_client_name = 'radosgw.gateway',
$rgw_user_name = 'root',
$rgw_frontend_type = 'civetweb',
@ -373,7 +372,7 @@ class platform::ceph::osds(
class platform::ceph::firewall
inherits ::platform::ceph::params {
if $rgw_enabled {
if $service_enabled {
platform::firewall::rule { 'ceph-radosgw':
service_name => 'ceph-radosgw',
ports => $rgw_port,
@ -385,7 +384,7 @@ class platform::ceph::firewall
class platform::ceph::haproxy
inherits ::platform::ceph::params {
if $rgw_enabled {
if $service_enabled {
platform::haproxy::proxy { 'ceph-radosgw-restapi':
server_name => 's-ceph-radosgw',
public_port => $rgw_port,
@ -397,7 +396,7 @@ class platform::ceph::haproxy
class platform::ceph::rgw
inherits ::platform::ceph::params {
if $rgw_enabled {
if $service_enabled {
include ::platform::params
include ::openstack::keystone::params

View File

@ -148,7 +148,6 @@ class platform::haproxy::runtime {
}
include ::openstack::glance::haproxy
include ::openstack::cinder::haproxy
include ::openstack::aodh::haproxy
include ::openstack::heat::haproxy
include ::openstack::murano::haproxy
include ::openstack::magnum::haproxy

View File

@ -259,14 +259,10 @@ class platform::sm
# Ceph-Rados-Gateway
include ::platform::ceph::params
$ceph_configured = $::platform::ceph::params::service_enabled
$rgw_configured = $::platform::ceph::params::rgw_enabled
# Gnocchi
include ::openstack::gnocchi::params
# AODH
include ::openstack::aodh::params
# Panko
include ::openstack::panko::params
@ -337,7 +333,6 @@ class platform::sm
$ironic_configured = false
$magnum_configured = false
$gnocchi_enabled = false
$aodh_enabled = false
$panko_enabled = false
} else {
$heat_service_enabled = $::openstack::heat::params::service_enabled
@ -345,7 +340,6 @@ class platform::sm
$ironic_configured = $::openstack::ironic::params::service_enabled
$magnum_configured = $::openstack::magnum::params::service_enabled
$gnocchi_enabled = $::openstack::gnocchi::params::service_enabled
$aodh_enabled = $::openstack::aodh::params::service_enabled
$panko_enabled = $::openstack::panko::params::service_enabled
}
@ -1044,60 +1038,41 @@ class platform::sm
}
}
# AODH
if $aodh_enabled {
# AODH (not enabled)
exec { 'Deprovision OpenStack - AODH API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services aodh-api',
}
-> exec { 'Deprovision OpenStack - AODH API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service aodh-api',
}
exec { 'Configure OpenStack - AODH API':
command => "sm-configure service_instance aodh-api aodh-api \"config=/etc/aodh/aodh.conf\"",
}
exec { 'Deprovision OpenStack - AODH Evaluator (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services aodh-evaluator',
}
-> exec { 'Deprovision OpenStack - AODH Evaluator (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service aodh-evaluator',
}
exec { 'Configure OpenStack - AODH Evaluator':
command => "sm-configure service_instance aodh-evaluator aodh-evaluator \"config=/etc/aodh/aodh.conf\"",
}
exec { 'Deprovision OpenStack - AODH Listener (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services aodh-listener',
}
-> exec { 'Deprovision OpenStack - AODH Listener (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service aodh-listener',
}
exec { 'Configure OpenStack - AODH Listener':
command => "sm-configure service_instance aodh-listener aodh-listener \"config=/etc/aodh/aodh.conf\"",
}
exec { 'Configure OpenStack - AODH Notifier':
command => "sm-configure service_instance aodh-notifier aodh-notifier \"config=/etc/aodh/aodh.conf\"",
}
} else {
exec { 'Deprovision OpenStack - AODH API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services aodh-api',
}
-> exec { 'Deprovision OpenStack - AODH API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service aodh-api',
}
exec { 'Deprovision OpenStack - AODH Evaluator (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services aodh-evaluator',
}
-> exec { 'Deprovision OpenStack - AODH Evaluator (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service aodh-evaluator',
}
exec { 'Deprovision OpenStack - AODH Listener (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services aodh-listener',
}
-> exec { 'Deprovision OpenStack - AODH Listener (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service aodh-listener',
}
exec { 'Deprovision OpenStack - AODH Notifier (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services aodh-notifier',
}
-> exec { 'Deprovision OpenStack - AODH Notifier (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service aodh-notifier',
}
exec { 'Deprovision OpenStack - AODH Notifier (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services aodh-notifier',
}
-> exec { 'Deprovision OpenStack - AODH Notifier (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service aodh-notifier',
}
# Panko
@ -1618,7 +1593,7 @@ class platform::sm
}
# Ceph-Rados-Gateway
if $rgw_configured {
if $ceph_configured {
exec {'Provision Ceph-Rados-Gateway (service-group-member ceph-radosgw)':
command => 'sm-provision service-group-member storage-monitoring-services ceph-radosgw'
}

View File

@ -97,6 +97,7 @@ systemconfig.helm_plugins =
panko = sysinv.helm.panko:PankoHelm
rabbitmq = sysinv.helm.rabbitmq:RabbitmqHelm
rbd-provisioner = sysinv.helm.rbd_provisioner:RbdProvisionerHelm
ceph-pools-audit = sysinv.helm.ceph_pools_audit:CephPoolsAuditHelm
helm-toolkit = sysinv.helm.helm_toolkit:HelmToolkitHelm
sysinv.agent.lldp.drivers =

View File

@ -1015,6 +1015,9 @@ SERVICE_PARAM_NETWORK_ML2_EXT_DRIVERS = \
SERVICE_PARAM_NETWORK_ML2_TENANT_TYPES = \
['vlan', 'vxlan']
# service plugin for neutron network segment range feature
NEUTRON_PLUGIN_NETWORK_SEGMENT_RANGE = 'network_segment_range'
# a subset of Neutron service plugins that are supported
SERVICE_PARAM_NETWORK_DEFAULT_SERVICE_PLUGINS = \
['odl-router',
@ -1023,7 +1026,8 @@ SERVICE_PARAM_NETWORK_DEFAULT_SERVICE_PLUGINS = \
'networking_odl.l3.l3_odl_v2:OpenDaylightL3RouterPlugin',
'neutron_dynamic_routing.services.bgp.bgp_plugin.BgpPlugin',
'networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin',
'router']
'router',
NEUTRON_PLUGIN_NETWORK_SEGMENT_RANGE]
# Neutron service plugins for SDN
SERVICE_PLUGINS_SDN = \
@ -1432,6 +1436,7 @@ HELM_CHART_OPENVSWITCH = 'openvswitch'
HELM_CHART_PANKO = 'panko'
HELM_CHART_RABBITMQ = 'rabbitmq'
HELM_CHART_RBD_PROVISIONER = 'rbd-provisioner'
HELM_CHART_CEPH_POOLS_AUDIT = 'ceph-pools-audit'
HELM_CHART_HELM_TOOLKIT = 'helm-toolkit'
SUPPORTED_HELM_CHARTS = [
@ -1458,6 +1463,7 @@ SUPPORTED_HELM_CHARTS = [
HELM_CHART_PANKO,
HELM_CHART_RABBITMQ,
HELM_CHART_RBD_PROVISIONER,
HELM_CHART_CEPH_POOLS_AUDIT,
HELM_CHART_HELM_TOOLKIT,
]
@ -1472,6 +1478,7 @@ SUPPORTED_HELM_APP_CHARTS = {
HELM_APP_OPENSTACK: [
HELM_CHART_INGRESS,
HELM_CHART_RBD_PROVISIONER,
HELM_CHART_CEPH_POOLS_AUDIT,
HELM_CHART_MARIADB,
HELM_CHART_GARBD,
HELM_CHART_RABBITMQ,
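The constants above add the network_segment_range plugin to the supported Neutron service plugins; the Armada manifest change earlier in this commit enables it by default (service_plugins: router,network_segment_range). A trivial sketch of how the default list composes (names copied from the diff, middle entries elided):

NEUTRON_PLUGIN_NETWORK_SEGMENT_RANGE = 'network_segment_range'

SERVICE_PARAM_NETWORK_DEFAULT_SERVICE_PLUGINS = [
    'odl-router',
    # ... other supported SDN/routing plugins elided ...
    'router',
    NEUTRON_PLUGIN_NETWORK_SEGMENT_RANGE,
]

assert 'network_segment_range' in SERVICE_PARAM_NETWORK_DEFAULT_SERVICE_PLUGINS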

View File

@ -79,7 +79,14 @@ class CeilometerHelm(openstack.OpenstackBaseHelm):
return {
'ceilometer': {
'DEFAULT': self._get_conf_ceilometer_default_overrides(),
'notification': self._get_conf_ceilometer_notification_overrides()
'notification': {
'messaging_urls': {
'values': self._get_notification_messaging_urls()
}
},
'meter': {
'meter_definitions_dirs': '/etc/ceilometer/meters.d'
}
}
}
@ -111,17 +118,6 @@ class CeilometerHelm(openstack.OpenstackBaseHelm):
return shared_services_types
def _get_conf_ceilometer_notification_overrides(self):
system = self._get_system()
if system.system_type == constants.TIS_AIO_BUILD:
batch_timeout = 25
else:
batch_timeout = 5
notification_overrides = {'batch_timeout': batch_timeout,
'messaging_urls': {'values': self._get_notification_messaging_urls()}}
return notification_overrides
def _get_notification_messaging_urls(self):
rabbit_user = 'rabbitmq-admin'
rabbit_pass = self._get_common_password(rabbit_user)

View File

@ -0,0 +1,90 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants
from sysinv.common import exception
from sysinv.openstack.common import log as logging
from sysinv.helm import base
from sysinv.helm import common

LOG = logging.getLogger(__name__)


class CephPoolsAuditHelm(base.BaseHelm):
    """Class to encapsulate helm operations for the ceph-pools-audit chart"""

    CHART = constants.HELM_CHART_CEPH_POOLS_AUDIT
    SUPPORTED_NAMESPACES = [
        common.HELM_NS_OPENSTACK
    ]

    SERVICE_NAME = 'ceph-pools'

    @property
    def docker_repo_source(self):
        return common.DOCKER_SRC_STX

    @property
    def docker_repo_tag(self):
        return common.DOCKER_SRCS[self.docker_repo_source][common.IMG_TAG_KEY]

    def get_namespaces(self):
        return self.SUPPORTED_NAMESPACES

    def get_overrides(self, namespace=None):
        backends = self.dbapi.storage_backend_get_list()
        ceph_bks = [bk for bk in backends if bk.backend == constants.SB_TYPE_CEPH]
        if not ceph_bks:
            return {}  # ceph is not configured

        monitors = self._get_formatted_ceph_monitor_ips()

        # Get tier info.
        tiers = self.dbapi.storage_tier_get_list()

        tiers_cfg = []
        for bk in ceph_bks:
            # Get the tier associated to the Ceph backend.
            tier = next((t for t in tiers if t.forbackendid == bk.id), None)
            if not tier:
                raise Exception("No tier present for backend %s" % bk.name)

            # Get the ruleset name.
            rule_name = "{0}{1}{2}".format(
                tier.name,
                constants.CEPH_CRUSH_TIER_SUFFIX,
                "-ruleset").replace('-', '_')

            # Tier config needed for the overrides.
            tier_cfg = {
                "name": bk.name.encode('utf8', 'strict'),
                "replication": int(bk.capabilities.get("replication")),
                "min_replication": int(bk.capabilities.get("min_replication")),
                "crush_ruleset": rule_name.encode('utf8', 'strict'),
            }
            tiers_cfg.append(tier_cfg)

        overrides = {
            common.HELM_NS_OPENSTACK: {
                'conf': {
                    'ceph': {
                        'monitors': monitors,
                        'storage_tiers': tiers_cfg
                    }
                }
            }
        }

        if namespace in self.SUPPORTED_NAMESPACES:
            return overrides[namespace]
        elif namespace:
            raise exception.InvalidHelmNamespace(chart=self.CHART,
                                                 namespace=namespace)
        else:
            return overrides
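As a rough illustration, the overrides this plugin returns for the openstack namespace have the shape the chart's values.yaml expects; the backend name, replication figures, and monitor addresses below are hypothetical:

# Hypothetical result of CephPoolsAuditHelm.get_overrides('openstack') for a
# single Ceph backend named 'ceph-store' with 2/1 replication; the structure
# matches conf.ceph in the chart's values.yaml.
example_overrides = {
    'conf': {
        'ceph': {
            'monitors': ['192.168.204.3', '192.168.204.4', '192.168.204.5'],
            'storage_tiers': [
                {
                    'name': 'ceph-store',
                    'replication': 2,
                    'min_replication': 1,
                    'crush_ruleset': 'storage_tier_ruleset',
                },
            ],
        }
    }
}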

View File

@ -95,7 +95,7 @@ class CinderHelm(openstack.OpenstackBaseHelm):
'DEFAULT': {
# Use the CEPH backup driver. The chart will create a pool for
# keeping the backups. All cinder backup pods will use it.
'backup_driver': 'cinder.backup.drivers.ceph',
'backup_driver': 'cinder.backup.drivers.ceph.CephBackupDriver',
'enabled_backends': ','.join(
b.name.encode('utf8', 'strict') for b in backends)
},

View File

@ -75,7 +75,7 @@ IMG_PREFIX_LOC = 'stx-'
TAGS_PIKE = 'pike'
TAGS_LATEST = 'latest'
TAGS_STX_LATEST = 'dev-centos-pike-latest'
TAGS_STX_LATEST = 'f-stein-centos-master-latest'
DOCKER_SRCS = {
DOCKER_SRC_OSH: {

View File

@ -56,7 +56,13 @@ class NeutronHelm(openstack.OpenstackBaseHelm):
'neutron_sriov-agent': {
'hosts': self._get_per_host_overrides()
},
}
},
'paste': {
'app:neutronversions': {
'paste.app_factory':
'neutron.pecan_wsgi.app:versions_factory'
},
},
},
'endpoints': self._get_endpoints_overrides(),
'images': self._get_images_overrides(),
@ -89,9 +95,11 @@ class NeutronHelm(openstack.OpenstackBaseHelm):
def update_dynamic_options(self, overrides):
if utils.is_virtual():
overrides.update({
'neutron': {
'vhost': {
'vhost_user_enabled': False
'plugins': {
'ml2_conf': {
'ovs_driver': {
'vhost_user_enabled': False
}
}
}
})
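The virtual-environment override now targets the ml2 OVS driver section rather than the agent vhost section. The dictionary merged in when utils.is_virtual() is true looks like this (where exactly it lands in the overall overrides tree is not shown in the hunk):

# Fragment merged into the Neutron overrides on virtual hosts after this change.
virtual_override = {
    'plugins': {
        'ml2_conf': {
            'ovs_driver': {
                'vhost_user_enabled': False
            }
        }
    }
}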

View File

@ -416,41 +416,6 @@ class NovaHelm(openstack.OpenstackBaseHelm):
address_pool = self.dbapi.address_pool_get(cluster_host_network.pool_uuid)
return '%s/%s' % (str(address_pool.network), str(address_pool.prefix))
def _update_host_memory(self, host, default_config):
vswitch_2M_pages = []
vswitch_1G_pages = []
vm_4K_pages = []
# The retrieved information is not necessarily ordered by numa node.
host_memory = self.dbapi.imemory_get_by_ihost(host.id)
# This makes it ordered by numa node.
memory_numa_list = utils.get_numa_index_list(host_memory)
# Process them in order of numa node.
for node, memory_list in memory_numa_list.items():
memory = memory_list[0]
# first the 4K memory
vm_hugepages_nr_4K = memory.vm_hugepages_nr_4K if (
memory.vm_hugepages_nr_4K is not None) else 0
vm_4K_pages.append(vm_hugepages_nr_4K)
# Now the vswitch memory of each hugepage size.
vswitch_2M_page = 0
vswitch_1G_page = 0
if memory.vswitch_hugepages_size_mib == constants.MIB_2M:
vswitch_2M_page = memory.vswitch_hugepages_nr
elif memory.vswitch_hugepages_size_mib == constants.MIB_1G:
vswitch_1G_page = memory.vswitch_hugepages_nr
vswitch_2M_pages.append(vswitch_2M_page)
vswitch_1G_pages.append(vswitch_1G_page)
# Build up the config values.
vswitch_2M = "\"%s\"" % ','.join([str(i) for i in vswitch_2M_pages])
vswitch_1G = "\"%s\"" % ','.join([str(i) for i in vswitch_1G_pages])
vm_4K = "\"%s\"" % ','.join([str(i) for i in vm_4K_pages])
# Add the new entries to the DEFAULT config section.
default_config.update({
'compute_vm_4K_pages': vm_4K,
'compute_vswitch_2M_pages': vswitch_2M,
'compute_vswitch_1G_pages': vswitch_1G,
})
def _get_per_host_overrides(self):
host_list = []
hosts = self.dbapi.ihost_get_list()
@ -469,7 +434,6 @@ class NovaHelm(openstack.OpenstackBaseHelm):
self._update_host_storage(host, default_config, libvirt_config)
self._update_host_addresses(host, default_config, vnc_config,
libvirt_config)
self._update_host_memory(host, default_config)
self._update_host_pci_whitelist(host, pci_config)
host_nova = {
'name': hostname,

View File

@ -103,8 +103,6 @@ class CephPuppet(openstack.OpenstackBasePuppet):
'platform::ceph::params::mon_1_addr': mon_1_addr,
'platform::ceph::params::mon_2_addr': mon_2_addr,
'platform::ceph::params::rgw_enabled':
ceph_backend.object_gateway,
'platform::ceph::params::rgw_admin_user':
ksuser,
'platform::ceph::params::rgw_admin_domain':