# config/kubernetes/applications/stx-openstack/stx-openstack-helm/stx-openstack-helm/manifests/manifest.yaml
# (2677 lines, 88 KiB, YAML)
---
# helm-toolkit: shared chart templates; listed as a dependency by every other
# chart document in this manifest.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: helm-toolkit
data:
  chart_name: helm-toolkit
  release: helm-toolkit
  namespace: helm-toolkit
  values: {}
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/helm-toolkit-0.1.0.tgz
    subpath: helm-toolkit
    reference: master
  dependencies: []
---
# Ingress controller for the kube-system namespace.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kube-system-ingress
data:
  chart_name: ingress
  release: kube-system-ingress
  namespace: kube-system
  wait:
    timeout: 1800
    labels:
      release_group: osh-kube-system-ingress
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-kube-system-ingress
  values:
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      error_server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      replicas:
        error_page: 2
        ingress: 2
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/ingress-0.1.0.tgz
    subpath: ingress
    reference: master
  dependencies:
    - helm-toolkit
---
# Ingress controller for the openstack namespace (same chart as
# kube-system-ingress, different release/namespace).
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-ingress
data:
  chart_name: ingress
  release: openstack-ingress
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-ingress
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-ingress
  values:
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      error_server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      replicas:
        error_page: 2
        ingress: 2
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/ingress-0.1.0.tgz
    subpath: ingress
    reference: master
  dependencies:
    - helm-toolkit
---
# RBD (Ceph) volume provisioner for the openstack namespace.
# NOTE: this chart's wait/delete selector uses the "app" label, unlike the
# "release_group" label used by the other charts in this manifest.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-rbd-provisioner
data:
  chart_name: rbd-provisioner
  release: openstack-rbd-provisioner
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      app: rbd-provisioner
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            app: rbd-provisioner
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/rbd-provisioner-0.1.0.tgz
    subpath: rbd-provisioner
    reference: master
  dependencies:
    - helm-toolkit
---
# MariaDB database cluster with prometheus mysql-exporter enabled.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-mariadb
data:
  chart_name: mariadb
  release: openstack-mariadb
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-mariadb
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-mariadb
  values:
    monitoring:
      prometheus:
        enabled: true
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      prometheus_mysql_exporter:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/mariadb-0.1.0.tgz
    subpath: mariadb
    # NOTE(review): source.reference is omitted here but present on most
    # other charts in this manifest — confirm this is intentional.
  dependencies:
    - helm-toolkit
---
# Galera arbitrator daemon; scheduled on compute nodes (openstack-compute-node
# selector), unlike the control-plane placement of the other infra charts.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-garbd
data:
  chart_name: garbd
  release: openstack-garbd
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-garbd
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-garbd
  values:
    labels:
      server:
        node_selector_key: openstack-compute-node
        node_selector_value: enabled
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/garbd-0.1.0.tgz
    subpath: garbd
    # NOTE(review): source.reference is omitted here but present on most
    # other charts in this manifest — confirm this is intentional.
  dependencies:
    - helm-toolkit
---
# Memcached cache service with prometheus memcached-exporter enabled.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-memcached
data:
  chart_name: memcached
  release: openstack-memcached
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-memcached
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-memcached
  values:
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      prometheus_memcached_exporter:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    monitoring:
      prometheus:
        enabled: true
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/memcached-0.1.0.tgz
    subpath: memcached
    reference: master
  dependencies:
    - helm-toolkit
---
# RabbitMQ message bus with prometheus exporter and hard pod anti-affinity.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-rabbitmq
data:
  chart_name: rabbitmq
  release: openstack-rabbitmq
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-rabbitmq
  test:
    enabled: true
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-rabbitmq
        - type: pod
          labels:
            release_group: osh-openstack-rabbitmq
            component: test
  values:
    monitoring:
      prometheus:
        enabled: true
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      prometheus_rabbitmq_exporter:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/rabbitmq-0.1.0.tgz
    subpath: rabbitmq
    reference: master
  dependencies:
    - helm-toolkit
---
# Keystone identity service, 2 API replicas; helm tests disabled.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-keystone
data:
  chart_name: keystone
  release: openstack-keystone
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-keystone
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-keystone
        - type: pod
          labels:
            release_group: osh-openstack-keystone
            component: test
  values:
    endpoints:
      identity:
        name: keystone
        namespace: openstack
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      replicas:
        api: 2
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/keystone-0.1.0.tgz
    subpath: keystone
    reference: master
  dependencies:
    - helm-toolkit
---
# Glance image service: 2 api + 2 registry replicas, hard anti-affinity.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-glance
data:
  chart_name: glance
  release: openstack-glance
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-glance
  test:
    enabled: true
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-glance
        - type: pod
          labels:
            release_group: osh-openstack-glance
            component: test
  values:
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      registry:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      replicas:
        api: 2
        registry: 2
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/glance-0.1.0.tgz
    subpath: glance
    reference: master
  dependencies:
    - helm-toolkit
---
# Cinder block-storage service; swift is used as the backup driver.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-cinder
data:
  chart_name: cinder
  release: openstack-cinder
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-cinder
  test:
    enabled: true
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-cinder
        - type: pod
          labels:
            release_group: osh-openstack-cinder
            component: test
  values:
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      backup:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      scheduler:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      volume:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      replicas:
        api: 2
        volume: 1
        scheduler: 1
        backup: 1
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
    conf:
      cinder:
        DEFAULT:
          backup_driver: cinder.backup.drivers.swift
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/cinder-0.1.0.tgz
    subpath: cinder
    reference: master
  dependencies:
    - helm-toolkit
---
# Libvirt daemonset on compute nodes; no wait/test stanzas for this chart.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-libvirt
data:
  chart_name: libvirt
  release: openstack-libvirt
  namespace: openstack
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
  values:
    labels:
      agent:
        libvirt:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/libvirt-0.1.0.tgz
    subpath: libvirt
    reference: master
  dependencies:
    - helm-toolkit
---
# Nova compute service. Deletes per-component jobs (nova and placement) on
# upgrade, disables the osapi ingress and the cell-setup/service-cleaner
# cron jobs, and carries extensive nova.conf overrides.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-nova
data:
  chart_name: nova
  release: openstack-nova
  namespace: openstack
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            application: nova
            component: db-init
        - type: job
          labels:
            application: nova
            component: db-sync
        - type: job
          labels:
            application: nova
            component: ks-user
        - type: job
          labels:
            application: nova
            component: ks-service
        - type: job
          labels:
            application: placement
            component: ks-user
        - type: job
          labels:
            application: placement
            component: ks-service
        - type: job
          labels:
            application: placement
            component: ks-endpoints
        - type: job
          labels:
            application: nova
            component: cell-setup
  values:
    manifests:
      job_ks_endpoints: false
      ingress_osapi: false
      service_ingress_osapi: false
      cron_job_cell_setup: false
      cron_job_service_cleaner: false
    labels:
      agent:
        compute:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
        compute_ironic:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
      api_metadata:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      conductor:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      consoleauth:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      novncproxy:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      osapi:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      placement:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      scheduler:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      spiceproxy:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      test:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      replicas:
        api_metadata: 1
        placement: 1
        osapi: 1
        conductor: 1
        consoleauth: 1
        scheduler: 1
        novncproxy: 1
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
      user:
        nova:
          uid: 0
    conf:
      ceph:
        enabled: true
      nova:
        DEFAULT:
          default_mempages_size: 2048
          reserved_host_memory_mb: 0
          compute_monitors: cpu.virt_driver
          running_deleted_instance_poll_interval: 60
          mkisofs_cmd: /usr/bin/genisoimage
          network_allocate_retries: 2
          force_raw_images: false
          concurrent_disk_operations: 2
          # Set number of block device allocate retries and interval
          # for volume create when VM boots and creates a new volume.
          # The total block allocate retries time is set to 2 hours
          # to satisfy the volume allocation time on slow RPM disks
          # which may take 1 hour and a half per volume when several
          # volumes are created in parallel.
          block_device_allocate_retries_interval: 3
          block_device_allocate_retries: 2400
          disk_allocation_ratio: 1.0
          cpu_allocation_ratio: 16.0
          ram_allocation_ratio: 1.0
          remove_unused_original_minimum_age_seconds: 3600
          enable_new_services: false
          map_new_hosts: false
        libvirt:
          cpu_mode: none
          live_migration_completion_timeout: 180
          live_migration_permit_auto_converge: true
          mem_stats_period_seconds: 0
          rbd_secret_uuid: null
          rbd_user: null
          # Allow up to 1 day for resize conf
          remove_unused_resized_minimum_age_seconds: 86400
        database:
          idle_timeout: 60
          max_overflow: 64
          max_pool_size: 1
        api_database:
          idle_timeout: 60
          max_overflow: 64
          max_pool_size: 1
        cell0_database:
          idle_timeout: 60
          max_overflow: 64
          max_pool_size: 1
        placement:
          os_interface: internal
        neutron:
          default_floating_pool: public
        notifications:
          notification_format: unversioned
        filter_scheduler:
          disk_weight_multiplier: 0.0
          enabled_filters:
            - RetryFilter
            - ComputeFilter
            - AvailabilityZoneFilter
            - AggregateInstanceExtraSpecsFilter
            - ComputeCapabilitiesFilter
            - ImagePropertiesFilter
            - NUMATopologyFilter
            - ServerGroupAffinityFilter
            - ServerGroupAntiAffinityFilter
            - PciPassthroughFilter
            - DiskFilter
          io_ops_weight_multiplier: -5.0
          pci_weight_multiplier: 0.0
          ram_weight_multiplier: 0.0
          soft_affinity_weight_multiplier: 0.0
          soft_anti_affinity_weight_multiplier: 0.0
        scheduler:
          discover_hosts_in_cells_interval: 30
          periodic_task_interval: -1
        # NOTE(review): "None" here is the YAML *string* "None", not null —
        # confirm the chart expects this literal for upgrade_levels.
        upgrade_levels: None
        metrics:
          required: false
    network:
      sshd:
        enabled: true
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/nova-0.1.0.tgz
    subpath: nova
    reference: master
  dependencies:
    - helm-toolkit
---
# Nova API proxy (StarlingX-specific chart); no values overrides.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-nova-api-proxy
data:
  chart_name: nova-api-proxy
  release: openstack-nova-api-proxy
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-nova-api-proxy
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-nova-api-proxy
        - type: pod
          labels:
            release_group: osh-openstack-nova-api-proxy
            component: test
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/nova-api-proxy-0.1.0.tgz
    subpath: nova-api-proxy
    reference: master
  dependencies:
    - helm-toolkit
---
# Neutron networking service: HA L3/DHCP agents, OVS with vxlan tunnels.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-neutron
data:
  chart_name: neutron
  release: openstack-neutron
  namespace: openstack
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            application: neutron
            component: db-init
        - type: job
          labels:
            application: neutron
            component: db-sync
        - type: job
          labels:
            application: neutron
            component: ks-user
        - type: job
          labels:
            application: neutron
            component: ks-service
        - type: job
          labels:
            application: neutron
            component: ks-endpoints
  values:
    pod:
      replicas:
        server: 2
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
    labels:
      agent:
        dhcp:
          node_selector_key: openstack-control-plane
          node_selector_value: enabled
        l3:
          node_selector_key: openstack-control-plane
          node_selector_value: enabled
        metadata:
          node_selector_key: openstack-control-plane
          node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      lb:
        node_selector_key: linuxbridge
        node_selector_value: enabled
      # ovs is a special case, requiring a special
      # label that can apply to both control hosts
      # and compute hosts, until we get more sophisticated
      # with our daemonset scheduling
      ovs:
        node_selector_key: openvswitch
        node_selector_value: enabled
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      test:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    network:
      interface:
        tunnel: enp0s3
    conf:
      neutron:
        DEFAULT:
          l3_ha: true
          min_l3_agents_per_router: 2
          max_l3_agents_per_router: 5
          l3_ha_network_type: vxlan
          dhcp_agents_per_network: 2
      plugins:
        ml2_conf:
          ml2_type_flat:
            flat_networks: public
        openvswitch_agent:
          agent:
            tunnel_types: vxlan
          ovs:
            bridge_mappings: public:br-ex
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/neutron-0.1.0.tgz
    subpath: neutron
    reference: master
  dependencies:
    - helm-toolkit
---
# Heat orchestration service: 2 replicas of each component, hard anti-affinity.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-heat
data:
  chart_name: heat
  release: openstack-heat
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-heat
  test:
    enabled: true
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-heat
        - type: pod
          labels:
            release_group: osh-openstack-heat
            component: test
  values:
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      cfn:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      cloudwatch:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      engine:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      replicas:
        api: 2
        cfn: 2
        cloudwatch: 2
        engine: 2
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/heat-0.1.0.tgz
    subpath: heat
    reference: master
  dependencies:
    - helm-toolkit
---
# Aodh alarming service; alarm history cleanup runs from a cron job.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-aodh
data:
  chart_name: aodh
  release: openstack-aodh
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-aodh
  test:
    enabled: true
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-aodh
        - type: pod
          labels:
            release_group: osh-openstack-aodh
            component: test
  values:
    pod:
      user:
        aodh:
          uid: 0
    jobs:
      alarms_cleaner:
        # daily at the 35 minute mark
        cron: "35 */24 * * *"
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/aodh-0.1.0.tgz
    subpath: aodh
    reference: master
  dependencies:
    - helm-toolkit
---
# Gnocchi metric service; overrides the apache vhost and the paste pipeline.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-gnocchi
data:
  chart_name: gnocchi
  release: openstack-gnocchi
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-gnocchi
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-gnocchi
        - type: pod
          labels:
            release_group: osh-openstack-gnocchi
            component: test
  values:
    conf:
      # Literal apache config; the Helm template expressions inside are
      # rendered by the chart before apache sees the file.
      apache: |
        Listen 0.0.0.0:{{ tuple "metric" "internal" "api" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
        SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
        CustomLog /dev/stdout combined env=!forwarded
        CustomLog /dev/stdout proxy env=forwarded
        <VirtualHost *:{{ tuple "metric" "internal" "api" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}>
            WSGIDaemonProcess gnocchi processes=1 threads=2 user=gnocchi group=gnocchi display-name=%{GROUP}
            WSGIProcessGroup gnocchi
            WSGIScriptAlias / "/var/lib/openstack/bin/gnocchi-api"
            WSGIApplicationGroup %{GLOBAL}
            ErrorLog /dev/stdout
            SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
            CustomLog /dev/stdout combined env=!forwarded
            CustomLog /dev/stdout proxy env=forwarded
            <Directory "/var/lib/openstack/bin">
                Require all granted
            </Directory>
        </VirtualHost>
      paste:
        composite:gnocchi+basic:
          use: egg:Paste#urlmap
          /: gnocchiversions_pipeline
          /v1: gnocchiv1+noauth
          /healthcheck: healthcheck
        composite:gnocchi+keystone:
          use: egg:Paste#urlmap
          /: gnocchiversions_pipeline
          /v1: gnocchiv1+keystone
          /healthcheck: healthcheck
        composite:gnocchi+remoteuser:
          use: egg:Paste#urlmap
          /: gnocchiversions_pipeline
          /v1: gnocchiv1+noauth
          /healthcheck: healthcheck
        pipeline:gnocchiv1+noauth:
          pipeline: http_proxy_to_wsgi gnocchiv1
        pipeline:gnocchiv1+keystone:
          pipeline: http_proxy_to_wsgi keystone_authtoken gnocchiv1
        pipeline:gnocchiversions_pipeline:
          pipeline: http_proxy_to_wsgi gnocchiversions
        app:gnocchiversions:
          paste.app_factory: gnocchi.rest.app:app_factory
          root: gnocchi.rest.api.VersionsController
        app:gnocchiv1:
          paste.app_factory: gnocchi.rest.app:app_factory
          root: gnocchi.rest.api.V1Controller
        filter:keystone_authtoken:
          use: egg:keystonemiddleware#auth_token
          oslo_config_project: gnocchi
        filter:http_proxy_to_wsgi:
          use: egg:oslo.middleware#http_proxy_to_wsgi
          oslo_config_project: gnocchi
        app:healthcheck:
          use: egg:oslo.middleware#healthcheck
          oslo_config_project: gnocchi
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/gnocchi-0.1.0.tgz
    subpath: gnocchi
    reference: master
  dependencies:
    - helm-toolkit
---
# Panko event service; event history cleanup runs from a cron job.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-panko
data:
  chart_name: panko
  release: openstack-panko
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-panko
  test:
    enabled: true
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-panko
        - type: pod
          labels:
            release_group: osh-openstack-panko
            component: test
  values:
    pod:
      user:
        panko:
          uid: 0
    jobs:
      events_cleaner:
        # hourly at the 10 minute mark
        cron: "10 * * * *"
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/panko-0.1.0.tgz
    subpath: panko
    reference: master
  dependencies:
    - helm-toolkit
---
# Ceilometer telemetry service. The api/collector deployments are disabled
# (manifests section); samples are dispatched to gnocchi and events to panko,
# per the pipeline/event_pipeline publishers below.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-ceilometer
data:
  chart_name: ceilometer
  release: openstack-ceilometer
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-ceilometer
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-ceilometer
        - type: pod
          labels:
            release_group: osh-openstack-ceilometer
            component: test
  values:
    dependencies:
      static:
        central:
          jobs:
            - ceilometer-db-sync
            - ceilometer-rabbit-init
          services: null
        compute:
          jobs:
            - ceilometer-db-sync
            - ceilometer-rabbit-init
          services: null
        ipmi:
          jobs:
            - ceilometer-db-sync
            - ceilometer-rabbit-init
          services: null
        notification:
          jobs:
            - ceilometer-db-sync
            - ceilometer-rabbit-init
          services:
            - endpoint: internal
              service: event
        db_sync:
          jobs:
            - ceilometer-ks-user
            - ceilometer-ks-service
          services:
            - endpoint: internal
              service: identity
            - endpoint: internal
              service: metric
    manifests:
      deployment_api: false
      deployment_collector: false
      service_api: false
      job_db_init: false
      job_db_init_mongodb: false
      job_ks_endpoints: false
      secret_db: false
      secret_mongodb: false
    endpoints:
      oslo_cache:
        hosts:
          default: memcached
      # Events are served by panko rather than a ceilometer API.
      event:
        name: panko
        hosts:
          default: panko-api
          public: panko
        host_fqdn_override:
          default: null
        path:
          default: null
        scheme:
          default: 'http'
        port:
          api:
            default: 8977
            public: 80
    conf:
      ceilometer:
        DEFAULT:
          csv_location: /var/lib/ceilometer/
          csv_location_strict: true
          shuffle_time_before_polling_task: 30
          batch_polled_samples: true
        dispatcher_gnocchi:
          archive_policy: ''
          filter_project: ''
        cache:
          expiration_time: 86400
        compute:
          resource_update_interval: 60
          instance_discovery_method: workload_partitioning
        oslo_messaging_notifications:
          topics:
            - notifications
      pipeline:
        sources:
          - name: meter_source
            meters:
              - "*"
            sinks:
              - meter_sink
              - csv_sink
          - name: cpu_source
            meters:
              - "cpu"
            sinks:
              - cpu_sink
              - cpu_delta_sink
              - vcpu_sink
          - name: disk_source
            meters:
              - "disk.read.bytes"
              - "disk.read.requests"
              - "disk.write.bytes"
              - "disk.write.requests"
              - "disk.device.read.bytes"
              - "disk.device.read.requests"
              - "disk.device.write.bytes"
              - "disk.device.write.requests"
            sinks:
              - disk_sink
          - name: network_source
            meters:
              - "network.incoming.bytes"
              - "network.incoming.packets"
              - "network.outgoing.bytes"
              - "network.outgoing.packets"
            sinks:
              - network_sink
        sinks:
          - name: meter_sink
            transformers:
            publishers:
              - gnocchi://
          - name: cpu_sink
            transformers:
              - name: "rate_of_change"
                parameters:
                  target:
                    name: "cpu_util"
                    unit: "%"
                    type: "gauge"
                    max: 100
                    scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
            publishers:
              - gnocchi://
          - name: cpu_delta_sink
            transformers:
              - name: "delta"
                parameters:
                  target:
                    name: "cpu.delta"
                  growth_only: true
            publishers:
              - gnocchi://
          - name: vcpu_sink
            transformers:
              - name: "rate_of_change"
                parameters:
                  target:
                    name: "vcpu_util"
                    unit: "%"
                    type: "gauge"
                    max: 100
                    scale: "100.0 / (10**9 * (resource_metadata.vcpu_number or 1))"
            publishers:
              - gnocchi://
          - name: disk_sink
            transformers:
              - name: "rate_of_change"
                parameters:
                  source:
                    map_from:
                      name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
                      unit: "(B|request)"
                  target:
                    map_to:
                      name: "\\1.\\2.\\3.rate"
                      unit: "\\1/s"
                    type: "gauge"
            publishers:
              - gnocchi://
          - name: network_sink
            transformers:
              - name: "rate_of_change"
                parameters:
                  source:
                    map_from:
                      name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
                      unit: "(B|packet)"
                  target:
                    map_to:
                      name: "network.\\1.\\2.rate"
                      unit: "\\1/s"
                    type: "gauge"
            publishers:
              - gnocchi://
          - name: csv_sink
            publishers:
              - csvfile:///var/lib/ceilometer/pm.csv?max_bytes=10000000&backup_count=5&compress=True&enabled=True
      event_pipeline:
        sources:
          - name: event_source
            events:
              - "*"
            sinks:
              - event_sink
        sinks:
          - name: event_sink
            transformers:
            publishers:
              - panko://
              - gnocchi://
      polling:
        sources:
          - name: instance_pollster
            interval: 600
            meters:
              - disk.read.bytes
              - disk.read.bytes.rate
              - disk.read.requests
              - disk.read.requests.rate
              - disk.write.bytes
              - disk.write.bytes.rate
              - disk.write.requests
              - disk.write.requests.rate
              - disk.capacity
              - disk.allocation
              - disk.usage
          - name: instance_cpu_pollster
            interval: 30
            meters:
              - cpu
          - name: instance_disk_pollster
            interval: 600
            meters:
              - disk.device.read.requests
              - disk.device.read.requests.rate
              - disk.device.write.requests
              - disk.device.write.requests.rate
              - disk.device.read.bytes
              - disk.device.read.bytes.rate
              - disk.device.write.bytes
              - disk.device.write.bytes.rate
              - disk.device.capacity
              - disk.device.allocation
              - disk.device.usage
          - name: ipmi_pollster
            interval: 600
            meters:
              - hardware.ipmi.node.power
              - hardware.ipmi.node.temperature
              - hardware.ipmi.node.outlet_temperature
              - hardware.ipmi.node.airflow
              - hardware.ipmi.node.cups
              - hardware.ipmi.node.cpu_util
              - hardware.ipmi.node.mem_util
              - hardware.ipmi.node.io_util
              - hardware.ipmi.temperature
              - hardware.ipmi.voltage
              - hardware.ipmi.current
              - hardware.ipmi.fan
          - name: ceph_pollster
            interval: 600
            meters:
              - radosgw.objects
              - radosgw.objects.size
              - radosgw.objects.containers
              - radosgw.api.request
              - radosgw.containers.objects
              - radosgw.containers.objects.size
          - name: image_pollster
            interval: 600
            meters:
              - image.size
          - name: volume_pollster
            interval: 600
            meters:
              - volume.size
              - volume.snapshot.size
              - volume.backup.size
      gnocchi_resources:
        archive_policy_default: ceilometer-low
        archive_policies:
          - name: ceilometer-low
            aggregation_methods:
              - mean
            back_window: 0
            definition:
              - granularity: 5 minutes
                timespan: 7 days
          - name: ceilometer-low-rate
            aggregation_methods:
              - mean
              - rate:mean
            back_window: 0
            definition:
              - granularity: 5 minutes
                timespan: 7 days
        # Per-resource-type metric maps; a bare "metric:" key takes the
        # default archive policy, nested archive_policy_name overrides it.
        resources:
          - resource_type: identity
            metrics:
              identity.authenticate.success:
              identity.authenticate.pending:
              identity.authenticate.failure:
              identity.user.created:
              identity.user.deleted:
              identity.user.updated:
              identity.group.created:
              identity.group.deleted:
              identity.group.updated:
              identity.role.created:
              identity.role.deleted:
              identity.role.updated:
              identity.project.created:
              identity.project.deleted:
              identity.project.updated:
              identity.trust.created:
              identity.trust.deleted:
              identity.role_assignment.created:
              identity.role_assignment.deleted:
          - resource_type: ceph_account
            metrics:
              radosgw.objects:
              radosgw.objects.size:
              radosgw.objects.containers:
              radosgw.api.request:
              radosgw.containers.objects:
              radosgw.containers.objects.size:
          - resource_type: instance
            metrics:
              memory:
              memory.usage:
              memory.resident:
              memory.swap.in:
              memory.swap.out:
              memory.bandwidth.total:
              memory.bandwidth.local:
              vcpus:
                archive_policy_name: ceilometer-low-rate
              vcpu_util:
              cpu:
                archive_policy_name: ceilometer-low-rate
              cpu.delta:
              cpu_util:
              cpu_l3_cache:
              disk.root.size:
              disk.ephemeral.size:
              disk.read.requests:
                archive_policy_name: ceilometer-low-rate
              disk.read.requests.rate:
              disk.write.requests:
                archive_policy_name: ceilometer-low-rate
              disk.write.requests.rate:
              disk.read.bytes:
                archive_policy_name: ceilometer-low-rate
              disk.read.bytes.rate:
              disk.write.bytes:
                archive_policy_name: ceilometer-low-rate
              disk.write.bytes.rate:
              disk.latency:
              disk.iops:
              disk.capacity:
              disk.allocation:
              disk.usage:
              compute.instance.booting.time:
              perf.cpu.cycles:
              perf.instructions:
              perf.cache.references:
              perf.cache.misses:
            attributes:
              host: resource_metadata.(instance_host|host)
              image_ref: resource_metadata.image_ref
              display_name: resource_metadata.display_name
              flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id)
              flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name)
              server_group: resource_metadata.user_metadata.server_group
            event_delete: compute.instance.delete.start
            event_attributes:
              id: instance_id
            event_associated_resources:
              instance_network_interface: '{"=": {"instance_id": "%s"}}'
              instance_disk: '{"=": {"instance_id": "%s"}}'
          - resource_type: instance_network_interface
            metrics:
              network.outgoing.packets.rate:
              network.incoming.packets.rate:
              network.outgoing.packets:
                archive_policy_name: ceilometer-low-rate
              network.incoming.packets:
                archive_policy_name: ceilometer-low-rate
              network.outgoing.packets.drop:
                archive_policy_name: ceilometer-low-rate
              network.incoming.packets.drop:
                archive_policy_name: ceilometer-low-rate
              network.outgoing.packets.error:
                archive_policy_name: ceilometer-low-rate
              network.incoming.packets.error:
                archive_policy_name: ceilometer-low-rate
              network.outgoing.bytes.rate:
              network.incoming.bytes.rate:
              network.outgoing.bytes:
                archive_policy_name: ceilometer-low-rate
              network.incoming.bytes:
                archive_policy_name: ceilometer-low-rate
            attributes:
              name: resource_metadata.vnic_name
              instance_id: resource_metadata.instance_id
          - resource_type: instance_disk
            metrics:
              disk.device.read.requests:
                archive_policy_name: ceilometer-low-rate
              disk.device.read.requests.rate:
              disk.device.write.requests:
                archive_policy_name: ceilometer-low-rate
              disk.device.write.requests.rate:
              disk.device.read.bytes:
                archive_policy_name: ceilometer-low-rate
              disk.device.read.bytes.rate:
              disk.device.write.bytes:
                archive_policy_name: ceilometer-low-rate
              disk.device.write.bytes.rate:
              disk.device.latency:
              disk.device.read.latency:
              disk.device.write.latency:
              disk.device.iops:
              disk.device.capacity:
              disk.device.allocation:
              disk.device.usage:
            attributes:
              name: resource_metadata.disk_name
              instance_id: resource_metadata.instance_id
          - resource_type: image
            metrics:
              image.size:
              image.download:
              image.serve:
            attributes:
              name: resource_metadata.name
              container_format: resource_metadata.container_format
              disk_format: resource_metadata.disk_format
            event_delete: image.delete
            event_attributes:
              id: resource_id
          - resource_type: ipmi
            metrics:
              hardware.ipmi.node.power:
              hardware.ipmi.node.temperature:
              hardware.ipmi.node.inlet_temperature:
              hardware.ipmi.node.outlet_temperature:
              hardware.ipmi.node.fan:
              hardware.ipmi.node.current:
              hardware.ipmi.node.voltage:
              hardware.ipmi.node.airflow:
              hardware.ipmi.node.cups:
              hardware.ipmi.node.cpu_util:
              hardware.ipmi.node.mem_util:
              hardware.ipmi.node.io_util:
              hardware.ipmi.temperature:
              hardware.ipmi.voltage:
              hardware.ipmi.current:
              hardware.ipmi.fan:
          - resource_type: network
            metrics:
              bandwidth:
              ip.floating:
            event_delete: floatingip.delete.end
            event_attributes:
              id: resource_id
          - resource_type: stack
            metrics:
              stack.create:
              stack.update:
              stack.delete:
              stack.resume:
              stack.suspend:
          - resource_type: swift_account
            metrics:
              storage.objects.incoming.bytes:
              storage.objects.outgoing.bytes:
              storage.api.request:
              storage.objects.size:
              storage.objects:
              storage.objects.containers:
              storage.containers.objects:
              storage.containers.objects.size:
          - resource_type: volume
            metrics:
              volume:
              volume.size:
              snapshot.size:
              volume.snapshot.size:
              volume.backup.size:
            attributes:
              display_name: resource_metadata.(display_name|name)
              volume_type: resource_metadata.volume_type
            event_delete: volume.delete.start
            event_attributes:
              id: resource_id
          - resource_type: volume_provider
            metrics:
              volume.provider.capacity.total:
              volume.provider.capacity.free:
              volume.provider.capacity.allocated:
              volume.provider.capacity.provisioned:
              volume.provider.capacity.virtual_free:
          - resource_type: volume_provider_pool
            metrics:
              volume.provider.pool.capacity.total:
              volume.provider.pool.capacity.free:
              volume.provider.pool.capacity.allocated:
              volume.provider.pool.capacity.provisioned:
              volume.provider.pool.capacity.virtual_free:
            attributes:
              provider: resource_metadata.provider
          - resource_type: host
            metrics:
              hardware.cpu.load.1min:
              hardware.cpu.load.5min:
              hardware.cpu.load.15min:
              hardware.cpu.util:
              hardware.memory.total:
              hardware.memory.used:
              hardware.memory.swap.total:
              hardware.memory.swap.avail:
              hardware.memory.buffer:
              hardware.memory.cached:
              hardware.network.ip.outgoing.datagrams:
              hardware.network.ip.incoming.datagrams:
              hardware.system_stats.cpu.idle:
              hardware.system_stats.io.outgoing.blocks:
              hardware.system_stats.io.incoming.blocks:
            attributes:
              host_name: resource_metadata.resource_url
          - resource_type: host_disk
            metrics:
              hardware.disk.size.total:
              hardware.disk.size.used:
              hardware.disk.read.bytes:
              hardware.disk.write.bytes:
              hardware.disk.read.requests:
              hardware.disk.write.requests:
            attributes:
              host_name: resource_metadata.resource_url
              device_name: resource_metadata.device
          - resource_type: host_network_interface
            metrics:
              hardware.network.incoming.bytes:
              hardware.network.outgoing.bytes:
              hardware.network.outgoing.errors:
            attributes:
              host_name: resource_metadata.resource_url
              device_name: resource_metadata.name
          - resource_type: nova_compute
            metrics:
              compute.node.cpu.frequency:
              compute.node.cpu.idle.percent:
              compute.node.cpu.idle.time:
              compute.node.cpu.iowait.percent:
              compute.node.cpu.iowait.time:
              compute.node.cpu.kernel.percent:
              compute.node.cpu.kernel.time:
              compute.node.cpu.percent:
              compute.node.cpu.user.percent:
              compute.node.cpu.user.time:
            attributes:
              host_name: resource_metadata.host
          - resource_type: manila_share
            metrics:
              manila.share.size:
            attributes:
              name: resource_metadata.name
              host: resource_metadata.host
              status: resource_metadata.status
              availability_zone: resource_metadata.availability_zone
              protocol: resource_metadata.protocol
          - resource_type: switch
            metrics:
              switch:
              switch.ports:
            attributes:
              controller: resource_metadata.controller
          - resource_type: switch_port
            metrics:
              switch.port:
              switch.port.uptime:
              switch.port.receive.packets:
              switch.port.transmit.packets:
              switch.port.receive.bytes:
              switch.port.transmit.bytes:
              switch.port.receive.drops:
              switch.port.transmit.drops:
              switch.port.receive.errors:
              switch.port.transmit.errors:
              switch.port.receive.frame_error:
              switch.port.receive.overrun_error:
              switch.port.receive.crc_error:
              switch.port.collision.count:
            attributes:
              switch: resource_metadata.switch
              port_number_on_switch: resource_metadata.port_number_on_switch
              neutron_port_id: resource_metadata.neutron_port_id
              controller: resource_metadata.controller
          - resource_type: port
            metrics:
              port:
              port.uptime:
              port.receive.packets:
              port.transmit.packets:
              port.receive.bytes:
              port.transmit.bytes:
              port.receive.drops:
              port.receive.errors:
            attributes:
              controller: resource_metadata.controller
          - resource_type: switch_table
            metrics:
              switch.table.active.entries:
            attributes:
              controller: resource_metadata.controller
              switch: resource_metadata.switch
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/ceilometer-0.1.0.tgz
    subpath: ceilometer
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: openstack-horizon
data:
chart_name: horizon
release: openstack-horizon
namespace: openstack
wait:
timeout: 1800
labels:
release_group: osh-openstack-horizon
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: osh-openstack-horizon
values:
conf:
horizon:
local_settings:
template: |
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
DEBUG = {{ .Values.conf.horizon.local_settings.config.debug }}
TEMPLATE_DEBUG = DEBUG
COMPRESS_OFFLINE = True
COMPRESS_CSS_HASHING_METHOD = "hash"
# WEBROOT is the location relative to Webserver root
# should end with a slash.
WEBROOT = '/'
# LOGIN_URL = WEBROOT + 'auth/login/'
# LOGOUT_URL = WEBROOT + 'auth/logout/'
#
# LOGIN_REDIRECT_URL can be used as an alternative for
# HORIZON_CONFIG.user_home, if user_home is not set.
# Do not set it to '/home/', as this will cause circular redirect loop
# LOGIN_REDIRECT_URL = WEBROOT
# Required for Django 1.5.
# If horizon is running in production (DEBUG is False), set this
# with the list of host/domain names that the application can serve.
# For more information see:
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Set SSL proxy settings:
# For Django 1.4+ pass this header from the proxy after terminating the SSL,
# and don't forget to strip it from the client's request.
# For more information see:
# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# https://docs.djangoproject.com/en/1.5/ref/settings/#secure-proxy-ssl-header
#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# If Horizon is being served through SSL, then uncomment the following two
# settings to better secure the cookies from security exploits
#CSRF_COOKIE_SECURE = True
#SESSION_COOKIE_SECURE = True
# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
# Versions specified here should be integers or floats, not strings.
# NOTE: The version should be formatted as it appears in the URL for the
# service API. For example, The identity service APIs have inconsistent
# use of the decimal point, so valid options would be 2.0 or 3.
#OPENSTACK_API_VERSIONS = {
# "data-processing": 1.1,
# "identity": 3,
# "volume": 2,
#}
OPENSTACK_API_VERSIONS = {
"identity": 3,
}
# Set this to True if running on multi-domain model. When this is enabled, it
# will require user to enter the Domain name in addition to username for login.
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = {{ .Values.conf.horizon.local_settings.config.keystone_multidomain_support }}
# Overrides the default domain used when running on single-domain model
# with Keystone V3. All entities will be created in the default domain.
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = '{{ .Values.conf.horizon.local_settings.config.keystone_default_domain }}'
# Set Console type:
# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None
# Set to None explicitly if you want to deactivate the console.
#CONSOLE_TYPE = "AUTO"
# Default OpenStack Dashboard configuration.
HORIZON_CONFIG = {
'user_home': 'openstack_dashboard.views.get_user_home',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
'modal_backdrop': 'static',
'angular_modules': [],
'js_files': [],
'js_spec_files': [],
}
# Specify a regular expression to validate user passwords.
#HORIZON_CONFIG["password_validator"] = {
# "regex": '.*',
# "help_text": _("Your password does not meet the requirements."),
#}
# Disable simplified floating IP address management for deployments with
# multiple floating IP pools or complex network requirements.
#HORIZON_CONFIG["simple_ip_management"] = False
# Turn off browser autocompletion for forms including the login form and
# the database creation workflow if so desired.
#HORIZON_CONFIG["password_autocomplete"] = "off"
# Setting this to True will disable the reveal button for password fields,
# including on the login form.
#HORIZON_CONFIG["disable_password_reveal"] = False
LOCAL_PATH = '/tmp'
# Set custom secret key:
# You can either set it to a specific value or you can let horizon generate a
            # default secret key that is unique on this machine, i.e. regardless of the
# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However,
# there may be situations where you would want to set this explicitly, e.g.
# when multiple dashboard instances are distributed on different machines
# (usually behind a load-balancer). Either you have to make sure that a session
# gets all requests routed to the same dashboard instance or you set the same
# SECRET_KEY for all of them.
SECRET_KEY='{{ .Values.conf.horizon.local_settings.config.horizon_secret_key }}'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '{{ tuple "oslo_cache" "internal" "memcache" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}',
}
}
DATABASES = {
'default': {
# Database configuration here
'ENGINE': 'django.db.backends.mysql',
'NAME': '{{ .Values.endpoints.oslo_db.path | base }}',
'USER': '{{ .Values.endpoints.oslo_db.auth.horizon.username }}',
'PASSWORD': '{{ .Values.endpoints.oslo_db.auth.horizon.password }}',
'HOST': '{{ tuple "oslo_db" "internal" . | include "helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup" }}',
'default-character-set': 'utf8',
'PORT': '{{ tuple "oslo_db" "internal" "mysql" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}'
}
}
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# Configure these for your outgoing email host
#EMAIL_HOST = 'smtp.my-company.com'
            #EMAIL_PORT = 25
#EMAIL_HOST_USER = 'djangomail'
#EMAIL_HOST_PASSWORD = 'top-secret!'
# For multiple regions uncomment this configuration, and add (endpoint, title).
#AVAILABLE_REGIONS = [
# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
#]
OPENSTACK_KEYSTONE_URL = "{{ tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "member"
{{- if .Values.conf.horizon.local_settings.config.auth.sso.enabled }}
# Enables keystone web single-sign-on if set to True.
WEBSSO_ENABLED = True
# Determines which authentication choice to show as default.
WEBSSO_INITIAL_CHOICE = "{{ .Values.conf.horizon.local_settings.config.auth.sso.initial_choice }}"
# The list of authentication mechanisms
# which include keystone federation protocols.
# Current supported protocol IDs are 'saml2' and 'oidc'
# which represent SAML 2.0, OpenID Connect respectively.
# Do not remove the mandatory credentials mechanism.
WEBSSO_CHOICES = (
("credentials", _("Keystone Credentials")),
{{- range $i, $sso := .Values.conf.horizon.local_settings.config.auth.idp_mapping }}
({{ $sso.name | quote }}, {{ $sso.label | quote }}),
{{- end }}
)
WEBSSO_IDP_MAPPING = {
{{- range $i, $sso := .Values.conf.horizon.local_settings.config.auth.idp_mapping }}
{{ $sso.name | quote}}: ({{ $sso.idp | quote }}, {{ $sso.protocol | quote }}),
{{- end }}
}
{{- end }}
# Disable SSL certificate checks (useful for self-signed certificates):
#OPENSTACK_SSL_NO_VERIFY = True
# The CA certificate to use to verify SSL connections
#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
# capabilities of the auth backend for Keystone.
# If Keystone has been configured to use LDAP as the auth backend then set
# can_edit_user to False and name to 'ldap'.
#
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True,
}
# Setting this to True, will add a new "Retrieve Password" action on instance,
# allowing Admin session password retrieval/decryption.
#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
# The Launch Instance user experience has been significantly enhanced.
# You can choose whether to enable the new launch instance experience,
# the legacy experience, or both. The legacy experience will be removed
# in a future release, but is available as a temporary backup setting to ensure
# compatibility with existing deployments. Further development will not be
# done on the legacy experience. Please report any problems with the new
# experience via the Launchpad tracking system.
#
# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
# determine the experience to enable. Set them both to true to enable
# both.
#LAUNCH_INSTANCE_LEGACY_ENABLED = True
#LAUNCH_INSTANCE_NG_ENABLED = False
# The Xen Hypervisor has the ability to set the mount point for volumes
# attached to instances (other Hypervisors currently do not). Setting
# can_set_mount_point to True will add the option to set the mount point
# from the UI.
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False,
'can_set_password': False,
}
# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
            # services provided by cinder that are not exposed by its extension API.
OPENSTACK_CINDER_FEATURES = {
'enable_backup': {{ .Values.conf.horizon.local_settings.config.openstack_cinder_features.enable_backup }},
}
# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
# services provided by neutron. Options currently available are load
# balancer service, security groups, quotas, VPN service.
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_router }},
'enable_quotas': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_quotas }},
'enable_ipv6': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_ipv6 }},
'enable_distributed_router': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_distributed_router }},
'enable_ha_router': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_ha_router }},
'enable_lb': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_lb }},
'enable_firewall': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_firewall }},
'enable_vpn': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_vpn }},
'enable_fip_topology_check': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_fip_topology_check }},
# The profile_support option is used to detect if an external router can be
# configured via the dashboard. When using specific plugins the
# profile_support can be turned on if needed.
'profile_support': None,
#'profile_support': 'cisco',
# Set which provider network types are supported. Only the network types
# in this list will be available to choose from when creating a network.
# Network types include local, flat, vlan, gre, and vxlan.
'supported_provider_types': ['*'],
# Set which VNIC types are supported for port binding. Only the VNIC
# types in this list will be available to choose from when creating a
# port.
# VNIC types include 'normal', 'macvtap' and 'direct'.
'supported_vnic_types': ['*']
}
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
#OPENSTACK_IMAGE_BACKEND = {
# 'image_formats': [
# ('', _('Select format')),
# ('aki', _('AKI - Amazon Kernel Image')),
# ('ami', _('AMI - Amazon Machine Image')),
# ('ari', _('ARI - Amazon Ramdisk Image')),
# ('docker', _('Docker')),
# ('iso', _('ISO - Optical Disk Image')),
# ('ova', _('OVA - Open Virtual Appliance')),
# ('qcow2', _('QCOW2 - QEMU Emulator')),
# ('raw', _('Raw')),
# ('vdi', _('VDI - Virtual Disk Image')),
# ('vhd', ('VHD - Virtual Hard Disk')),
# ('vmdk', _('VMDK - Virtual Machine Disk')),
# ]
#}
# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
# image custom property attributes that appear on image detail pages.
IMAGE_CUSTOM_PROPERTY_TITLES = {
"architecture": _("Architecture"),
"kernel_id": _("Kernel ID"),
"ramdisk_id": _("Ramdisk ID"),
"image_state": _("Euca2ools state"),
"project_id": _("Project ID"),
"image_type": _("Image Type"),
}
# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
# custom properties should not be displayed in the Image Custom Properties
# table.
IMAGE_RESERVED_CUSTOM_PROPERTIES = []
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'publicURL'.
OPENSTACK_ENDPOINT_TYPE = "internalURL"
# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is None. This
# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
SECONDARY_ENDPOINT_TYPE = "publicURL"
# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
# to paginate results.
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
# The size of chunk in bytes for downloading objects from Swift
SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
# Specify a maximum number of items to display in a dropdown.
DROPDOWN_MAX_ITEMS = 30
# The timezone of the server. This should correspond with the timezone
# of your entire OpenStack installation, and hopefully be in UTC.
TIME_ZONE = "UTC"
# When launching an instance, the menu of available flavors is
# sorted by RAM usage, ascending. If you would like a different sort order,
# you can provide another flavor attribute as sorting key. Alternatively, you
# can provide a custom callback method to use for sorting. You can also provide
# a flag for reverse sort. For more info, see
# http://docs.python.org/2/library/functions.html#sorted
#CREATE_INSTANCE_FLAVOR_SORT = {
# 'key': 'name',
# # or
# 'key': my_awesome_callback_method,
# 'reverse': False,
#}
# Set this to True to display an 'Admin Password' field on the Change Password
# form to verify that it is indeed the admin logged-in who wants to change
# the password.
# ENFORCE_PASSWORD_CHECK = False
# Modules that provide /auth routes that can be used to handle different types
# of user authentication. Add auth plugins that require extra route handling to
# this list.
#AUTHENTICATION_URLS = [
# 'openstack_auth.urls',
#]
# The Horizon Policy Enforcement engine uses these values to load per service
# policy rule files. The content of these files should match the files the
# OpenStack services are using to determine role based access control in the
# target installation.
# Path to directory containing policy.json files
POLICY_FILES_PATH = '/etc/openstack-dashboard'
# Map of local copy of service policy files
#POLICY_FILES = {
# 'identity': 'keystone_policy.json',
# 'compute': 'nova_policy.json',
# 'volume': 'cinder_policy.json',
# 'image': 'glance_policy.json',
# 'orchestration': 'heat_policy.json',
# 'network': 'neutron_policy.json',
# 'telemetry': 'ceilometer_policy.json',
#}
# Trove user and database extension support. By default support for
# creating users and databases on database instances is turned on.
# To disable these extensions set the permission here to something
# unusable such as ["!"].
# TROVE_ADD_USER_PERMS = []
# TROVE_ADD_DATABASE_PERMS = []
            # Change this path to the appropriate static directory containing
# two files: _variables.scss and _styles.scss
#CUSTOM_THEME_PATH = 'static/themes/default'
LOGGING = {
'version': 1,
# When set to True this will disable all logging except
# for loggers specified in this configuration dictionary. Note that
# if nothing is specified here and disable_existing_loggers is True,
# django.db.backends will still log unless it is disabled explicitly.
'disable_existing_loggers': False,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
# Set the level to "DEBUG" for verbose output logging.
'level': 'INFO',
'class': 'logging.StreamHandler',
},
},
'loggers': {
# Logging from django.db.backends is VERY verbose, send to null
# by default.
'django.db.backends': {
'handlers': ['null'],
'propagate': False,
},
'requests': {
'handlers': ['null'],
'propagate': False,
},
'horizon': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'openstack_dashboard': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'novaclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'cinderclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'glanceclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
                    'keystoneclient': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
'neutronclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'heatclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'ceilometerclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'troveclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'swiftclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'openstack_auth': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'nose.plugins.manager': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'iso8601': {
'handlers': ['null'],
'propagate': False,
},
'scss': {
'handlers': ['null'],
'propagate': False,
},
}
}
# 'direction' should not be specified for all_tcp/udp/icmp.
# It is specified in the form.
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': _('All TCP'),
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'all_udp': {
'name': _('All UDP'),
'ip_protocol': 'udp',
'from_port': '1',
'to_port': '65535',
},
'all_icmp': {
'name': _('All ICMP'),
'ip_protocol': 'icmp',
'from_port': '-1',
'to_port': '-1',
},
'ssh': {
'name': 'SSH',
'ip_protocol': 'tcp',
'from_port': '22',
'to_port': '22',
},
'smtp': {
'name': 'SMTP',
'ip_protocol': 'tcp',
'from_port': '25',
'to_port': '25',
},
'dns': {
'name': 'DNS',
'ip_protocol': 'tcp',
'from_port': '53',
'to_port': '53',
},
'http': {
'name': 'HTTP',
'ip_protocol': 'tcp',
'from_port': '80',
'to_port': '80',
},
'pop3': {
'name': 'POP3',
'ip_protocol': 'tcp',
'from_port': '110',
'to_port': '110',
},
'imap': {
'name': 'IMAP',
'ip_protocol': 'tcp',
'from_port': '143',
'to_port': '143',
},
'ldap': {
'name': 'LDAP',
'ip_protocol': 'tcp',
'from_port': '389',
'to_port': '389',
},
'https': {
'name': 'HTTPS',
'ip_protocol': 'tcp',
'from_port': '443',
'to_port': '443',
},
'smtps': {
'name': 'SMTPS',
'ip_protocol': 'tcp',
'from_port': '465',
'to_port': '465',
},
'imaps': {
'name': 'IMAPS',
'ip_protocol': 'tcp',
'from_port': '993',
'to_port': '993',
},
'pop3s': {
'name': 'POP3S',
'ip_protocol': 'tcp',
'from_port': '995',
'to_port': '995',
},
'ms_sql': {
'name': 'MS SQL',
'ip_protocol': 'tcp',
'from_port': '1433',
'to_port': '1433',
},
'mysql': {
'name': 'MYSQL',
'ip_protocol': 'tcp',
'from_port': '3306',
'to_port': '3306',
},
'rdp': {
'name': 'RDP',
'ip_protocol': 'tcp',
'from_port': '3389',
'to_port': '3389',
},
}
# Deprecation Notice:
#
# The setting FLAVOR_EXTRA_KEYS has been deprecated.
# Please load extra spec metadata into the Glance Metadata Definition Catalog.
#
# The sample quota definitions can be found in:
# <glance_source>/etc/metadefs/compute-quota.json
#
# The metadata definition catalog supports CLI and API:
# $glance --os-image-api-version 2 help md-namespace-import
# $glance-manage db_load_metadefs <directory_with_definition_files>
#
# See Metadata Definitions on: http://docs.openstack.org/developer/glance/
# Indicate to the Sahara data processing service whether or not
# automatic floating IP allocation is in effect. If it is not
# in effect, the user will be prompted to choose a floating IP
# pool for use in their cluster. False by default. You would want
# to set this to True if you were running Nova Networking with
# auto_assign_floating_ip = True.
#SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
# The hash algorithm to use for authentication tokens. This must
# match the hash algorithm that the identity server and the
# auth_token middleware are using. Allowed values are the
# algorithms supported by Python's hashlib library.
#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
# AngularJS requires some settings to be made available to
# the client side. Some settings are required by in-tree / built-in horizon
# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the
# form of ['SETTING_1','SETTING_2'], etc.
#
# You may remove settings from this list for security purposes, but do so at
# the risk of breaking a built-in horizon feature. These settings are required
# for horizon to function properly. Only remove them if you know what you
# are doing. These settings may in the future be moved to be defined within
# the enabled panel configuration.
# You should not add settings to this list for out of tree extensions.
# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI
REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
'LAUNCH_INSTANCE_DEFAULTS',
'OPENSTACK_IMAGE_FORMATS']
# Additional settings can be made available to the client side for
# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS
# !! Please use extreme caution as the settings are transferred via HTTP/S
# and are not encrypted on the browser. This is an experimental API and
# may be deprecated in the future without notice.
#REST_API_ADDITIONAL_SETTINGS = []
# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded
# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame
# Scripting (XFS) vulnerability, so this option allows extra security hardening
# where iframes are not used in deployment. Default setting is True.
# For more information see:
# http://tinyurl.com/anticlickjack
# DISALLOW_IFRAME_EMBED = True
STATIC_ROOT = '/var/www/html/horizon'
#OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
#present OPENSTACK_API_VERSIONS={"identity":3}
            # Use region configuration to access platform deployment and containerized
# deployment from a single horizon deployment
OPENSTACK_KEYSTONE_URL = "{{ tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}"
OPENSTACK_NEUTRON_NETWORK['enable_distributed_router'] = True
            # TODO(tsmith) remove this, only for HP custom, this isn't being used
# Load Region Config params, if present
# Config OPENSTACK_HOST is still required in region mode since StarlingX
# does not use the local_settings populated via packstack
{{- if eq .Values.conf.horizon.local_settings.config.ss_enabled "True"}}
SS_ENABLED = "True"
OPENSTACK_KEYSTONE_URL = {{ .Values.conf.horizon.local_settings.config.openstack_keystone_url }}
AVAILABLE_REGIONS = [(OPENSTACK_KEYSTONE_URL, {{ .Values.conf.horizon.local_settings.config.region_name }}),]
REGION_NAME = {{ .Values.conf.horizon.local_settings.config.region_name }}
{{- else }}
SS_ENABLED = "False"
{{- end }}
# Load Horizon region exclusion list
REGION_EXCLUSIONS = []
try:
if os.path.exists('/opt/branding/horizon-region-exclusions.csv'):
with open('/opt/branding/horizon-region-exclusions.csv') as f:
for line in f:
if line.startswith('#') or line.startswith(' '):
continue
REGION_EXCLUSIONS = line.rstrip('\n').rstrip('\r').split(',')
except Exception:
pass
DC_MODE = {{ .Values.conf.horizon.local_settings.config.dc_mode }}
# Override openstack-dashboard NG_CACHE_TEMPLATE_AGE
NG_TEMPLATE_CACHE_AGE = 300
# OperationLogMiddleware Configuration
OPERATION_LOG_ENABLED = True
OPERATION_LOG_OPTIONS = {
'mask_fields': ['password', 'bm_password', 'bm_confirm_password',
'current_password', 'confirm_password', 'new_password'],
'target_methods': ['POST', 'PUT', 'DELETE'],
'format': ("[%(project_name)s %(project_id)s] [%(user_name)s %(user_id)s]"
" [%(method)s %(request_url)s %(http_status)s]"
" parameters:[%(param)s] message:[%(message)s]"),
}
# StarlingX Branding Configuration
SITE_BRANDING = "StarlingX"
AVAILABLE_THEMES = [
('default', 'Default', 'themes/default'),
('material', 'Material', 'themes/material'),
('starlingx', 'StarlingX', 'themes/starlingx'),
]
DEFAULT_THEME = 'starlingx'
# Custom Theme Override
for root, dirs, files in os.walk('/opt/branding/applied'):
if 'manifest.py' in files:
execfile(os.path.join(root, 'manifest.py'))
AVAILABLE_THEMES = [
('default', 'Default', 'themes/default'),
('material', 'Material', 'themes/material'),
('starlingx', 'StarlingX', 'themes/starlingx'),
('custom', 'Custom', '/opt/branding/applied'),
]
DEFAULT_THEME = 'custom'
# Secure site configuration
SESSION_COOKIE_HTTPONLY = True
{{- if eq .Values.conf.horizon.local_settings.config.https_enabled "True"}}
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
{{- end }}
LOCKOUT_PERIOD_SEC = float({{ .Values.conf.horizon.local_settings.config.lockout_period_sec }})
LOCKOUT_RETRIES_NUM = int({{ .Values.conf.horizon.local_settings.config.lockout_retries_num }})
# The OPENSTACK_HEAT_STACK settings can be used to disable password
# field required while launching the stack.
OPENSTACK_HEAT_STACK = {
'enable_user_pass': False,
}
HORIZON_CONFIG["password_autocomplete"] = "off"
# Optional service configuration
{{- if eq .Values.conf.horizon.local_settings.config.enable_murano "True"}}
ENABLE_MURANO_TAB = True
{{- else }}
ENABLE_MURANO_TAB = False
{{- end }}
{{- if eq .Values.conf.horizon.local_settings.config.enable_magnum "True"}}
ENABLE_MAGNUM_TAB = True
{{- else }}
ENABLE_MAGNUM_TAB = False
{{- end }}
source:
type: tar
location: http://172.17.0.1/helm_charts/horizon-0.1.0.tgz
subpath: horizon
reference: master
dependencies:
- helm-toolkit
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: kube-system-ingress
data:
  description: "System Ingress Controller"
  # sequenced: false lets Armada deploy this group's charts concurrently
  sequenced: false
  chart_group:
    - kube-system-ingress
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-ingress
data:
  description: "OpenStack Ingress Controller"
  sequenced: false
  chart_group:
    - openstack-ingress
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: provisioner
data:
  description: "Provisioner"
  sequenced: false
  chart_group:
    - openstack-rbd-provisioner
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-mariadb
data:
  description: "Mariadb"
  # sequenced: true deploys charts in listed order: garbd only after mariadb
  sequenced: true
  chart_group:
    - openstack-mariadb
    - openstack-garbd
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-memcached
data:
  description: "Memcached"
  sequenced: true
  chart_group:
    - openstack-memcached
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-rabbitmq
data:
  description: "Rabbitmq"
  sequenced: true
  chart_group:
    - openstack-rabbitmq
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-keystone
data:
  description: "Deploy keystone"
  sequenced: true
  chart_group:
    - openstack-keystone
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-glance
data:
  description: "Deploy glance"
  sequenced: true
  chart_group:
    - openstack-glance
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-cinder
data:
  description: "Deploy cinder"
  sequenced: true
  chart_group:
    - openstack-cinder
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-compute-kit
data:
  description: "Deploy nova and neutron, as well as supporting services"
  # sequenced: false — these four charts may come up in parallel
  sequenced: false
  chart_group:
    - openstack-libvirt
    - openstack-nova
    - openstack-nova-api-proxy
    - openstack-neutron
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-heat
data:
  description: "Deploy heat"
  sequenced: true
  chart_group:
    - openstack-heat
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-horizon
data:
  description: "Deploy horizon"
  sequenced: false
  chart_group:
    - openstack-horizon
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-telemetry
data:
  description: "Deploy telemetry"
  # sequenced: true — aodh, gnocchi, panko, ceilometer deploy in order
  sequenced: true
  chart_group:
    - openstack-aodh
    - openstack-gnocchi
    - openstack-panko
    - openstack-ceilometer
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: armada-manifest
data:
release_prefix: osh
chart_groups:
- kube-system-ingress
- openstack-ingress
- provisioner
- openstack-mariadb
- openstack-memcached
- openstack-rabbitmq
- openstack-keystone
- openstack-glance
- openstack-compute-kit
- openstack-heat
- openstack-horizon
- openstack-cinder