Update manifests to remove unused openstack components

Cleanup unwanted openstack setup on bare metal.

Preparing the manifests to have the services removed from SM.

Bypass setting up openstack services on controller, worker and
storage.

Cleanup haproxy ports for services that will not be running
on bare metal.

Cleanup upgrade, remote logging, postgres, and anything else
related to openstack services that no longer run on bare
metal.

Remove all manifests and templates that are no longer being used.
Strip out any static hiera data that is no longer needed.

Story: 2004764
Task: 29850
Depends-On: Ice10fe6da6b34f1d9206f26e112eb555e2088932
Depends-On: I3c1cc8673be5cf6ab15f9158199bc24fccb44f17
Depends-On: Ie43cf11ebf1edcf3a8bb357205c4c59d2962b4fa
Change-Id: I2be8e9ab418835125ff433d06d2930df37534501
Signed-off-by: Al Bailey <Al.Bailey@windriver.com>
This commit is contained in:
Al Bailey 2019-03-05 13:03:04 -06:00
parent f56056cffc
commit cbecbf7f0b
50 changed files with 164 additions and 4731 deletions

View File

@ -7,9 +7,9 @@
#
########################################################################
NOVAOPENRC="/etc/nova/openrc"
if [ -e ${NOVAOPENRC} ] ; then
source ${NOVAOPENRC} &>/dev/null
PLATFORMOPENRC="/etc/platform/openrc"
if [ -e ${PLATFORMOPENRC} ] ; then
source ${PLATFORMOPENRC} &>/dev/null
else
echo "Admin credentials not found"
exit

View File

@ -193,339 +193,6 @@ keystone::roles::admin::admin_tenant: 'admin'
platform::client::params::identity_auth_url: 'http://localhost:5000/v3'
# glance
glance::api::enabled: false
glance::api::pipeline: 'keystone'
glance::api::database_max_pool_size: 1
glance::api::database_max_overflow: 10
glance::api::verbose: false
glance::api::debug: false
glance::api::use_syslog: true
glance::api::log_facility: 'local2'
glance::api::log_file: '/dev/null'
glance::api::multi_store: true
glance::api::cinder_catalog_info: 'volume:cinder:internalURL'
glance::api::graceful_shutdown: true
glance::api::enable_proxy_headers_parsing: true
glance::api::image_cache_dir: '/opt/cgcs/glance/image-cache'
glance::api::cache_raw_conversion_dir: '/opt/img-conversions/glance'
glance::api::scrubber_datadir: '/opt/cgcs/glance/scrubber'
glance::registry::enabled: false
glance::registry::database_max_pool_size: 1
glance::registry::database_max_overflow: 10
glance::registry::verbose: false
glance::registry::debug: false
glance::registry::use_syslog: true
glance::registry::log_facility: 'local2'
glance::registry::log_file: '/dev/null'
glance::registry::graceful_shutdown: true
glance::backend::rbd::multi_store: true
glance::backend::rbd::rbd_store_user: glance
glance::backend::file::multi_store: true
glance::backend::file::filesystem_store_datadir: '/opt/cgcs/glance/images/'
glance::notify::rabbitmq::notification_driver: 'messagingv2'
# nova
nova::conductor::enabled: false
nova::scheduler::enabled: false
nova::consoleauth::enabled: false
nova::vncproxy::enabled: false
nova::serialproxy::enabled: false
nova::scheduler::filter::ram_weight_multiplier: 0.0
nova::scheduler::filter::disk_weight_multiplier: 0.0
nova::scheduler::filter::io_ops_weight_multiplier: -5.0
nova::scheduler::filter::pci_weight_multiplier: 0.0
nova::scheduler::filter::soft_affinity_weight_multiplier: 0.0
nova::scheduler::filter::soft_anti_affinity_weight_multiplier: 0.0
nova::cron::archive_deleted_rows::hour: '*/12'
nova::cron::archive_deleted_rows::destination: '/dev/null'
nova::api::enabled: false
nova::api::enable_proxy_headers_parsing: true
# nova-api runs on an internal 18774 port and api proxy runs on 8774
nova::api::osapi_compute_listen_port: 18774
nova::api::allow_resize_to_same_host: true
nova::network::neutron::default_floating_pool: 'public'
nova_api_proxy::config::enabled: false
nova_api_proxy::config::eventlet_pool_size: 256
nova_api_proxy::config::use_syslog: true
nova_api_proxy::config::log_facility: 'local5'
# this will trigger simple_setup for cell_v2
nova::db::sync_api::cellv2_setup: true
# neutron
neutron::server::enabled: false
neutron::server::database_idle_timeout: 60
neutron::server::database_max_pool_size: 1
neutron::server::database_max_overflow: 64
neutron::server::enable_proxy_headers_parsing: true
neutron::server::network_scheduler_driver: 'neutron.scheduler.dhcp_host_agent_scheduler.HostBasedScheduler'
neutron::server::router_scheduler_driver: 'neutron.scheduler.l3_host_agent_scheduler.HostBasedScheduler'
neutron::server::notifications::endpoint_type: 'internal'
neutron::plugins::ml2::type_drivers:
- managed_flat
- managed_vlan
- managed_vxlan
neutron::plugins::ml2::tenant_network_types:
- vlan
- vxlan
neutron::plugins::ml2::mechanism_drivers:
- openvswitch
- sriovnicswitch
- l2population
neutron::plugins::ml2::enable_security_group: true
neutron::plugins::ml2::ensure_default_security_group: false
neutron::plugins::ml2::notify_interval: 10
neutron::plugins::ml2::firewall_driver: 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver'
neutron::bgp::bgp_speaker_driver: 'neutron_dynamic_routing.services.bgp.agent.driver.ryu.driver.RyuBgpDriver'
neutron::services::bgpvpn::service_providers:
- 'BGPVPN:DynamicRoutingBGPVPNDriver:networking_bgpvpn.neutron.services.service_drivers.neutron_dynamic_routing.dr.DynamicRoutingBGPVPNDriver:default'
# ceilometer
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
ceilometer::agent::notification::enabled: false
ceilometer::agent::notification::disable_non_metric_meters: false
ceilometer::agent::notification::manage_event_pipeline: true
ceilometer::agent::notification::event_pipeline_publishers: ['gnocchi://', 'direct://?dispatcher=panko']
ceilometer::agent::polling::central_namespace: true
ceilometer::agent::polling::compute_namespace: false
ceilometer::agent::polling::ipmi_namespace: true
# Do not create endpoints for ceilometer as ceilometer-api is removed
ceilometer::keystone::auth::configure_endpoint: false
# gnocchi
gnocchi::api::service_name: 'openstack-gnocchi-api'
gnocchi::api::enable_proxy_headers_parsing: true
gnocchi::metricd::enabled: false
gnocchi::storage::file::file_basepath: '/opt/gnocchi'
gnocchi::db::sync::user: 'root'
# aodh
aodh::use_syslog: true
aodh::log_facility: 'local2'
aodh::database_idle_timeout: 60
aodh::database_max_pool_size: 1
aodh::database_max_overflow: 10
aodh::alarm_history_time_to_live: 86400
aodh::auth::auth_endpoint_type: 'internalURL'
aodh::db::sync::user: 'root'
aodh::api::enabled: false
aodh::api::service_name: 'openstack-aodh-api'
aodh::api::enable_proxy_headers_parsing: true
aodh::notifier::enabled: false
aodh::evaluator::enabled: false
aodh::listener::enabled: false
# panko
openstack::panko::params::event_time_to_live: 86400
panko::api::enabled: false
panko::api::service_name: 'openstack-panko-api'
panko::api::enable_proxy_headers_parsing: true
panko::db::database_idle_timeout: 60
panko::db::database_max_pool_size: 1
panko::db::database_max_overflow: 10
panko::logging::use_syslog: true
panko::logging::syslog_log_facility: 'local2'
# cinder
cinder::use_syslog: true
cinder::log_facility: 'local2'
cinder::database_idle_timeout: 60
cinder::database_max_pool_size: 1
cinder::database_max_overflow: 50
cinder::rpc_response_timeout: 180
cinder::backend_host: 'controller'
cinder::image_conversion_dir: '/opt/img-conversions/cinder'
cinder::api::nova_interface: 'internal'
cinder::api::enable_proxy_headers_parsing: true
cinder::ceilometer::notification_driver: 'messaging'
cinder::scheduler::enabled: false
cinder::volume::enabled: false
cinder::backup::posix::backup_posix_path: '/opt/backups'
# backup_file_size should be below 500MB to allow multiple backups
# to run in parallel and not consume all RAM.
# backup_file_size must be a multiple of backup_sha_block_size_bytes
# which has a default value of 32768 bytes.
cinder::backup::posix::backup_file_size: 499974144
cinder::policy::policies:
enable_consistencygroup_create:
key: 'consistencygroup:create'
value: ''
enable_consistencygroup_delete:
key: 'consistencygroup:delete'
value: ''
enable_consistencygroup_update:
key: 'consistencygroup:update'
value: ''
enable_consistencygroup_get:
key: 'consistencygroup:get'
value: ''
enable_consistencygroup_get_all:
key: 'consistencygroup:get_all'
value: ''
enable_consistencygroup_create_cgsnapshot:
key: 'consistencygroup:create_cgsnapshot'
value: ''
enable_consistencygroup_delete_cgsnapshot:
key: 'consistencygroup:delete_cgsnapshot'
value: ''
enable_consistencygroup_get_cgsnapshot:
key: 'consistencygroup:get_cgsnapshot'
value: ''
enable_consistencygroup_get_all_cgsnapshots:
key: 'consistencygroup:get_all_cgsnapshots'
value: ''
enable_snapshot_export_attributes:
key: 'volume_extension:snapshot_export_attributes'
value: 'rule:admin_or_owner'
enable_snapshot_backup_status_attribute:
key: 'volume_extension:snapshot_backup_status_attribute'
value: 'rule:admin_or_owner'
# heat
heat::use_syslog: true
heat::log_facility: 'local6'
heat::database_idle_timeout: 60
heat::database_max_pool_size: 1
heat::database_max_overflow: 15
heat::enable_proxy_headers_parsing: true
heat::heat_clients_insecure: true
heat::api::enabled: false
heat::api_cfn::enabled: false
heat::api_cloudwatch::enabled: false
heat::engine::enabled: false
heat::engine::deferred_auth_method: 'trusts'
# trusts_delegated_roles is set to empty list so all users can use heat
heat::engine::trusts_delegated_roles: []
heat::engine::action_retry_limit: 1
heat::engine::max_resources_per_stack: -1
heat::engine::convergence_engine: false
heat::keystone::domain::domain_name: 'heat'
heat::keystone::auth_cfn::configure_user: false
heat::keystone::auth_cfn::configure_user_role: false
# Murano
murano::db::postgresql::encoding: 'UTF8'
murano::use_syslog: true
murano::log_facility: 'local2'
murano::debug: 'False'
murano::engine::manage_service: true
murano::engine::enabled: false
openstack::murano::params::tcp_listen_options: '[binary,
{packet,raw},
{reuseaddr,true},
{backlog,128},
{nodelay,true},
{linger,{true,0}},
{exit_on_close,false},
{keepalive,true}]'
openstack::murano::params::rabbit_tcp_listen_options:
'[binary,
{packet, raw},
{reuseaddr, true},
{backlog, 128},
{nodelay, true},
{linger, {true, 0}},
{exit_on_close, false}]'
# SSL parameters
# this cipher list is taken from any cipher that is supported by rabbitmq and
# is currently in either lighttpd or haproxy's cipher lists
# constructed on 2017-04-05
openstack::murano::params::rabbit_cipher_list: ["AES128-GCM-SHA256",
"AES128-SHA",
"AES128-SHA256",
"AES256-GCM-SHA384",
"AES256-SHA",
"AES256-SHA256",
"DHE-DSS-AES128-GCM-SHA256",
"DHE-DSS-AES128-SHA256",
"DHE-DSS-AES256-GCM-SHA384",
"DHE-DSS-AES256-SHA256",
"DHE-RSA-AES128-GCM-SHA256",
"DHE-RSA-AES128-SHA256",
"DHE-RSA-AES256-GCM-SHA384",
"DHE-RSA-AES256-SHA256",
"ECDH-ECDSA-AES128-GCM-SHA256",
"ECDH-ECDSA-AES128-SHA256",
"ECDH-ECDSA-AES256-GCM-SHA384",
"ECDH-ECDSA-AES256-SHA384",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-AES128-SHA256",
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-AES256-SHA384",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-SHA",
"ECDHE-RSA-AES128-SHA256",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-SHA",
"ECDHE-RSA-AES256-SHA384",
"ECDH-RSA-AES128-GCM-SHA256",
"ECDH-RSA-AES128-SHA256",
"ECDH-RSA-AES256-GCM-SHA384",
"ECDH-RSA-AES256-SHA384"]
# Magnum
magnum::logging::use_syslog: true
magnum::logging::log_facility: 'local2'
magnum::logging::debug: 'False'
magnum::db::postgresql::encoding: 'UTF8'
magnum::notification_driver: 'messagingv2'
magnum::conductor::enabled: false
magnum::password_symbols: '23456789,ABCDEFGHJKLMNPQRSTUVWXYZ,abcdefghijkmnopqrstuvwxyz,!@#$%^&*()<>{}+'
magnum::certificates::cert_manager_type: 'x509keypair'
magnum::clients::endpoint_type: 'internalURL'
# Ironic
ironic::use_syslog: true
ironic::logging::log_facility: 'local2'
ironic::db::postgresql::encoding: 'UTF8'
ironic::logging::debug: false
ironic::api::enabled: false
ironic::conductor::enabled: false
ironic::conductor::enabled_drivers: ['pxe_ipmitool', 'pxe_ipmitool_socat']
ironic::conductor::automated_clean: true
ironic::conductor::default_boot_option: 'local'
ironic::drivers::pxe::images_path: '/opt/img-conversions/ironic/images/'
ironic::drivers::pxe::instance_master_path: '/opt/img-conversions/ironic/master_images'
# Dcorch
dcorch::use_syslog: true
dcorch::log_facility: 'local2'

View File

@ -24,68 +24,6 @@ sysinv::verbose: true
sysinv::log_facility: 'local6'
# neutron
neutron::state_path: '/var/run/neutron'
neutron::lock_path: '/var/run/neutron/lock'
neutron::root_helper: 'sudo'
neutron::host_driver: 'neutron.plugins.wrs.drivers.host.DefaultHostDriver'
neutron::fm_driver: 'neutron.plugins.wrs.drivers.fm.DefaultFmDriver'
neutron::logging::use_syslog: true
neutron::logging::syslog_log_facility: 'local2'
neutron::logging::log_dir: false
neutron::logging::verbose: false
neutron::logging::debug: false
neutron::core_plugin: 'neutron.plugins.ml2.plugin.Ml2Plugin'
neutron::service_plugins:
- 'router'
neutron::allow_overlapping_ips: true
neutron::vlan_transparent: true
neutron::pnet_audit_enabled: false
neutron::verbose: false
neutron::log_dir: false
neutron::use_syslog: true
neutron::notification_driver: ['messagingv2']
neutron::dns_domain: 'openstacklocal'
# nova
nova::use_syslog: true
nova::debug: false
nova::log_facility: 'local6'
nova::notification_driver: 'messagingv2'
nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL'
nova::notify_on_state_change: 'vm_and_task_state'
nova::database_idle_timeout: 60
nova::database_max_pool_size: 1
nova::database_max_overflow: 64
# Set number of block device allocate retries and interval
# for volume create when VM boots and creates a new volume.
# The total block allocate retries time is set to 2 hours
# to satisfy the volume allocation time on slow RPM disks
# which may take 1 hour and a half per volume when several
# volumes are created in parallel.
nova::block_device_allocate_retries: 2400
nova::block_device_allocate_retries_interval: 3
nova::disk_allocation_ratio: 1.0
nova::cpu_allocation_ratio: 16.0
nova::ram_allocation_ratio: 1.0
# require Nova Placement to use the internal endpoint only
nova::placement::os_interface: 'internal'
# ceilometer
ceilometer::telemetry_secret: ''
ceilometer::use_syslog: true
ceilometer::log_facility: 'local2'
# collectd: configuration
platform::collectd::params::interval: 30
platform::collectd::params::timeout: 2

View File

@ -1,8 +1,2 @@
# storage specific configuration data
---
# ceilometer: storage nodes only run the IPMI polling namespace;
# central and compute polling are disabled on this personality.
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
ceilometer::agent::polling::central_namespace: false
ceilometer::agent::polling::compute_namespace: false
ceilometer::agent::polling::ipmi_namespace: true

View File

@ -3,66 +3,3 @@
# vswitch
vswitch::dpdk::memory_channels: 4
# neutron
neutron::agents::dhcp::interface_driver: 'openvswitch'
neutron::agents::dhcp::enable_isolated_metadata: true
neutron::agents::dhcp::state_path: '/var/run/neutron'
neutron::agents::dhcp::root_helper: 'sudo'
neutron::agents::l3::interface_driver: 'openvswitch'
neutron::agents::l3::metadata_port: 80
neutron::agents::l3::agent_mode: 'dvr_snat'
neutron::agents::ml2::ovs::manage_vswitch: false
neutron::agents::ml2::ovs::datapath_type: 'netdev'
neutron::agents::ml2::ovs::vhostuser_socket_dir: '/var/run/openvswitch'
neutron::agents::ml2::ovs::firewall_driver: 'noop'
neutron::agents::ml2::sriov::manage_service: true
neutron::agents::ml2::sriov::polling_interval: 5
# nova
nova::compute::manage_service: false
nova::compute::config_drive_format: 'iso9660'
nova::compute::instance_usage_audit: true
nova::compute::instance_usage_audit_period: 'hour'
nova::compute::allow_resize_to_same_host: true
nova::compute::force_raw_images: false
nova::compute::reserved_host_memory: 0
# We want to start up instances on bootup
nova::compute::resume_guests_state_on_host_boot: true
nova::compute::libvirt::compute_driver: 'libvirt.LibvirtDriver'
nova::compute::libvirt::migration_support: true
nova::compute::libvirt::libvirt_cpu_mode: 'none'
nova::compute::libvirt::live_migration_downtime: 500
nova::compute::libvirt::live_migration_downtime_steps: 10
nova::compute::libvirt::live_migration_downtime_delay: 75
nova::compute::libvirt::live_migration_completion_timeout: 180
nova::compute::libvirt::live_migration_progress_timeout: 0
nova::compute::libvirt::mem_stats_period_seconds: 0
nova::compute::libvirt::remove_unused_base_images: true
nova::compute::libvirt::remove_unused_resized_minimum_age_seconds: 86400
nova::compute::libvirt::remove_unused_original_minimum_age_seconds: 3600
nova::compute::libvirt::live_migration_flag: "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED"
nova::network::neutron::neutron_username: 'neutron'
nova::network::neutron::neutron_project_name: 'services'
nova::network::neutron::neutron_user_domain_name: 'Default'
nova::network::neutron::neutron_project_domain_name: 'Default'
nova::network::neutron::neutron_region_name: RegionOne
nova::compute::neutron::libvirt_vif_driver: 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver'
openstack::nova::compute::compute_monitors: "cpu.virt_driver"
# ceilometer
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
ceilometer::agent::polling::central_namespace: false
ceilometer::agent::polling::compute_namespace: true
ceilometer::agent::polling::instance_discovery_method: 'workload_partitioning'
ceilometer::agent::polling::ipmi_namespace: true

View File

@ -17,7 +17,5 @@ include ::platform::amqp::bootstrap
include ::openstack::keystone::bootstrap
include ::openstack::barbican::bootstrap
include ::platform::client::bootstrap
include ::openstack::client::bootstrap
include ::platform::sysinv::bootstrap

View File

@ -75,51 +75,11 @@ include ::platform::fm::api
include ::platform::multipath
include ::platform::client
include ::openstack::client
include ::openstack::keystone
include ::openstack::keystone::api
include ::openstack::glance
include ::openstack::glance::api
include ::openstack::cinder
include ::openstack::cinder::api
include ::openstack::neutron
include ::openstack::neutron::api
include ::openstack::neutron::server
include ::openstack::nova
include ::openstack::nova::api
include ::openstack::nova::network
include ::openstack::nova::controller
include ::openstack::nova::placement
include ::openstack::gnocchi
include ::openstack::gnocchi::api
include ::openstack::gnocchi::metricd
include ::openstack::ceilometer
include ::openstack::ceilometer::agent::notification
include ::openstack::ceilometer::polling
include ::openstack::panko
include ::openstack::panko::api
include ::openstack::heat
include ::openstack::heat::api
include ::openstack::horizon
include ::openstack::murano
include ::openstack::murano::api
include ::openstack::magnum
include ::openstack::magnum::api
include ::openstack::ironic
include ::openstack::ironic::api
include ::platform::dcmanager
include ::platform::dcmanager::manager

View File

@ -29,8 +29,6 @@ include ::platform::collectd
include ::platform::filesystem::storage
include ::platform::docker
include ::platform::ceph::storage
include ::openstack::ceilometer
include ::openstack::ceilometer::polling
class { '::platform::config::storage::post':
stage => post,

View File

@ -17,7 +17,3 @@ include ::platform::amqp::upgrade
include ::openstack::keystone::upgrade
include ::platform::client::upgrade
include ::openstack::client::upgrade
include ::openstack::murano::upgrade
include ::openstack::ironic::upgrade

View File

@ -36,17 +36,7 @@ include ::platform::kubernetes::worker
include ::platform::multipath
include ::platform::client
include ::platform::ceph::worker
include ::openstack::client
include ::openstack::neutron
include ::openstack::neutron::agents
include ::openstack::nova
include ::openstack::nova::compute
include ::openstack::nova::compute::pci
include ::openstack::nova::storage
include ::openstack::nova::network
include ::openstack::nova::placement
include ::openstack::ceilometer
include ::openstack::ceilometer::polling
class { '::platform::config::worker::post':
stage => post,

View File

@ -1,119 +0,0 @@
# Shared tunables for the aodh (OpenStack alarming service) manifests.
# Inherited by the other openstack::aodh::* classes below so they all see
# one consistent set of values.
# NOTE(review): leading indentation appears stripped by the diff rendering;
# code lines are reproduced as-is.
class openstack::aodh::params (
# TCP port the aodh REST API listens on (and that haproxy fronts).
$api_port = 8042,
# Keystone region to register under; undef defers to provider defaults.
$region_name = undef,
$service_name = 'openstack-aodh',
# Whether to create the keystone user/service (see ::api below).
$service_create = false,
# Master switch: when false the service classes become no-ops.
$service_enabled = true,
) { }
# Configures the aodh alarming service: core daemons, DB (on the node that
# initializes the database), messaging transport, and an expirer cron job.
class openstack::aodh
inherits ::openstack::aodh::params {
if $service_enabled {
include ::platform::params
include ::platform::amqp::params
include ::aodh::auth
include ::aodh::client
include ::aodh::evaluator
include ::aodh::notifier
include ::aodh::listener
include ::aodh::keystone::authtoken
# Only the node performing initial database setup creates the postgres DB.
if $::platform::params::init_database {
include ::aodh::db::postgresql
}
# Force internal endpoints for service credentials (no public traffic).
aodh_config {
'service_credentials/interface': value => 'internalURL'
}
# Wire aodh to the platform AMQP bus (SSL flag and transport URL come
# from platform::amqp::params).
class { '::aodh':
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
}
# WRS register aodh-expirer-active in cron to run daily at the 35 minute mark
# NOTE(review): in crontab semantics '*/24' in the hour field only matches
# hour 0, so this effectively runs once a day at 00:35 — consistent with
# the intent above, but worth confirming it was deliberate.
cron { 'aodh-expirer':
ensure => 'present',
command => '/usr/bin/aodh-expirer-active',
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
minute => '35',
hour => '*/24',
user => 'root',
}
}
}
# Opens the aodh API port in the platform firewall.
class openstack::aodh::firewall
inherits ::openstack::aodh::params {
platform::firewall::rule { 'aodh-api':
service_name => 'aodh',
ports => $api_port,
}
}
# Fronts the aodh REST API with haproxy. Public and private sides use the
# same port number; haproxy terminates on the public address and forwards
# to the local service.
class openstack::aodh::haproxy
inherits ::openstack::aodh::params {
platform::haproxy::proxy { 'aodh-restapi':
server_name => 's-aodh-restapi',
public_port => $api_port,
private_port => $api_port,
}
}
# Sets up the aodh API: keystone identity (always, when service_create is
# set), then — only when the service is enabled — the api.conf file, the
# ::aodh::api class, and the firewall/haproxy frontends.
class openstack::aodh::api
inherits ::openstack::aodh::params {
include ::platform::params
# The aodh user and service are always required and they
# are used by subclouds when the service itself is disabled
# on System Controller
# whether it creates the endpoint is determined by
# aodh::keystone::auth::configure_endpoint which is
# set via sysinv puppet
if ($::openstack::aodh::params::service_create and
$::platform::params::init_keystone) {
include ::aodh::keystone::auth
}
if $service_enabled {
include ::platform::network::mgmt::params
# Bind the API to the management controller address.
$api_host = $::platform::network::mgmt::params::controller_address
$url_host = $::platform::network::mgmt::params::controller_address_url
file { '/usr/share/aodh/aodh-api.conf':
ensure => file,
content => template('openstack/aodh-api.conf.erb'),
owner => 'root',
group => 'root',
mode => '0640',
}
# Chained: the config file must exist before the API class is applied.
-> class { '::aodh::api':
host => $api_host,
sync_db => $::platform::params::init_database,
enable_proxy_headers_parsing => true,
}
include ::openstack::aodh::firewall
include ::openstack::aodh::haproxy
}
}
# Runtime reconfiguration entry point: re-applies only the AMQP transport
# settings (used when messaging parameters change on a live system).
class openstack::aodh::runtime {
include ::platform::amqp::params
class { '::aodh':
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
}
}

View File

@ -1,263 +0,0 @@
# Shared tunables for the ceilometer (telemetry) manifests.
class openstack::ceilometer::params (
# TCP port historically used by ceilometer-api.
$api_port = 8777,
# Keystone region for service registration; undef defers to defaults.
$region_name = undef,
$service_name = 'openstack-ceilometer',
# Whether to create the keystone user/service identity.
$service_create = false,
) { }
# Configures the ceilometer telemetry service: AMQP transport, keystone
# identity and DB sync (on the keystone-initializing node), gnocchi
# resource types for vswitch metering, generic ceilometer_config options,
# memcached-backed caching on controllers, and shared-service types in
# region (distributed cloud) configurations.
class openstack::ceilometer {
include ::platform::amqp::params
include ::platform::params
include ::openstack::ceilometer::params
include ::platform::kubernetes::params
class { '::ceilometer':
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
rabbit_qos_prefetch_count => 100,
}
if ($::openstack::ceilometer::params::service_create and
$::platform::params::init_keystone) {
include ::ceilometer::keystone::auth
# DB sync is skipped on system controllers and kubernetes-enabled
# deployments; it also requires both ceilometer and gnocchi service
# identities to exist first.
if ($::platform::params::distributed_cloud_role != 'systemcontroller' and
$::platform::kubernetes::params::enabled != true) {
include ::openstack::gnocchi::params
class { '::ceilometer::db::sync':
extra_params => '--skip-metering-database',
require => [Keystone::Resource::Service_identity['ceilometer', 'gnocchi']]
}
# For non-OVS vswitch types, register custom gnocchi resource types
# used to meter the vswitch engine and its interfaces/ports.
if $::platform::params::vswitch_type !~ '^ovs' {
include ::gnocchi::keystone::authtoken
$os_auth_url = $::gnocchi::keystone::authtoken::auth_url
$os_username = $::gnocchi::keystone::authtoken::username
$os_user_domain = $::gnocchi::keystone::authtoken::user_domain_name
$os_project_name = $::gnocchi::keystone::authtoken::project_name
$os_project_domain = $::gnocchi::keystone::authtoken::project_domain_name
$os_region_name = $::gnocchi::keystone::authtoken::region_name
$os_auth_type = $::gnocchi::keystone::authtoken::auth_type
$os_password = $::gnocchi::keystone::authtoken::password
$os_interface = 'internalURL'
# Must run after the DB sync so the gnocchi schema exists.
Class['::ceilometer::db::sync']
-> exec { 'Creating vswitch resource types':
command => 'gnocchi resource-type create vswitch_engine \
-a cpu_id:number:true:min=0 \
-a host:string:true:max_length=64;
gnocchi resource-type create vswitch_interface_and_port \
-a host:string:false:max_length=64 \
-a network_uuid:string:false:max_length=255 \
-a network_id:string:false:max_length=255 \
-a link-speed:number:false:min=0',
environment => ["OS_AUTH_URL=${os_auth_url}",
"OS_USERNAME=${os_username}",
"OS_USER_DOMAIN_NAME=${os_user_domain}",
"OS_PROJECT_NAME=${os_project_name}",
"OS_PROJECT_DOMAIN_NAME=${os_project_domain}",
"OS_REGION_NAME=${os_region_name}",
"OS_INTERFACE=${os_interface}",
"OS_AUTH_TYPE=${os_auth_type}",
"OS_PASSWORD=${os_password}"],
}
}
}
}
include ::ceilometer::agent::auth
include ::openstack::cinder::params
include ::openstack::glance::params
# FIXME(mpeters): generic parameter can be moved to the puppet module
ceilometer_config {
'DEFAULT/executor_thread_pool_size': value => 16;
'DEFAULT/shuffle_time_before_polling_task': value => 30;
'DEFAULT/batch_polled_samples': value => true;
'oslo_messaging_rabbit/rpc_conn_pool_size': value => 10;
'oslo_messaging_rabbit/socket_timeout': value => 1.00;
'compute/resource_update_interval': value => 60;
'DEFAULT/region_name_for_services': value => $::openstack::ceilometer::params::region_name;
}
# Controllers cache via the platform memcached instance; IPv6 listen
# addresses need the inet6:[...] form for dogpile/memcached.
if $::personality == 'controller' {
include ::platform::memcached::params
$memcache_ip = $::platform::memcached::params::listen_ip
$memcache_port = $::platform::memcached::params::tcp_port
$memcache_ip_version = $::platform::memcached::params::listen_ip_version
$memcache_servers = $memcache_ip_version ? {
4 => "'${memcache_ip}:${memcache_port}'",
6 => "'inet6:[${memcache_ip}]:${memcache_port}'",
}
oslo::cache { 'ceilometer_config':
enabled => true,
backend => 'dogpile.cache.memcached',
memcache_servers => $memcache_servers,
expiration_time => 86400,
}
}
# In region configurations, advertise which service types are shared
# with region one (glance and/or cinder when their region differs from
# this region's name).
if $::platform::params::region_config {
if $::openstack::glance::params::region_name != $::platform::params::region_2_name {
$shared_service_glance = [$::openstack::glance::params::service_type]
} else {
$shared_service_glance = []
}
# skip the check if cinder region name has not been configured
if ($::openstack::cinder::params::region_name != undef and
$::openstack::cinder::params::region_name != $::platform::params::region_2_name) {
$shared_service_cinder = [$::openstack::cinder::params::service_type,
$::openstack::cinder::params::service_type_v2,
$::openstack::cinder::params::service_type_v3]
} else {
$shared_service_cinder = []
}
$shared_services = concat($shared_service_glance, $shared_service_cinder)
ceilometer_config {
'DEFAULT/region_name_for_shared_services': value => $::platform::params::region_1_name;
'DEFAULT/shared_services_types': value => join($shared_services,',');
}
}
}
# Configures the ceilometer notification agent: installs pipeline.yaml and
# gnocchi_resources.yaml, creates the CSV/versioned directory tree under
# /opt/cgcs/ceilometer, and tunes worker count and batching.
class openstack::ceilometer::agent::notification {
include ::platform::params
$cgcs_fs_directory = '/opt/cgcs'
$ceilometer_directory = "${cgcs_fs_directory}/ceilometer"
$ceilometer_directory_csv = "${ceilometer_directory}/csv"
$ceilometer_directory_versioned = "${ceilometer_directory}/${::platform::params::software_version}"
# Chained: pipeline.yaml, then the directory tree, then a per-software-
# version copy of pipeline.yaml (sourced from the file just written).
file { '/etc/ceilometer/pipeline.yaml':
ensure => 'present',
content => template('openstack/pipeline.yaml.erb'),
mode => '0640',
owner => 'root',
group => 'ceilometer',
tag => 'ceilometer-yamls',
}
-> file { $ceilometer_directory:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { $ceilometer_directory_csv:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { $ceilometer_directory_versioned:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { "${ceilometer_directory_versioned}/pipeline.yaml":
ensure => 'file',
source => '/etc/ceilometer/pipeline.yaml',
owner => 'root',
group => 'root',
mode => '0640',
}
file { '/etc/ceilometer/gnocchi_resources.yaml':
ensure => 'present',
content => template('openstack/gnocchi_resources.yaml.erb'),
mode => '0640',
owner => 'root',
group => 'ceilometer',
tag => 'ceilometer-yamls',
}
# Limit the number of ceilometer agent notification workers to 10 max
$agent_workers_count = min($::platform::params::eng_workers_by_2, 10)
# AIO systems get a longer batch timeout to reduce load.
if $::platform::params::system_type == 'All-in-one' {
$batch_timeout = 25
} else {
$batch_timeout = 5
}
# FIXME(mpeters): generic parameter can be moved to the puppet module
ceilometer_config {
'DEFAULT/csv_location': value => $ceilometer_directory_csv;
'DEFAULT/csv_location_strict': value => true;
'notification/workers': value => $agent_workers_count;
'notification/batch_size': value => 100;
'notification/batch_timeout': value => $batch_timeout;
}
}
# Configures the ceilometer polling agent: writes polling.yaml from the
# interval parameters, selects which polling namespaces run on this node
# (central on controllers, compute on worker subfunctions), and manages
# the pmon monitoring config accordingly.
class openstack::ceilometer::polling (
$instance_polling_interval = 600,
$instance_cpu_polling_interval = 30,
$instance_disk_polling_interval = 600,
$ipmi_polling_interval = 600,
$ceph_polling_interval = 600,
$image_polling_interval = 600,
$volume_polling_interval = 600,
) {
include ::platform::params
include ::platform::kubernetes::params
file { '/etc/ceilometer/polling.yaml':
ensure => 'present',
content => template('openstack/polling.yaml.erb'),
mode => '0640',
owner => 'root',
group => 'ceilometer',
tag => 'ceilometer-yamls',
}
# Central polling namespace runs only on controllers.
if $::personality == 'controller' {
$central_namespace = true
} else {
$central_namespace = false
}
# With worker services disabled or kubernetes enabled, the agent is
# stopped and its pmon config removed; otherwise pmon is linked to the
# compute or non-compute variant depending on the node's subfunction.
if (str2bool($::disable_worker_services) or
$::platform::kubernetes::params::enabled) {
$agent_enable = false
$compute_namespace = false
file { '/etc/pmon.d/ceilometer-polling.conf':
ensure => absent,
}
} else {
$agent_enable = true
if str2bool($::is_worker_subfunction) {
$pmon_target = '/etc/ceilometer/ceilometer-polling-compute.conf.pmon'
$compute_namespace = true
} else {
$pmon_target = '/etc/ceilometer/ceilometer-polling.conf.pmon'
$compute_namespace = false
}
file { '/etc/pmon.d/ceilometer-polling.conf':
ensure => link,
target => $pmon_target,
owner => 'root',
group => 'root',
mode => '0640',
}
}
class { '::ceilometer::agent::polling':
enabled => $agent_enable,
central_namespace => $central_namespace,
compute_namespace => $compute_namespace,
}
}

View File

@ -1,803 +0,0 @@
# TODO (rchurch): Make sure all includes have the correct global scope
#
# Shared parameters and derived configuration-state flags for the cinder
# manifests. The $is_initial_* flags mark the first-ever configuration of
# cinder or a backend on this system (tracked by DRBD-synced flag files
# under config_path); the $is_node_* flags mark node-local
# (re)configuration (tracked by flag files under /etc/platform).
class openstack::cinder::params (
  $service_enabled = false,
  $api_port = 8776,
  $api_proxy_port = 28776,
  $region_name = undef,
  $service_name = 'openstack-cinder',
  $service_type = 'volume',
  $service_type_v2 = 'volumev2',
  $service_type_v3 = 'volumev3',
  $configure_endpoint = true,
  $enabled_backends = [],
  $cinder_address = undef,
  $cinder_directory = '/opt/cgcs/cinder',
  $cinder_image_conversion_dir = '/opt/img-conversions/cinder',
  $cinder_device = '',
  $cinder_size = undef,
  $cinder_fs_device = '/dev/drbd4',
  $cinder_vg_name = 'cinder-volumes',
  $drbd_resource = 'drbd-cinder',
  $iscsi_ip_address = undef,
  $is_ceph_external = false,
  # Flag files
  $initial_cinder_config_flag = "${::platform::params::config_path}/.initial_cinder_config_complete",
  $initial_cinder_lvm_config_flag = "${::platform::params::config_path}/.initial_cinder_lvm_config_complete",
  $initial_cinder_ceph_config_flag = "${::platform::params::config_path}/.initial_cinder_ceph_config_complete",
  $node_cinder_lvm_config_flag = '/etc/platform/.node_cinder_lvm_config_complete',
) {
  # Strip a trailing partition suffix (e.g. "-part1") to get the whole disk.
  $cinder_disk = regsubst($cinder_device, '-part\d+$', '')

  # Take appropriate actions based on the service states defined by:
  # - $is_initial_cinder => first time ever when cinder is configured;
  # - $is_initial_cinder_lvm => first time ever when LVM cinder is configured on the system;
  # - $is_initial_cinder_ceph => first time ever when Ceph cinder is configured on the system;
  # - $is_node_cinder_lvm => cinder LVM is configured/reconfigured on a node;
  # - $is_node_cinder_ceph => cinder Ceph is configured/reconfigured on a node.
  # These states are dependent on two aspects:
  # 1. A flag file present on the disk either in:
  #    - DRBD synced /opt/platform, for system flags or in
  #    - local folder /etc/platform, for node specific flags
  # 2. Controller standby or active state. Sometimes manifests are applied at the same time on both
  #    controllers with most configuration happenning on the active node and minimal on the standby.
  if $service_enabled {
    # Check if this is the first time we ever configure cinder on this system
    if str2bool($::is_controller_active) and str2bool($::is_initial_cinder_config) {
      $is_initial_cinder = true
    } else {
      $is_initial_cinder = false
    }

    if 'lvm' in $enabled_backends {
      # Check if this is the first time we ever configure LVM on this system
      if str2bool($::is_controller_active) and str2bool($::is_initial_cinder_lvm_config) {
        $is_initial_cinder_lvm = true
      } else {
        $is_initial_cinder_lvm = false
      }
      # Check if we should configure/reconfigure cinder LVM for this node.
      # True in case of node reinstalls, device replacements, reconfigurations etc.
      if str2bool($::is_node_cinder_lvm_config) {
        $is_node_cinder_lvm = true
      } else {
        $is_node_cinder_lvm = false
      }
    } else {
      $is_initial_cinder_lvm = false
      $is_node_cinder_lvm = false
    }

    if 'ceph' in $enabled_backends or $is_ceph_external {
      # Check if this is the first time we ever configure Ceph on this system
      if str2bool($::is_controller_active) and str2bool($::is_initial_cinder_ceph_config) {
        $is_initial_cinder_ceph = true
      } else {
        $is_initial_cinder_ceph = false
      }
    } else {
      $is_initial_cinder_ceph = false
    }

    # NOTE(review): $is_node_cinder_ceph is only assigned in the
    # service-disabled branch below; in this branch it stays undef
    # (falsy). Looks intentional-but-asymmetric — confirm with callers.

    # Cinder needs to be running on initial configuration of either Ceph or LVM
    if str2bool($::is_controller_active) and ($is_initial_cinder_lvm or $is_initial_cinder_ceph) {
      $enable_cinder_service = true
    } else {
      $enable_cinder_service = false
    }
  } else {
    $is_initial_cinder = false
    $is_initial_cinder_lvm = false
    $is_node_cinder_lvm = false
    $is_initial_cinder_ceph = false
    $is_node_cinder_ceph = false
    $enable_cinder_service = false
  }
}
# Called from controller manifest
#
# Base cinder configuration for a controller: database creation/sync,
# the cinder system user/group, state directories, core ::cinder setup
# (AMQP transport), and common cinder.conf overrides. Backend-specific
# configuration is pulled in via openstack::cinder::backends.
class openstack::cinder
  inherits ::openstack::cinder::params {

  # TODO (rchurch): This will create the cinder DB on a system that may never run cinder. This make sense?
  #if $is_initial_cinder {
  if $::platform::params::init_database {
    include platform::postgresql::server
    include ::cinder::db::postgresql
  }

  # TODO (rchurch): Make this happen after config_controller? If we do that we should
  #                 exec 'cinder-manage db sync' as root instead of 'cinder' user
  #if $is_initial_cinder {
  if str2bool($::is_initial_config_primary) {
    include ::cinder::db::sync
  }

  include ::platform::params
  include ::platform::amqp::params
  include ::platform::network::mgmt::params

  $controller_address = $::platform::network::mgmt::params::controller_address

  # Fixed uid/gid (165) so ownership is consistent across both controllers.
  group { 'cinder':
    ensure => 'present',
    gid    => '165',
  }

  user { 'cinder':
    ensure           => 'present',
    comment          => 'OpenStack Cinder Daemons',
    gid              => '165',
    groups           => ['nobody', 'cinder', $::platform::params::protected_group_name],
    home             => '/var/lib/cinder',
    password         => '!!',
    password_max_age => '-1',
    password_min_age => '-1',
    shell            => '/sbin/nologin',
    uid              => '165',
  }

  # The image-conversion dir is only needed when the service is enabled.
  if $service_enabled {
    file { $cinder_directory:
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }
    -> file { $cinder_image_conversion_dir:
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }
    -> file { "${cinder_directory}/data":
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }
  } else {
    file { $cinder_directory:
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }
    -> file { "${cinder_directory}/data":
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }
  }

  class { '::cinder':
    rabbit_use_ssl        => $::platform::amqp::params::ssl_enabled,
    default_transport_url => $::platform::amqp::params::transport_url,
  }

  include ::cinder::keystone::authtoken
  include ::cinder::scheduler
  include ::cinder::client
  include ::cinder::volume
  include ::cinder::ceilometer
  include ::cinder::glance
  include ::openstack::cinder::config
  include ::openstack::cinder::backends
  include ::openstack::cinder::backup
  include ::platform::multipath::params

  # TODO(mpeters): move to puppet module formal parameters
  cinder_config {
    'DEFAULT/my_ip': value => $controller_address;
    'DEFAULT/state_path': value => "${cinder_directory}/data";
    # Reduce the number of RPCs that can be handled in parallel from the
    # default of 64. Doing too much at once (e.g. creating volumes) results
    # in a lot of thrashing and operations time out.
    # Liberty renamed this from rpc_thread_pool_size to executor_thread_pool_size
    'DEFAULT/executor_thread_pool_size': value => '32';
    'DEFAULT/enable_force_upload': value => true;
    'DEFAULT/use_multipath_for_image_xfer': value => $::platform::multipath::params::enabled;
    'backend_defaults/use_multipath_for_image_xfer': value => $::platform::multipath::params::enabled;
  }

  # Run cinder-manage to purge deleted rows daily at the 30 minute mark
  # (hour '*/24' is equivalent to hour 0 — runs once per day at 00:30).
  cron { 'cinder-purge-deleted':
    ensure      => 'present',
    command     => '/usr/bin/cinder-purge-deleted-active',
    environment => 'PATH=/bin:/usr/bin:/usr/sbin',
    minute      => '30',
    hour        => '*/24',
    user        => 'root',
  }
}
# Applies service-parameter-provided [DEFAULT] section overrides to
# cinder.conf.
# NOTE(review): the $config_params parameter is declared but the
# create_resources call re-reads the same hiera key directly rather than
# using the parameter — presumably to force a hash-merge lookup; confirm.
class openstack::cinder::config::default(
  $config_params
) inherits ::openstack::cinder::params {
  # Realize any service parameter provided DEFAULT section params of cinder.conf
  create_resources('cinder_config', hiera_hash('openstack::cinder::config::default::config_params', {}))
}
# Aggregator for cinder.conf configuration overrides; currently only the
# [DEFAULT] section overrides are applied.
class openstack::cinder::config
  inherits ::openstack::cinder::params {

  include ::openstack::cinder::config::default
}
# Configures the cinder-backup driver.
class openstack::cinder::backup
  inherits ::openstack::cinder::params {

  # For now only support file backend backup
  include ::cinder::backup::posix
}
# Aggregates all supported external SAN backend configurations
# (EMC VNX, HPE 3PAR, HPE LeftHand).
class openstack::cinder::backends::san
  inherits ::openstack::cinder::params {

  include ::openstack::cinder::emc_vnx
  include ::openstack::cinder::backends::hpe3par
  include ::openstack::cinder::hpelefthand
}
# Declares cinder's enabled_backends list and pulls in the configuration
# class for each enabled backend type (lvm, ceph, external SANs).
class openstack::cinder::backends
  inherits ::openstack::cinder::params {

  class { '::cinder::backends':
    enabled_backends => $enabled_backends
  }

  if 'lvm' in $enabled_backends {
    include ::openstack::cinder::lvm
  }

  # Ceph config is also needed when an external ceph cluster is used,
  # even if 'ceph' is not in the local backend list.
  if 'ceph' in $enabled_backends or $is_ceph_external {
    include ::openstack::cinder::backends::ceph
  }

  # SAN backends are configured unconditionally; each SAN class is
  # internally gated by its own feature_enabled flag.
  include openstack::cinder::backends::san
}
# Sets up the DRBD-replicated block device and LVM volume group backing
# the cinder LVM backend.
#
# On the active controller (or a standalone controller replacing the
# cinder disk) this performs the initial DRBD setup, creates the
# cinder-volumes VG with an anchor LV, then deactivates the VG and
# demotes the DRBD resource so SM can take over management.
class openstack::cinder::lvm::filesystem::drbd (
  $device = '/dev/drbd4',
  $lv_name = 'cinder-lv',
  $mountpoint = '/opt/cinder',
  $port = '7792',
  $vg_name = 'cinder-volumes',
  $drbd_handoff = true,
) inherits ::openstack::cinder::params {

  include ::platform::drbd::params
  include ::platform::drbd::cgcs::params

  # On rotational media, serialize resyncs behind the cgcs resource to
  # avoid thrashing the disk.
  if str2bool($::is_primary_disk_rotational) {
    $resync_after = $::platform::drbd::cgcs::params::resource_name
  } else {
    $resync_after = undef
  }

  if (str2bool($::is_controller_active) or
      (str2bool($::is_standalone_controller) and $is_node_cinder_lvm)
  ) {
    # Run DRBD cinder initial setup in two cases
    # 1) first time Cinder LVM is configured,
    # 2) when cinder's disk is replaced on a standalone controller
    #    (mostly to accommodate SX disk replacement).
    # Note: Cinder disk replacement is triggered from sysinv by removing
    #       the checkpoint file behind is_node_cinder_lvm.
    $ha_primary = true
    $initial_setup = true
    $service_enable = true
    $service_ensure = 'running'
  } else {
    $ha_primary = false
    $initial_setup = false
    $service_enable = false
    $service_ensure = 'stopped'
  }

  if $is_node_cinder_lvm {
    # prepare disk for drbd: blacklist it from automounting
    file { '/etc/udev/mount.blacklist':
      ensure => present,
      mode   => '0644',
      owner  => 'root',
      group  => 'root',
    }
    -> file_line { "blacklist ${cinder_disk} automount":
      ensure => present,
      line   => $cinder_disk,
      path   => '/etc/udev/mount.blacklist',
    }
  }

  drbd::resource { $drbd_resource:
    disk          => "\"${cinder_device}\"",
    port          => $port,
    device        => $device,
    mountpoint    => $mountpoint,
    # Notify SM of resync progress so it can track resource availability.
    # Keys are quoted: unquoted hyphenated barewords are fragile in the
    # puppet parser.
    handlers      => {
      'before-resync-target' =>
        "/usr/local/sbin/sm-notify -s ${drbd_resource} -e sync-start",
      'after-resync-target'  =>
        "/usr/local/sbin/sm-notify -s ${drbd_resource} -e sync-end",
    },
    host1         => $::platform::drbd::params::host1,
    host2         => $::platform::drbd::params::host2,
    ip1           => $::platform::drbd::params::ip1,
    ip2           => $::platform::drbd::params::ip2,
    manage        => $is_node_cinder_lvm,
    ha_primary    => $ha_primary,
    initial_setup => $initial_setup,
    automount     => $::platform::drbd::params::automount,
    fs_type       => $::platform::drbd::params::fs_type,
    link_util     => $::platform::drbd::params::link_util,
    link_speed    => $::platform::drbd::params::link_speed,
    num_parallel  => $::platform::drbd::params::num_parallel,
    rtt_ms        => $::platform::drbd::params::rtt_ms,
    cpumask       => $::platform::drbd::params::cpumask,
    resync_after  => $resync_after,
    require       => [ Class['::platform::partitions'], File_line['final filter: update lvm global_filter'] ]
  }

  if ($is_initial_cinder_lvm or
      (str2bool($::is_standalone_controller) and $is_node_cinder_lvm)
  ){
    # Recreate cinder-volumes in two cases:
    # 1) first time Cinder LVM is configured,
    # 2) when cinder's disk is replaced on a standalone controller
    #    (mostly to accommodate SX disk replacement).
    # Note: Cinder disk replacement is triggered from sysinv by removing
    #       the checkpoint file behind is_node_cinder_lvm.
    physical_volume { $device:
      ensure  => present,
      require => Drbd::Resource[$drbd_resource]
    }
    -> volume_group { $vg_name:
      ensure           => present,
      physical_volumes => $device,
    }
    # Create an initial LV, because the LVM ocf resource does not work with
    # an empty VG.
    -> logical_volume { 'anchor-lv':
      ensure          => present,
      volume_group    => $vg_name,
      size            => '1M',
      size_is_minsize => true,
    }
    # Deactivate the VG now. If this isn't done, it prevents DRBD from
    # being stopped later by the SM.
    -> exec { 'Deactivate VG':
      command => "vgchange -a ln ${vg_name}",
    }
    # Make sure the primary resource is in the correct state so that on swact to
    # controller-1 sm has the resource in an acceptable state to become managed
    # and primary. But, if this primary is a single controller we will restart
    # SM so keep it primary
    # TODO (rchurch): fix up the drbd_handoff logic.
    # NOTE: title is double-quoted so ${drbd_resource} is interpolated;
    # the previous single-quoted form left the literal text '$drbd_resource'
    # in the resource title.
    -> exec { "Set ${drbd_resource} role":
      command => str2bool($drbd_handoff) ? {true => "drbdadm secondary ${drbd_resource}", default => '/bin/true'},
      unless  => "drbdadm role ${drbd_resource} | egrep '^Secondary'",
    }
  }
}
# Configures the cinder LVM (iSCSI) backend: DRBD-backed filesystem,
# lvm.conf snapshot autoextend tuning, the iscsi-target persistence
# file, and the cinder::backend::iscsi declaration.
class openstack::cinder::lvm(
  $lvm_type = 'thin',
) inherits ::openstack::cinder::params {

  # if $::platform::params::system_mode != 'simplex' {
  #   include ::openstack::cinder::lvm::filesystem::drbd
  # } else {
  #   include ::openstack::cinder::lvm::filesystem::simplex
  # }
  include ::openstack::cinder::lvm::filesystem::drbd

  # Autoextend snapshots at 80% usage, growing by 20% each time.
  file_line { 'snapshot_autoextend_threshold':
    path  => '/etc/lvm/lvm.conf',
    match => '^\s*snapshot_autoextend_threshold +=.*',
    line  => '   snapshot_autoextend_threshold = 80',
  }

  file_line { 'snapshot_autoextend_percent':
    path  => '/etc/lvm/lvm.conf',
    match => '^\s*snapshot_autoextend_percent +=.*',
    line  => '   snapshot_autoextend_percent = 20',
  }

  # Seed an empty LIO target config so the iscsi target service starts
  # cleanly before any volumes are exported.
  file { "${cinder_directory}/iscsi-target":
    ensure  => 'directory',
    owner   => 'root',
    group   => 'root',
    mode    => '0755',
    require => File[$cinder_directory],
  }
  -> file { "${cinder_directory}/iscsi-target/saveconfig.json":
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0600',
    content => '{
  "fabric_modules": [],
  "storage_objects": [],
  "targets": []
}',
  }

  if $lvm_type == 'thin' {
    $iscsi_lvm_config = {
      'lvm/iscsi_target_flags'              => {'value' => 'direct'},
      'lvm/lvm_type'                        => {'value' => 'thin'},
      'DEFAULT/max_over_subscription_ratio' => {'value' => 1.0}
    }
  } else {
    # Thick provisioning: skip zeroing deleted volumes for speed.
    $iscsi_lvm_config = {
      'lvm/iscsi_target_flags' => {'value' => 'direct'},
      'lvm/lvm_type'           => {'value' => 'default'},
      'lvm/volume_clear'       => {'value' => 'none'}
    }
  }

  cinder::backend::iscsi { 'lvm':
    iscsi_ip_address => $iscsi_ip_address,
    extra_options    => $iscsi_lvm_config ,
    volumes_dir      => "${cinder_directory}/data/volumes",
  }
}
# Defines a single RBD (ceph) cinder backend section.
#
# When disabled, explicitly removes the section's keys from cinder.conf
# so a previously-enabled backend is cleanly deconfigured.
define openstack::cinder::backend::ceph(
  $backend_name,
  $rbd_pool,
  $backend_enabled = false,
  $rbd_user = 'cinder',
  $rbd_ceph_conf = '/etc/ceph/ceph.conf'
) {
  if $backend_enabled {
    cinder::backend::rbd {$backend_name:
      # '$host' is a literal cinder.conf substitution, not a puppet var.
      backend_host  => '$host',
      rbd_pool      => $rbd_pool,
      rbd_user      => $rbd_user,
      rbd_ceph_conf => $rbd_ceph_conf,
    }
  } else {
    cinder_config {
      "${backend_name}/volume_backend_name": ensure => absent;
      "${backend_name}/volume_driver": ensure => absent;
      "${backend_name}/backend_host": ensure => absent;
      "${backend_name}/rbd_ceph_conf": ensure => absent;
      "${backend_name}/rbd_pool": ensure => absent;
    }
  }
}
# Instantiates one openstack::cinder::backend::ceph per entry of the
# hiera-provided $ceph_backend_configs hash (one per ceph storage tier).
class openstack::cinder::backends::ceph (
  $ceph_backend_configs = {}
) inherits ::openstack::cinder::params {
  create_resources('openstack::cinder::backend::ceph', $ceph_backend_configs)
}
# Configures the EMC VNX SAN backend from service-parameter hiera data
# and manages the scsi_id symlink workaround needed by os-brick.
class openstack::cinder::emc_vnx(
  $feature_enabled,
  $config_params
) inherits ::openstack::cinder::params {
  create_resources('cinder_config', hiera_hash('openstack::cinder::emc_vnx::config_params', {}))

  if $feature_enabled {
    $scsi_id_ensure = 'link'
  } else {
    $scsi_id_ensure = 'absent'
  }

  #TODO(rchurch): Evaluate this with Pike... Still needed?
  # During creating EMC cinder bootable volume, linuxscsi.py in
  # python2-os-brick-1.1.0-1.el7.noarch invokes "scsi_id" command and
  # fails as "scsi_id" is not in the search PATH. So create a symlink
  # here. The fix is already in the later version of os-brick. We
  # can remove this code when python2-os-brick is upgraded.
  file { '/usr/bin/scsi_id':
    ensure => $scsi_id_ensure,
    owner  => 'root',
    group  => 'root',
    target => '/lib/udev/scsi_id',
  }
}
# Configures one HPE 3PAR backend section ($name) from service-parameter
# hiera data. As HP SANs are addon PS supported options, an explicit log
# line is emitted when the feature is enabled.
define openstack::cinder::backend::hpe3par
{
  $hiera_params = "openstack::cinder::${name}::config_params"

  # BUGFIX: previously $feature_enabled was assigned the hiera *key name*
  # (a non-empty string, always truthy), so the enablement log ran
  # unconditionally. Look the value up instead, defaulting to disabled.
  $feature_enabled = hiera("openstack::cinder::${name}::feature_enabled", false)

  create_resources('cinder_config', hiera_hash($hiera_params, {}))

  # Stringify before str2bool so both boolean and string hiera values work.
  if str2bool("${feature_enabled}") {
    exec {"Including ${name} configuration":
      path    => [ '/usr/bin', '/usr/sbin', '/bin', '/sbin' ],
      command => "echo Including ${name} configuration",
    }
  }
}
# Instantiates one hpe3par backend define per configured section name.
class openstack::cinder::backends::hpe3par (
  $sections = []
) inherits ::openstack::cinder::params {
  ::openstack::cinder::backend::hpe3par {$sections:}
}
# Configures the HPE LeftHand SAN backend from service-parameter hiera data.
class openstack::cinder::hpelefthand(
  $feature_enabled,
  $config_params
) inherits ::openstack::cinder::params {
  create_resources('cinder_config', hiera_hash('openstack::cinder::hpelefthand::config_params', {}))

  # As HP SANs are addon PS supported options, make sure we have explicit
  # logging showing this is being included when the feature is enabled.
  if $feature_enabled {
    exec {'Including hpelefthand configuration':
      path    => [ '/usr/bin', '/usr/sbin', '/bin', '/sbin' ],
      command => 'echo Including hpelefthand configuration',
    }
  }
}
# Opens the cinder API port in the platform firewall when the service
# is enabled.
class openstack::cinder::firewall
  inherits ::openstack::cinder::params {

  if $service_enabled {
    platform::firewall::rule { 'cinder-api':
      service_name => 'cinder',
      ports        => $api_port,
    }
  }
}
# Adds the haproxy frontend/backend pair for the cinder REST API when
# the service is enabled (public and private sides share $api_port).
class openstack::cinder::haproxy
  inherits ::openstack::cinder::params {

  if $service_enabled {
    platform::haproxy::proxy { 'cinder-restapi':
      server_name  => 's-cinder',
      public_port  => $api_port,
      private_port => $api_port,
    }
  }
}
# Creates or removes a cinder volume type bound to a backend via its
# volume_backend_name property.
define openstack::cinder::api::backend(
  $backend_name,
  $type_name,
  $type_enabled = false,
) {
  # Run it on the active controller, otherwise the prefetch step tries to query
  # cinder and can fail
  if str2bool($::is_controller_active) {
    if $type_enabled {
      cinder_type { $type_name:
        ensure     => present,
        properties => ["volume_backend_name=${backend_name}"]
      }
    } else {
      cinder_type { $type_name:
        ensure => absent
      }
    }
  }
}
# Creates the cinder volume types for the configured backends.
class openstack::cinder::api::backends(
  $ceph_type_configs = {}
) inherits ::openstack::cinder::params {

  # Only include cinder_type the first time an lvm or ceph backend is
  # initialized
  if $is_initial_cinder_lvm {
    ::openstack::cinder::api::backend { 'lvm-store':
      type_enabled => true,
      type_name    => 'iscsi',
      backend_name => 'lvm'
    }
  }

  # Add/Remove any additional cinder ceph tier types
  create_resources('openstack::cinder::api::backend', $ceph_type_configs)

  # Add SAN volume types here when/if required
}
# Called from the controller manifest
class openstack::cinder::api
inherits ::openstack::cinder::params {
include ::platform::params
$api_workers = $::platform::params::eng_workers
include ::platform::network::mgmt::params
$api_host = $::platform::network::mgmt::params::controller_address
$upgrade = $::platform::params::controller_upgrade
if $service_enabled and (str2bool($::is_controller_active) or $upgrade) {
include ::cinder::keystone::auth
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
include ::dcorch::keystone::auth
include ::platform::dcorch::firewall
include ::platform::dcorch::haproxy
}
}
class { '::cinder::api':
bind_host => $api_host,
service_workers => $api_workers,
sync_db => $::platform::params::init_database,
enabled => str2bool($enable_cinder_service)
}
if $::openstack::cinder::params::configure_endpoint {
include ::openstack::cinder::firewall
include ::openstack::cinder::haproxy
}
if $service_enabled {
include ::openstack::cinder::api::backends
}
class { '::openstack::cinder::pre':
stage => pre
}
class { '::openstack::cinder::post':
stage => post
}
}
# Pre-stage for cinder: on a distributed cloud system controller the
# dcorch cinder API proxy must be running before the cinder manifest can
# be applied (it is stopped again in openstack::cinder::post).
class openstack::cinder::pre {
  include ::openstack::cinder::params
  $enabled = str2bool($::openstack::cinder::params::enable_cinder_service)
  if $::platform::params::distributed_cloud_role =='systemcontroller' and $enabled {
    # need to enable cinder-api-proxy in order to apply the cinder manifest
    exec { 'Enable Dcorch Cinder API Proxy':
      command => 'systemctl enable dcorch-cinder-api-proxy; systemctl start dcorch-cinder-api-proxy',
    }
  }
}
# Post-stage for cinder: writes the "configuration complete" flag files
# used by openstack::cinder::params on subsequent applies, and hands the
# cinder services back from systemd to SM.
class openstack::cinder::post
  inherits ::openstack::cinder::params {

  # Ensure that phases are marked as complete
  if $is_initial_cinder {
    file { $initial_cinder_config_flag:
      ensure => present
    }
  }

  if $is_initial_cinder_lvm {
    file { $initial_cinder_lvm_config_flag:
      ensure => present
    }
  }

  if $is_initial_cinder_ceph {
    file { $initial_cinder_ceph_config_flag:
      ensure => present
    }
  }

  # To workaround an upstream bug in rbd code, we need to create
  # an empty file /etc/ceph/ceph.client.None.keyring in order to
  # do cinder backup and restore.
  file { '/etc/ceph/ceph.client.None.keyring':
    ensure => file,
    owner  => 'root',
    group  => 'root',
    mode   => '0644',
  }

  if $is_node_cinder_lvm {
    file { $node_cinder_lvm_config_flag:
      ensure => present
    }
  }

  # cinder-api needs to be running in order to apply the cinder manifest,
  # however, it needs to be stopped/disabled to allow SM to manage the service.
  # To allow for the transition it must be explicitly stopped. Once puppet
  # can directly handle SM managed services, then this can be removed.
  exec { 'Disable OpenStack - Cinder API':
    command => 'systemctl stop openstack-cinder-api; systemctl disable openstack-cinder-api',
    require => Class['openstack::cinder'],
  }

  if $::platform::params::distributed_cloud_role =='systemcontroller' {
    # stop and disable the cinder api proxy to allow SM to manage the service
    exec { 'Disable Dcorch Cinder API Proxy':
      command => 'systemctl stop dcorch-cinder-api-proxy; systemctl disable dcorch-cinder-api-proxy',
      require => Class['openstack::cinder'],
    }
  }
}
# Asks SM to restart all cinder services so runtime configuration
# changes take effect.
class openstack::cinder::reload {
  platform::sm::restart {'cinder-scheduler': }
  platform::sm::restart {'cinder-volume': }
  platform::sm::restart {'cinder-backup': }
  platform::sm::restart {'cinder-api': }
}
# Called for runtime changes
#
# Re-applies the full cinder configuration and schedules a service
# restart in the post stage.
class openstack::cinder::runtime
  inherits ::openstack::cinder::params {

  include ::openstack::cinder
  include ::openstack::cinder::api

  class { '::openstack::cinder::reload':
    stage => post
  }
}
# Called for runtime changes on region
#
# Re-registers the cinder keystone endpoints on the active controller.
class openstack::cinder::endpoint::runtime {
  if str2bool($::is_controller_active) {
    include ::cinder::keystone::auth
  }
}
# Called for service_parameter runtime changes:
# - Currently cinder.conf only changes
#   - external SAN backend sections
#   - default section changes
class openstack::cinder::service_param::runtime
  inherits ::openstack::cinder::params {

  class { '::cinder::backends':
    enabled_backends => $enabled_backends
  }
  include ::openstack::cinder::config::default
  include ::openstack::cinder::backends::san

  # Restart services after the config changes are applied.
  class { '::openstack::cinder::reload':
    stage => post
  }
}
# Called for rbd backend runtime changes
#
# Re-applies the ceph backend sections and volume types, then restarts
# the cinder services in the post stage.
class openstack::cinder::backends::ceph::runtime
  inherits ::openstack::cinder::params {

  class { '::cinder::backends':
    enabled_backends => $enabled_backends
  }

  if $service_enabled {
    include ::openstack::cinder::backends::ceph
    include ::openstack::cinder::api::backends
  }

  class { '::openstack::cinder::reload':
    stage => post
  }
}

View File

@ -1,43 +0,0 @@
# Installs the openstack CLI client environment files: the admin openrc
# (credentials sourced by operators and scripts), the LDAP openrc
# template, and bash completion for the openstack command.
class openstack::client {

  include ::platform::client::params

  $admin_username = $::platform::client::params::admin_username
  $identity_auth_url = $::platform::client::params::identity_auth_url
  $identity_region = $::platform::client::params::identity_region
  $identity_api_version = $::platform::client::params::identity_api_version
  $admin_user_domain = $::platform::client::params::admin_user_domain
  $admin_project_domain = $::platform::client::params::admin_project_domain
  $admin_project_name = $::platform::client::params::admin_project_name
  $keystone_identity_region = $::platform::client::params::keystone_identity_region

  include ::platform::client::credentials::params
  $keyring_file = $::platform::client::credentials::params::keyring_file

  # Mode 0640 nova:root — contains the admin credentials.
  file {'/etc/nova/openrc':
    ensure  => 'present',
    mode    => '0640',
    owner   => 'nova',
    group   => 'root',
    content => template('openstack/openrc.admin.erb'),
  }

  file {'/etc/nova/ldap_openrc_template':
    ensure  => 'present',
    mode    => '0644',
    content => template('openstack/openrc.ldap.erb'),
  }

  # Generated at catalog-compile time by asking the client for its
  # completion script.
  file {'/etc/bash_completion.d/openstack':
    ensure  => 'present',
    mode    => '0644',
    content => generate('/usr/bin/openstack', 'complete'),
  }
}
# Applied during initial bootstrap; currently identical to ::openstack::client.
class openstack::client::bootstrap {
  include ::openstack::client
}
# Applied during upgrades; currently identical to ::openstack::client.
class openstack::client::upgrade {
  include ::openstack::client
}

View File

@ -1,209 +0,0 @@
# Shared parameters for the glance manifests (ports, directories,
# backends, registry location and caching/cleanup behavior).
class openstack::glance::params (
  $api_host,
  $service_enabled = true,
  $api_port = 9292,
  $region_name = undef,
  $service_type = 'image',
  $glance_directory = '/opt/cgcs/glance',
  $glance_image_conversion_dir = '/opt/img-conversions/glance',
  $enabled_backends = [],
  $service_create = false,
  # '0.0.0.0' means "local registry"; see openstack::glance::api.
  $configured_registry_host = '0.0.0.0',
  $remote_registry_region_name = undef,
  $glance_cached = false,
  # Hours between runs of glance-cleaner (also its --delete-interval).
  $glance_delete_interval = 6,
  $rbd_store_pool = 'images',
  $rbd_store_ceph_conf = '/etc/ceph/ceph.conf',
) { }
# Base glance configuration: state directories, database, registry
# service, periodic cleanup crons, rabbit notifications and the file
# backend. The API service itself is configured in openstack::glance::api.
class openstack::glance
  inherits ::openstack::glance::params {

  if $service_enabled {
    include ::platform::params
    include ::platform::amqp::params

    file { $glance_directory:
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }
    -> file { "${glance_directory}/image-cache":
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }
    -> file { "${glance_directory}/images":
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }
    -> file { $glance_image_conversion_dir:
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }

    # Bind on the wildcard address matching the management subnet family.
    $bind_host = $::platform::network::mgmt::params::subnet_version ? {
      6       => '::',
      default => '0.0.0.0',
    }

    if $::platform::params::init_database {
      class { '::glance::db::postgresql':
        encoding => 'UTF8',
      }
    }

    include ::glance::api::authtoken
    include ::glance::registry::authtoken

    class { '::glance::registry':
      bind_host => $bind_host,
      workers   => $::platform::params::eng_workers,
    }

    # Run glance-manage to purge deleted rows daily at the 45 minute mark
    # (hour '*/24' is equivalent to hour 0 — once per day at 00:45).
    cron { 'glance-purge-deleted':
      ensure      => 'present',
      command     => '/usr/bin/glance-purge-deleted-active',
      environment => 'PATH=/bin:/usr/bin:/usr/sbin',
      minute      => '45',
      hour        => '*/24',
      user        => 'root',
    }

    # Remove images deleted more than $glance_delete_interval hours ago.
    cron { 'glance-cleaner':
      ensure      => 'present',
      command     => "/usr/bin/glance-cleaner --config-file /etc/glance/glance-api.conf --delete-interval ${glance_delete_interval}",
      environment => 'PATH=/bin:/usr/bin:/usr/sbin',
      minute      => '35',
      hour        => "*/${glance_delete_interval}",
      user        => 'root',
    }

    # In glance cached mode run the pruner once every 6 hours to clean
    # stale or orphaned images
    if $::openstack::glance::params::glance_cached {
      cron { 'glance-cache-pruner':
        ensure      => 'present',
        command     => '/usr/bin/glance-cache-pruner --config-file /etc/glance/glance-api.conf',
        environment => 'PATH=/bin:/usr/bin:/usr/sbin',
        minute      => '15',
        hour        => '*/6',
        user        => 'root',
      }
    }

    class { '::glance::notify::rabbitmq':
      rabbit_use_ssl        => $::platform::amqp::params::ssl_enabled,
      default_transport_url => $::platform::amqp::params::transport_url,
    }

    if 'file' in $enabled_backends {
      include ::glance::backend::file
    }
  }
}
# Opens the glance API port in the platform firewall.
class openstack::glance::firewall
  inherits ::openstack::glance::params {

  platform::firewall::rule { 'glance-api':
    service_name => 'glance',
    ports        => $api_port,
  }
}
# Adds the haproxy frontend/backend pair for the glance REST API.
class openstack::glance::haproxy
  inherits ::openstack::glance::params {

  platform::haproxy::proxy { 'glance-restapi':
    server_name        => 's-glance',
    public_port        => $api_port,
    private_port       => $api_port,
    private_ip_address => $api_host,
  }
}
# Configures the glance API service: keystone registration, registry
# host selection, the rbd backend when enabled, and firewall/haproxy
# wiring.
class openstack::glance::api
  inherits ::openstack::glance::params {
  include ::platform::params

  if $service_enabled {
    if ($::openstack::glance::params::service_create and
        $::platform::params::init_keystone) {
      include ::glance::keystone::auth
    }

    include ::platform::params
    $api_workers = $::platform::params::eng_workers

    include ::platform::network::mgmt::params
    # magical hack for magical config - glance option registry_host requires brackets
    if $configured_registry_host == '0.0.0.0' {
      $registry_host = $::platform::network::mgmt::params::subnet_version ? {
        6       => '::0',
        default => '0.0.0.0',
        # TO-DO(mmagr): Add IPv6 support when hostnames are used
      }
    } else {
      $registry_host = $configured_registry_host
    }

    # enable copy-on-write cloning from glance to cinder only for rbd
    # this speeds up creation of volumes from images
    $show_image_direct_url = ('rbd' in $enabled_backends)

    # Subclouds authenticate with the central region; user tokens are
    # not forwarded there.
    if ($::platform::params::distributed_cloud_role == 'subcloud') {
      $api_use_user_token = false
    } else {
      $api_use_user_token = true
    }

    class { '::glance::api':
      bind_host                   => $api_host,
      use_user_token              => $api_use_user_token,
      registry_host               => $registry_host,
      remote_registry_region_name => $remote_registry_region_name,
      workers                     => $api_workers,
      sync_db                     => $::platform::params::init_database,
      show_image_direct_url       => $show_image_direct_url,
    }

    if 'rbd' in $enabled_backends {
      class { '::glance::backend::rbd':
        rbd_store_pool      => $rbd_store_pool,
        rbd_store_ceph_conf => $rbd_store_ceph_conf,
      }
    }

    include ::openstack::glance::firewall
    include ::openstack::glance::haproxy
  }
}
# Asks SM to restart glance-api so runtime changes take effect.
class openstack::glance::api::reload {
  platform::sm::restart {'glance-api': }
}
# Called for runtime changes: re-applies the glance API configuration
# and schedules a glance-api restart in the post stage.
class openstack::glance::api::runtime
  inherits ::openstack::glance::params {

  if $service_enabled {
    include ::openstack::glance::api

    class { '::openstack::glance::api::reload':
      stage => post
    }
  }
}

View File

@ -1,122 +0,0 @@
# Shared parameters for the gnocchi manifests.
class openstack::gnocchi::params (
  $api_port = 8041,
  $region_name = undef,
  $service_name = 'openstack-gnocchi',
  $service_create = false,
  $service_enabled = true,
) { }
# Base gnocchi configuration: core service, API, client, keystone
# authtoken, file storage driver and (on the primary) the database.
class openstack::gnocchi
  inherits ::openstack::gnocchi::params {

  if $service_enabled {
    include ::platform::params

    include ::gnocchi
    include ::gnocchi::api
    include ::gnocchi::client
    include ::gnocchi::keystone::authtoken
    include ::gnocchi::storage::file

    if $::platform::params::init_database {
      include ::gnocchi::db::postgresql
    }
  }
}
# Opens the gnocchi API port in the platform firewall.
class openstack::gnocchi::firewall
  inherits ::openstack::gnocchi::params {

  platform::firewall::rule { 'gnocchi-api':
    service_name => 'gnocchi',
    ports        => $api_port,
  }
}
# Adds the haproxy frontend/backend pair for the gnocchi REST API.
class openstack::gnocchi::haproxy
  inherits ::openstack::gnocchi::params {

  platform::haproxy::proxy { 'gnocchi-restapi':
    server_name  => 's-gnocchi-restapi',
    public_port  => $api_port,
    private_port => $api_port,
  }
}
# Configures gnocchi-metricd worker count and, when running multiple
# workers, file-based coordination between them.
class openstack::gnocchi::metricd
  inherits ::openstack::gnocchi::params {

  if $service_enabled {
    include ::platform::params

    $metricd_workers = $::platform::params::eng_workers_by_2

    class { '::gnocchi::metricd':
      workers => $metricd_workers
    }

    # Multiple workers need a coordination backend; use file locks.
    if $metricd_workers > 1 {
      gnocchi_config {
        'DEFAULT/coordination_url': value => 'file:///opt/gnocchi/locks';
      }
    }
  }
}
# Configures the gnocchi API: keystone registration, the WSGI config
# file, one-time storage initialization (db sync with sack creation),
# and firewall/haproxy wiring.
class openstack::gnocchi::api
  inherits ::openstack::gnocchi::params {

  include ::platform::params

  # The gnocchi user and service are always required and they
  # are used by subclouds when the service itself is disabled
  # on System Controller
  # whether it creates the endpoint is determined by
  # gnocchi::keystone::auth::configure_endpoint which is
  # set via sysinv puppet
  if $::openstack::gnocchi::params::service_create and
      $::platform::params::init_keystone {
    include ::gnocchi::keystone::auth
  }

  if $service_enabled {

    $api_workers = $::platform::params::eng_workers_by_2

    include ::platform::network::mgmt::params
    $url_host = $::platform::network::mgmt::params::controller_address_url

    file { '/usr/share/gnocchi/gnocchi-api.conf':
      ensure  => file,
      content => template('openstack/gnocchi-api.conf.erb'),
      owner   => 'root',
      group   => 'root',
      mode    => '0640',
    }

    # Probe (at catalog-compile time) whether storage was already
    # initialized on a previous apply.
    # BUGFIX: use File.exist? — File.exists? is deprecated and removed
    # in Ruby 3.2, which would break catalog compilation.
    $storage_configured = inline_template("<% if File.exist?('/opt/gnocchi/tmp/gnocchi-config') -%>true<% else %>false<% end -%>")
    if ! str2bool($storage_configured) {
      include ::openstack::gnocchi::metricd

      # One sack per metricd worker plus headroom.
      $sacks_number = $::openstack::gnocchi::metricd::metricd_workers + 2
      if $::platform::params::init_database {
        $options = "--sacks-number ${sacks_number}"
      } else {
        # Secondary controller: the index and archive policies already
        # exist; only (re)create the incoming sacks.
        $options = "--sacks-number ${sacks_number} --skip-index --skip-archive-policies-creation"
      }

      class { '::gnocchi::db::sync':
        extra_opts => $options
      }
    }

    include ::openstack::gnocchi::firewall
    include ::openstack::gnocchi::haproxy
  }
}

View File

@ -1,237 +0,0 @@
# Shared parameters for the heat manifests, plus derived worker count
# and management-network API host.
class openstack::heat::params (
  $api_port = 8004,
  $cfn_port = 8000,
  $cloudwatch_port = 8003,
  $region_name = undef,
  $domain_name = undef,
  $domain_admin = undef,
  $domain_pwd = undef,
  $service_name = 'openstack-heat',
  $service_tenant = undef,
  $default_endpoint_type = 'internalURL',
  $service_create = false,
  $service_enabled = true,
) {
  include ::platform::params
  $api_workers = $::platform::params::eng_workers

  include ::platform::network::mgmt::params
  $api_host = $::platform::network::mgmt::params::controller_address
}
# Configures the Heat orchestration service: messaging/database wiring,
# the engine, keystone domain/role setup (region vs. non-region modes),
# shared-service advertisement, and a daily purge-deleted cron job.
class openstack::heat
  inherits ::openstack::heat::params {

  include ::platform::params

  if $service_enabled {
    include ::platform::amqp::params

    # DB schema objects are only created on the node that initializes the
    # cluster database.
    if $::platform::params::init_database {
      include ::heat::db::postgresql
    }

    include ::heat::keystone::authtoken

    class { '::heat':
      rabbit_use_ssl             => $::platform::amqp::params::ssl_enabled,
      default_transport_url      => $::platform::amqp::params::transport_url,
      heat_clients_endpoint_type => $default_endpoint_type,
      sync_db                    => $::platform::params::init_database,
    }

    class { '::heat::engine':
      num_engine_workers => $::platform::params::eng_workers
    }
  }

  if $::platform::params::region_config {
    # In region mode, advertise glance/cinder as shared services only when
    # they actually live in the other (region one) region.
    if $::openstack::glance::params::region_name != $::platform::params::region_2_name {
      $shared_service_glance = [$::openstack::glance::params::service_type]
    } else {
      $shared_service_glance = []
    }
    # skip the check if cinder region name has not been configured
    if ($::openstack::cinder::params::region_name != undef and
        $::openstack::cinder::params::region_name != $::platform::params::region_2_name) {
      $shared_service_cinder = [$::openstack::cinder::params::service_type,
                                $::openstack::cinder::params::service_type_v2,
                                $::openstack::cinder::params::service_type_v3]
    } else {
      $shared_service_cinder = []
    }
    $shared_services = concat($shared_service_glance, $shared_service_cinder)
    heat_config {
      'DEFAULT/region_name_for_shared_services': value => $::platform::params::region_1_name;
      'DEFAULT/shared_services_types': value => join($shared_services,',');
    }

    # Subclouds use the region one service tenant and heat domain. In region
    # mode we duplicate these in each region.
    if $::platform::params::distributed_cloud_role != 'subcloud' {
      keystone_tenant { $service_tenant:
        ensure      => present,
        enabled     => true,
        description => "Tenant for ${::platform::params::region_2_name}",
      }
      class { '::heat::keystone::domain':
        domain_name   => $domain_name,
        domain_admin  => $domain_admin,
        manage_domain => true,
        manage_user   => true,
        manage_role   => true,
      }
    }
  }
  else {
    if str2bool($::is_initial_config_primary) {
      # Only setup roles and domain information on the controller during initial config
      if $service_enabled {
        # NOTE(review): require on ::heat::engine only makes sense when the
        # engine class was actually declared above (service enabled).
        keystone_user_role { 'admin@admin':
          ensure  => present,
          roles   => ['admin', '_member_', 'heat_stack_owner'],
          require => Class['::heat::engine'],
        }
      } else {
        keystone_user_role { 'admin@admin':
          ensure => present,
          roles  => ['admin', '_member_', 'heat_stack_owner'],
        }
      }

      # Heat stack owner needs to be created
      keystone_role { 'heat_stack_owner':
        ensure => present,
      }

      class { '::heat::keystone::domain':
        manage_domain => true,
        manage_user   => true,
        manage_role   => true,
      }
    } else {
      # Second controller does not invoke keystone, but does need configuration
      class { '::heat::keystone::domain':
        manage_domain => false,
        manage_user   => false,
        manage_role   => false,
      }
    }
  }

  if $service_enabled {
    # clients_heat endpoint type is publicURL to support wait conditions
    heat_config {
      'clients_neutron/endpoint_type': value => $default_endpoint_type;
      'clients_nova/endpoint_type': value => $default_endpoint_type;
      'clients_glance/endpoint_type': value => $default_endpoint_type;
      'clients_cinder/endpoint_type': value => $default_endpoint_type;
      'clients_ceilometer/endpoint_type':value => $default_endpoint_type;
      'clients_heat/endpoint_type': value => 'publicURL';
      'clients_keystone/endpoint_type': value => $default_endpoint_type;
    }

    # Run heat-manage purge_deleted daily at the 20 minute mark
    # ('*/24' in the hour field is equivalent to hour 0, i.e. once a day).
    cron { 'heat-purge-deleted':
      ensure      => 'present',
      command     => '/usr/bin/heat-purge-deleted-active',
      environment => 'PATH=/bin:/usr/bin:/usr/sbin',
      minute      => '20',
      hour        => '*/24',
      user        => 'root',
    }
  }
}
# Opens the platform firewall for the three Heat endpoints
# (API, CFN, CloudWatch).
class openstack::heat::firewall
  inherits ::openstack::heat::params {

  platform::firewall::rule { 'heat-api':
    service_name => 'heat',
    ports        => $api_port,
  }

  platform::firewall::rule { 'heat-cfn':
    service_name => 'heat-cfn',
    ports        => $cfn_port,
  }

  platform::firewall::rule { 'heat-cloudwatch':
    service_name => 'heat-cloudwatch',
    ports        => $cloudwatch_port,
  }
}
# Declares haproxy frontends for the Heat endpoints; public and private
# sides use the same port numbers.
class openstack::heat::haproxy
  inherits ::openstack::heat::params {

  platform::haproxy::proxy { 'heat-restapi':
    server_name  => 's-heat',
    public_port  => $api_port,
    private_port => $api_port,
  }

  platform::haproxy::proxy { 'heat-cfn-restapi':
    server_name  => 's-heat-cfn',
    public_port  => $cfn_port,
    private_port => $cfn_port,
  }

  platform::haproxy::proxy { 'heat-cloudwatch':
    server_name  => 's-heat-cloudwatch',
    public_port  => $cloudwatch_port,
    private_port => $cloudwatch_port,
  }
}
# Configures the Heat API services (api, api-cfn, api-cloudwatch) plus
# keystone identities, firewall rules and haproxy frontends.
class openstack::heat::api
  inherits ::openstack::heat::params {

  # The heat user and service are always required and they
  # are used by subclouds when the service itself is disabled
  # on System Controller
  # whether it creates the endpoint is determined by
  # heat::keystone::auth::configure_endpoint which is
  # set via sysinv puppet
  if ($::openstack::heat::params::service_create and
      $::platform::params::init_keystone) {
    include ::heat::keystone::auth
    include ::heat::keystone::auth_cfn
  }

  if $service_enabled {
    class { '::heat::api':
      bind_host => $api_host,
      workers   => $api_workers,
    }

    class { '::heat::api_cfn':
      bind_host => $api_host,
      workers   => $api_workers,
    }

    class { '::heat::api_cloudwatch':
      bind_host => $api_host,
      workers   => $api_workers,
    }

    include ::openstack::heat::firewall
    include ::openstack::heat::haproxy
  }
}
# Restarts heat-engine via service management (SM).
class openstack::heat::engine::reload {
  platform::sm::restart {'heat-engine': }
}

# Runtime re-apply of the heat manifest; the engine restart is deferred to
# the post stage so it runs after configuration changes land.
class openstack::heat::engine::runtime {
  include ::openstack::heat

  class {'::openstack::heat::engine::reload':
    stage => post
  }
}

View File

@ -82,20 +82,6 @@ class openstack::horizon
$workers = $::platform::params::eng_workers_by_2
include ::openstack::murano::params
if $::openstack::murano::params::service_enabled {
$murano_enabled = 'True'
} else {
$murano_enabled = 'False'
}
include ::openstack::magnum::params
if $::openstack::magnum::params::service_enabled {
$magnum_enabled = 'True'
} else {
$magnum_enabled = 'False'
}
if str2bool($::is_initial_config) {
exec { 'Stop lighttpd':
command => 'systemctl stop lighttpd; systemctl disable lighttpd',

View File

@ -1,176 +0,0 @@
# Shared parameters for the openstack::ironic* classes, including the
# versioned tftpboot directory layout under /opt/cgcs/ironic.
class openstack::ironic::params (
  $api_port = 6485,
  $service_enabled = false,
  $service_name = 'openstack-ironic',
  $region_name = undef,
  $default_endpoint_type = 'internalURL',
  $tftp_server = undef,
  $provisioning_network = undef,
  $controller_0_if = undef,
  $controller_1_if = undef,
  $netmask = undef,
) {
  include ::platform::network::mgmt::params
  # API binds to the controller address on the management network.
  $api_host = $::platform::network::mgmt::params::controller_address

  include ::platform::params
  $sw_version = $::platform::params::software_version
  # tftpboot tree is versioned per software release to survive upgrades.
  $ironic_basedir = '/opt/cgcs/ironic'
  $ironic_versioned_dir = "${ironic_basedir}/${sw_version}"
  $ironic_tftpboot_dir = "${ironic_versioned_dir}/tftpboot"
}
# Firewall rule for the Ironic API; only declared while the service is
# enabled (nothing is cleaned up when disabled — rules simply stop being
# managed).
class openstack::ironic::firewall
  inherits ::openstack::ironic::params {

  if $service_enabled {
    platform::firewall::rule { 'ironic-api':
      service_name => 'ironic',
      ports        => $api_port,
    }
  }
}

# haproxy frontends for the Ironic API.  The tftp-facing frontend binds the
# provisioning tftp server address and is plain HTTP (IPA agents cannot
# verify TLS during netboot).
class openstack::ironic::haproxy
  inherits ::openstack::ironic::params {

  if $service_enabled {
    platform::haproxy::proxy { 'ironic-restapi':
      server_name  => 's-ironic-restapi',
      public_port  => $api_port,
      private_port => $api_port,
    }

    platform::haproxy::proxy { 'ironic-tftp-restapi':
      server_name       => 's-ironic-tftp-restapi',
      public_port       => $api_port,
      private_port      => $api_port,
      public_ip_address => $tftp_server,
      enable_https      => false,
    }
  }
}
# Configures the Ironic bare-metal service: messaging, database, conductor,
# PXE driver, and the versioned tftpboot directory tree.
class openstack::ironic
  inherits ::openstack::ironic::params {

  include ::platform::params
  include ::platform::amqp::params
  include ::platform::network::mgmt::params
  include ::ironic::neutron
  include ::ironic::glance

  # DB objects only on the node initializing the cluster database; schema
  # sync only on the initial primary controller.
  if $::platform::params::init_database {
    include ::ironic::db::postgresql
  }

  if str2bool($::is_initial_config_primary) {
    include ::ironic::db::sync
  }

  class {'::ironic':
    rabbit_use_ssl        => $::platform::amqp::params::ssl_enabled,
    default_transport_url => $::platform::amqp::params::transport_url,
    sync_db               => false,
    my_ip                 => $api_host,
  }

  # IPA ramdisk talks back to the API via the tftp server address when one
  # is configured.
  if $tftp_server != undef {
    $ipa_api_url = "http://${tftp_server}:${api_port}"
  }
  else {
    $ipa_api_url = undef
  }

  # provisioning and cleaning networks are intentionally the same
  class {'::ironic::conductor':
    provisioning_network => $provisioning_network,
    cleaning_network     => $provisioning_network,
    api_url              => $ipa_api_url,
  }

  $tftp_master_path = "${ironic_tftpboot_dir}/master_images"
  class {'::ironic::drivers::pxe':
    tftp_server       => $tftp_server,
    tftp_root         => $ironic_tftpboot_dir,
    tftp_master_path  => $tftp_master_path,
    pxe_append_params => 'nofb nomodeset vga=normal console=ttyS0,115200n8',
  }

  # configure tftp root directory
  # (chained so parent dirs exist before children)
  if $::platform::params::init_database {
    $ironic_tftp_root_dir = "/opt/cgcs/ironic/${sw_version}"
    file { $ironic_basedir:
      ensure => 'directory',
      owner  => 'ironic',
      group  => 'root',
      mode   => '0755',
    }
    -> file { $ironic_versioned_dir:
      ensure => 'directory',
      owner  => 'ironic',
      group  => 'root',
      mode   => '0755',
    }
    -> file { $ironic_tftpboot_dir:
      ensure => 'directory',
      owner  => 'ironic',
      group  => 'root',
      mode   => '0755',
    }
  }

  # Boot loader binaries are copied from syslinux only on the active
  # controller.
  if str2bool($::is_controller_active) {
    file { "${ironic_tftpboot_dir}/pxelinux.0":
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
      source => '/usr/share/syslinux/pxelinux.0'
    }
    file { "${ironic_tftpboot_dir}/chain.c32":
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
      source => '/usr/share/syslinux/chain.c32'
    }
  }
}
# Configures the Ironic API endpoint, its keystone identity (when enabled),
# and the associated haproxy/firewall wiring.
class openstack::ironic::api
  inherits ::openstack::ironic::params {

  class { '::ironic::api':
    port    => $api_port,
    host_ip => $api_host,
  }

  if $service_enabled {
    include ::ironic::keystone::auth
  }

  include ::openstack::ironic::haproxy
  include ::openstack::ironic::firewall
}

# Recreates the versioned ironic tftpboot directory tree during upgrade
# (same chain as in openstack::ironic, which only runs it on DB init).
class openstack::ironic::upgrade
  inherits ::openstack::ironic::params{

  file { $ironic_basedir:
    ensure => 'directory',
    owner  => 'ironic',
    group  => 'root',
    mode   => '0755',
  }
  -> file { $ironic_versioned_dir:
    ensure => 'directory',
    owner  => 'ironic',
    group  => 'root',
    mode   => '0755',
  }
  -> file { $ironic_tftpboot_dir:
    ensure => 'directory',
    owner  => 'ironic',
    group  => 'root',
    mode   => '0755',
  }
}

View File

@ -52,7 +52,7 @@ class openstack::keystone (
$bind_host = $::platform::network::mgmt::params::controller_address_url
}
Class[$name] -> Class['::platform::client'] -> Class['::openstack::client']
Class[$name] -> Class['::platform::client']
include ::keystone::client
@ -217,7 +217,7 @@ class openstack::keystone::bootstrap(
include ::keystone::db::postgresql
Class[$name] -> Class['::platform::client'] -> Class['::openstack::client']
Class[$name] -> Class['::platform::client']
# Create the parent directory for fernet keys repository
file { $keystone_key_repo_path:
@ -320,7 +320,6 @@ class openstack::keystone::endpointgroup
class openstack::keystone::server::runtime {
include ::platform::client
include ::openstack::client
include ::openstack::keystone
class {'::openstack::keystone::reload':
@ -339,52 +338,6 @@ class openstack::keystone::endpoint::runtime {
include ::nfv::keystone::auth
include ::fm::keystone::auth
include ::ceilometer::keystone::auth
include ::openstack::heat::params
if $::openstack::heat::params::service_enabled {
include ::heat::keystone::auth
include ::heat::keystone::auth_cfn
}
include ::neutron::keystone::auth
include ::nova::keystone::auth
include ::nova::keystone::auth_placement
include ::openstack::panko::params
if $::openstack::panko::params::service_enabled {
include ::panko::keystone::auth
}
include ::openstack::gnocchi::params
if $::openstack::gnocchi::params::service_enabled {
include ::gnocchi::keystone::auth
}
include ::openstack::cinder::params
if $::openstack::cinder::params::service_enabled {
include ::cinder::keystone::auth
}
include ::openstack::glance::params
include ::glance::keystone::auth
include ::openstack::murano::params
if $::openstack::murano::params::service_enabled {
include ::murano::keystone::auth
}
include ::openstack::magnum::params
if $::openstack::magnum::params::service_enabled {
include ::magnum::keystone::auth
include ::magnum::keystone::domain
}
include ::openstack::ironic::params
if $::openstack::ironic::params::service_enabled {
include ::ironic::keystone::auth
}
include ::platform::ceph::params
if $::platform::ceph::params::service_enabled {
include ::platform::ceph::rgw::keystone::auth
@ -401,7 +354,6 @@ class openstack::keystone::endpoint::runtime {
}
include ::smapi::keystone::auth
}
}

View File

@ -1,85 +0,0 @@
# Shared parameters for the openstack::magnum* classes; the service is
# disabled by default.
class openstack::magnum::params (
  $api_port = 9511,
  $service_enabled = false,
  $service_name = 'openstack-magnum',
) {}
# Configures the Magnum container-infra service: database wiring, clients,
# conductor, certificates and the core ::magnum class (rabbit transport).
#
# Fixes over the previous revision:
#  * ::magnum::db::postgresql was conditionally included twice under the
#    identical $init_database condition — the duplicate was dead code and
#    has been removed (include is idempotent, so behavior is unchanged).
#  * ::platform::params is now included before its variables
#    ($init_database) are referenced, instead of afterwards.
class openstack::magnum
  inherits ::openstack::magnum::params {

  include ::platform::params
  include ::platform::amqp::params

  # DB objects only on the node that initializes the cluster database.
  if $::platform::params::init_database {
    include ::magnum::db::postgresql
  }

  # Schema sync only on the initial primary controller.
  if str2bool($::is_initial_config_primary) {
    class { '::magnum::db::sync': }
  }

  include ::magnum::client
  include ::magnum::clients
  include ::magnum::db
  include ::magnum::logging
  include ::magnum::conductor
  include ::magnum::certificates

  class {'::magnum':
    rabbit_use_ssl        => $::platform::amqp::params::ssl_enabled,
    default_transport_url => $::platform::amqp::params::transport_url,
  }
}
# Firewall rule for the Magnum API, declared only while the service is
# enabled.
class openstack::magnum::firewall
  inherits ::openstack::magnum::params {

  if $service_enabled {
    platform::firewall::rule { 'magnum-api':
      service_name => 'magnum',
      ports        => $api_port,
    }
  }
}

# haproxy frontend for the Magnum API.
class openstack::magnum::haproxy
  inherits ::openstack::magnum::params {

  if $service_enabled {
    platform::haproxy::proxy { 'magnum-restapi':
      server_name  => 's-magnum',
      public_port  => $api_port,
      private_port => $api_port,
    }
  }
}

# Configures the Magnum API.  Note the service is declared with
# enabled => false: start/stop is owned by service management, puppet only
# lays down configuration.
class openstack::magnum::api
  inherits ::openstack::magnum::params {

  include ::platform::network::mgmt::params
  $api_host = $::platform::network::mgmt::params::controller_address

  if $service_enabled {
    include ::magnum::keystone::auth
    include ::magnum::keystone::authtoken
    include ::magnum::keystone::domain
  }

  class { '::magnum::api':
    enabled => false,
    host    => $api_host,
    sync_db => false,
  }

  include ::openstack::magnum::haproxy
  include ::openstack::magnum::firewall
}

View File

@ -1,288 +0,0 @@
# Shared parameters for the openstack::murano* classes, including the
# dedicated murano-agent rabbitmq instance (ports 5672/5671, certificate
# directory, TLS settings).  The three listen-option parameters have no
# defaults and must come from hiera.
class openstack::murano::params (
  $tcp_listen_options,
  $rabbit_tcp_listen_options,
  $rabbit_cipher_list,
  $api_port = 8082,
  $auth_password = 'guest',
  $auth_user = 'guest',
  $service_enabled = false,
  $disable_murano_agent = true,
  $service_name = 'openstack-murano',
  $database_idle_timeout = 60,
  $database_max_pool_size = 1,
  $database_max_overflow = 10,
  $rabbit_normal_port = '5672',
  $rabbit_ssl_port = '5671',
  $rabbit_certs_dir = '/etc/ssl/private/murano-rabbit',
  $tlsv2 = 'tlsv1.2',
  $tlsv1 = 'tlsv1.1',
  $ssl_fail_if_no_peer_cert = true,
  $disk_free_limit = '10000000',
  $heartbeat = '30',
  $ssl = false,
) {}
# Firewall rules for Murano.  The murano-api rule is opened whenever the
# service is enabled; the dedicated agent-rabbit port (5671 TLS or 5672
# plain) is toggled so that exactly one of the pair is present and the
# other is explicitly removed.  Nothing is declared at all when the
# service is disabled.
class openstack::murano::firewall
  inherits ::openstack::murano::params {

  if $service_enabled {
    platform::firewall::rule { 'murano-api':
      service_name => 'murano',
      ports        => $api_port,
    }
    if $disable_murano_agent != true {
      # Agent enabled: open the port matching the SSL setting, remove the
      # other so a previous configuration does not linger.
      if $ssl == true {
        platform::firewall::rule { 'murano-rabbit-ssl':
          service_name => 'murano-rabbit-ssl',
          ports        => 5671,
        }
        platform::firewall::rule { 'murano-rabbit-regular':
          ensure       => absent,
          ports        => 5672,
          service_name => 'murano-rabbit-regular',
        }
      } else {
        platform::firewall::rule { 'murano-rabbit-regular':
          service_name => 'murano-rabbit-regular',
          ports        => 5672,
        }
        platform::firewall::rule { 'murano-rabbit-ssl':
          ensure       => absent,
          ports        => 5671,
          service_name => 'murano-rabbit-ssl',
        }
      }
    } else {
      # Agent disabled: make sure both rabbit ports are closed.
      platform::firewall::rule { 'murano-rabbit-regular':
        ensure       => absent,
        ports        => 5672,
        service_name => 'murano-rabbit-regular',
      }
      platform::firewall::rule { 'murano-rabbit-ssl':
        ensure       => absent,
        ports        => 5671,
        service_name => 'murano-rabbit-ssl',
      }
    }
  }
}
# haproxy frontend for the Murano API, declared only while the service is
# enabled.
class openstack::murano::haproxy
  inherits ::openstack::murano::params {

  if $service_enabled {
    platform::haproxy::proxy { 'murano-restapi':
      server_name  => 's-murano-restapi',
      public_port  => $api_port,
      private_port => $api_port,
    }
  }
}
# Configures Murano: database, dashboard, engine, the core ::murano class
# wired to its own dedicated rabbitmq (plain or TLS), and that rabbitmq
# instance itself.
class openstack::murano
  inherits ::openstack::murano::params {

  if $::platform::params::init_database {
    include ::murano::db::postgresql
  }

  if str2bool($::is_initial_config_primary) {
    class { '::murano::db::sync': }
  }

  include ::platform::params
  include ::platform::amqp::params
  include ::murano::client

  class { '::murano::dashboard':
    sync_db => false,
  }

  class { '::murano::engine':
    workers => $::platform::params::eng_workers_by_4,
  }

  # Select the agent-rabbit port/CA according to the SSL setting.
  if $ssl {
    $murano_rabbit_port = $rabbit_ssl_port
    $murano_cacert = "${rabbit_certs_dir}/ca-cert.pem"
  } else {
    $murano_rabbit_port = $rabbit_normal_port
    $murano_cacert = undef
  }

  include ::murano::params

  # rabbit_own_* points at the murano-only rabbit on the OAM address;
  # default_transport_url is the platform-wide AMQP bus.
  class {'::murano':
    use_syslog             => true,
    log_facility           => 'local2',
    service_host           => $::platform::network::mgmt::params::controller_address,
    service_port           => '8082',
    database_idle_timeout  => $database_idle_timeout,
    database_max_pool_size => $database_max_pool_size,
    database_max_overflow  => $database_max_overflow,
    sync_db                => false,
    rabbit_own_user        => $::openstack::murano::params::auth_user,
    rabbit_own_password    => $::openstack::murano::params::auth_password,
    rabbit_own_host        => $::platform::network::oam::params::controller_address,
    rabbit_own_port        => $murano_rabbit_port,
    rabbit_own_vhost       => '/',
    rabbit_own_use_ssl     => $ssl,
    rabbit_own_ca_certs    => $murano_cacert,
    disable_murano_agent   => $disable_murano_agent,
    api_workers            => $::platform::params::eng_workers_by_4,
    default_transport_url  => $::platform::amqp::params::transport_url,
  }

  # this rabbitmq is separate from the main one and used only for murano
  case $::platform::amqp::params::backend {
    'rabbitmq': {
      enable_murano_agent_rabbitmq { 'rabbitmq': }
    }
    default: {}
  }
}
# Configures the Murano API (enabled => false: lifecycle is owned by SM),
# its keystone identity, haproxy frontend and firewall rule.
class openstack::murano::api
  inherits ::openstack::murano::params {

  include ::platform::params

  class { '::murano::api':
    enabled => false,
    host    => $::platform::network::mgmt::params::controller_address,
  }

  # keystone endpoints are managed from the active controller, or from
  # either controller while an upgrade is in progress.
  $upgrade = $::platform::params::controller_upgrade
  if $service_enabled and (str2bool($::is_controller_active) or $upgrade) {
    include ::murano::keystone::auth
  }

  include ::openstack::murano::haproxy
  include ::openstack::murano::firewall
}
# Sets up the dedicated murano-agent rabbitmq instance: versioned mnesia
# home under /var/lib/rabbitmq/murano, certificate directory, and the
# murano-rabbitmq config/env files (SSL or plain template depending on
# the ssl parameter).
define enable_murano_agent_rabbitmq {
  include ::openstack::murano::params
  include ::platform::params

  # Rabbit configuration parameters
  $amqp_platform_sw_version = $::platform::params::software_version
  $kombu_ssl_ca_certs = "${::openstack::murano::params::rabbit_certs_dir}/ca-cert.pem"
  $kombu_ssl_keyfile = "${::openstack::murano::params::rabbit_certs_dir}/key.pem"
  $kombu_ssl_certfile = "${::openstack::murano::params::rabbit_certs_dir}/cert.pem"

  # Mnesia home is versioned per software release (mirrors the platform
  # rabbit layout).
  $murano_rabbit_dir = '/var/lib/rabbitmq/murano'
  $rabbit_home = "${murano_rabbit_dir}/${amqp_platform_sw_version}"
  $mnesia_base = "${rabbit_home}/mnesia"
  $rabbit_node = $::platform::amqp::params::node
  $murano_rabbit_node = "murano-${rabbit_node}"

  # Values consumed by the murano-rabbitmq.config ERB templates below.
  $default_user = $::openstack::murano::params::auth_user
  $default_pass = $::openstack::murano::params::auth_password
  $disk_free_limit = $::openstack::murano::params::disk_free_limit
  $heartbeat = $::openstack::murano::params::heartbeat
  $port = $::openstack::murano::params::rabbit_normal_port
  $rabbit_cipher_list = $::openstack::murano::params::rabbit_cipher_list

  $ssl_interface = $::platform::network::oam::params::controller_address
  $ssl_port = $::openstack::murano::params::rabbit_ssl_port
  $tlsv2 = $::openstack::murano::params::tlsv2
  $tlsv1 = $::openstack::murano::params::tlsv1
  $fail_if_no_peer_cert = $::openstack::murano::params::ssl_fail_if_no_peer_cert

  $tcp_listen_options = $::openstack::murano::params::tcp_listen_options
  $rabbit_tcp_listen_options = $::openstack::murano::params::rabbit_tcp_listen_options

  # murano rabbit ssl certificates are placed here
  file { $::openstack::murano::params::rabbit_certs_dir:
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }

  # Directory chain must exist before rabbitmq starts (hence -> Class).
  if $::platform::params::init_database {
    file { $murano_rabbit_dir:
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }
    -> file { $rabbit_home:
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }
    -> file { $mnesia_base:
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    } -> Class['::rabbitmq']
  }

  if $::openstack::murano::params::ssl {
    # Key/cert must be readable by the rabbitmq user; changing them
    # restarts the broker.
    $files_to_set_owner = [ $kombu_ssl_keyfile, $kombu_ssl_certfile ]
    file { $files_to_set_owner:
      owner   => 'rabbitmq',
      group   => 'rabbitmq',
      require => Package['rabbitmq-server'],
      notify  => Service['rabbitmq-server'],
    }
    $rabbitmq_conf_template= 'openstack/murano-rabbitmq.config.ssl.erb'
  } else {
    $rabbitmq_conf_template= 'openstack/murano-rabbitmq.config.erb'
  }

  file { '/etc/rabbitmq/murano-rabbitmq.config':
    ensure  => present,
    owner   => 'rabbitmq',
    group   => 'rabbitmq',
    mode    => '0640',
    content => template($rabbitmq_conf_template),
  }

  file { '/etc/rabbitmq/murano-rabbitmq-env.conf':
    ensure  => present,
    owner   => 'rabbitmq',
    group   => 'rabbitmq',
    mode    => '0640',
    content => template('openstack/murano-rabbitmq-env.conf.erb'),
  }
}
# Recreates the versioned murano-rabbit mnesia directory chain for the new
# software release during upgrade (same layout as in
# enable_murano_agent_rabbitmq, which only builds it on DB init).
class openstack::murano::upgrade {
  include ::platform::params

  $amqp_platform_sw_version = $::platform::params::software_version
  $murano_rabbit_dir = '/var/lib/rabbitmq/murano'
  $rabbit_home = "${murano_rabbit_dir}/${amqp_platform_sw_version}"
  $mnesia_base = "${rabbit_home}/mnesia"

  file { $murano_rabbit_dir:
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }
  -> file { $rabbit_home:
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }
  -> file { $mnesia_base:
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }
}

View File

@ -1,332 +0,0 @@
# Shared parameters for the openstack::neutron* classes (API port 9696,
# BGP speaker port 179).
class openstack::neutron::params (
  $api_port = 9696,
  $bgp_port = 179,
  $region_name = undef,
  $service_name = 'openstack-neutron',
  $bgp_router_id = undef,
  $service_create = false,
  $configure_endpoint = true,
  $tunnel_csum = undef,
) { }

# Core Neutron configuration: logging plus the AMQP transport settings.
class openstack::neutron
  inherits ::openstack::neutron::params {

  include ::platform::params
  include ::platform::amqp::params

  include ::neutron::logging

  class { '::neutron':
    rabbit_use_ssl        => $::platform::amqp::params::ssl_enabled,
    default_transport_url => $::platform::amqp::params::transport_url,
  }
}
# Declares an SNAT rule so management-network traffic destined to an
# external SDN controller leaves via the OAM address (Standard systems
# only).  TLS transport is mapped to tcp for iptables purposes.
# NOTE(review): the $port parameter is not referenced in this body —
# presumably consumed by callers/templates elsewhere; verify before reuse.
define openstack::neutron::sdn::controller (
  $transport,
  $ip_address,
  $port,
) {
  include ::platform::params
  include ::platform::network::oam::params
  include ::platform::network::mgmt::params

  $oam_interface = $::platform::network::oam::params::interface_name
  $mgmt_subnet_network = $::platform::network::mgmt::params::subnet_network
  $mgmt_subnet_prefixlen = $::platform::network::mgmt::params::subnet_prefixlen
  $oam_address = $::platform::network::oam::params::controller_address
  $system_type = $::platform::params::system_type

  $mgmt_subnet = "${mgmt_subnet_network}/${mgmt_subnet_prefixlen}"

  if $system_type == 'Standard' {
    # iptables has no 'tls' protocol; TLS rides on tcp.
    if $transport == 'tls' {
      $firewall_proto_transport = 'tcp'
    } else {
      $firewall_proto_transport = $transport
    }

    platform::firewall::rule { $name:
      service_name => $name,
      table        => 'nat',
      chain        => 'POSTROUTING',
      proto        => $firewall_proto_transport,
      outiface     => $oam_interface,
      tosource     => $oam_address,
      destination  => $ip_address,
      host         => $mgmt_subnet,
      jump         => 'SNAT',
    }
  }
}
# Parameters for the OpenDaylight ML2 integration.
class openstack::neutron::odl::params(
  $username = undef,
  $password= undef,
  $url = undef,
  $controller_config = {},
  $port_binding_controller = undef,
) {}

# Wires Neutron's ML2 plugin to OpenDaylight, and (when SDN is enabled)
# instantiates one SNAT rule per configured external controller.
class openstack::neutron::odl
  inherits ::openstack::neutron::odl::params {

  include ::platform::params

  if $::platform::params::sdn_enabled {
    create_resources('openstack::neutron::sdn::controller', $controller_config, {})
  }
  class {'::neutron::plugins::ml2::opendaylight':
    odl_username            => $username,
    odl_password            => $password,
    odl_url                 => $url,
    port_binding_controller => $port_binding_controller,
  }
}
# Enables or tears down the Neutron BGP dynamic-routing agent depending on
# whether a router id is configured, including its pmon monitoring link.
# NOTE(review): the exec resources have no unless/onlyif guards, so they
# run on every catalog apply (the restart/stop is repeated each time) —
# presumably intentional here, but confirm before reusing the pattern.
class openstack::neutron::bgp
  inherits ::openstack::neutron::params {

  if $bgp_router_id {
    class {'::neutron::bgp':
      bgp_router_id => $bgp_router_id,
    }

    class {'::neutron::services::bgpvpn':
    }

    exec { 'systemctl enable neutron-bgp-dragent.service':
      command => 'systemctl enable neutron-bgp-dragent.service',
    }

    exec { 'systemctl restart neutron-bgp-dragent.service':
      command => 'systemctl restart neutron-bgp-dragent.service',
    }

    file { '/etc/pmon.d/':
      ensure => directory,
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }

    # Register the agent with process monitoring.
    file { '/etc/pmon.d/neutron-bgp-dragent.conf':
      ensure => link,
      target => '/etc/neutron/pmon/neutron-bgp-dragent.conf',
      owner  => 'root',
      group  => 'root',
    }
  } else {
    # No router id: unmonitor first, then disable and stop the agent
    # (ordering enforced with ->).
    exec { 'pmon-stop neutron-bgp-dragent':
      command => 'pmon-stop neutron-bgp-dragent',
    }
    -> exec { 'rm -f /etc/pmon.d/neutron-bgp-dragent.conf':
      command => 'rm -f /etc/pmon.d/neutron-bgp-dragent.conf',
    }
    -> exec { 'systemctl disable neutron-bgp-dragent.service':
      command => 'systemctl disable neutron-bgp-dragent.service',
    }
    -> exec { 'systemctl stop neutron-bgp-dragent.service':
      command => 'systemctl stop neutron-bgp-dragent.service',
    }
  }
}
# Configures Neutron service-function-chaining drivers and quotas.  The
# whole feature is skipped when $sfc_drivers is falsy (empty/undef).
class openstack::neutron::sfc (
  $sfc_drivers = 'ovs',
  $flowclassifier_drivers = 'ovs',
  $sfc_quota_flow_classifier = undef,
  $sfc_quota_port_chain = undef,
  $sfc_quota_port_pair_group = undef,
  $sfc_quota_port_pair = undef,
) inherits ::openstack::neutron::params {

  if $sfc_drivers {
    class {'::neutron::sfc':
      sfc_drivers            => $sfc_drivers,
      flowclassifier_drivers => $flowclassifier_drivers,
      quota_flow_classifier  => $sfc_quota_flow_classifier,
      quota_port_chain       => $sfc_quota_port_chain,
      quota_port_pair_group  => $sfc_quota_port_pair_group,
      quota_port_pair        => $sfc_quota_port_pair,
    }
  }
}
# Configures neutron-server: ML2 plugin, notifications, authtoken, worker
# sizing, and tightens api-paste.ini permissions after the server class
# lays it down.
class openstack::neutron::server {
  include ::platform::params

  if $::platform::params::init_database {
    include ::neutron::db::postgresql
  }

  include ::neutron::plugins::ml2
  include ::neutron::server::notifications
  include ::neutron::keystone::authtoken

  class { '::neutron::server':
    api_workers => $::platform::params::eng_workers_by_2,
    rpc_workers => $::platform::params::eng_workers_by_2,
    sync_db     => $::platform::params::init_database,
  }

  # api-paste.ini is installed by the server class; restrict its mode
  # afterwards (ordering enforced below).
  file { '/etc/neutron/api-paste.ini':
    ensure => file,
    mode   => '0640',
  }

  Class['::neutron::server'] -> File['/etc/neutron/api-paste.ini']

  include ::openstack::neutron::bgp
  include ::openstack::neutron::odl
  include ::openstack::neutron::sfc
}
# Configures the Neutron node agents (l3, dhcp, metadata, sriov, ovs).
# When worker services are disabled or kubernetes is enabled, every agent
# is declared disabled and its pmon link is removed; otherwise agents run
# and are registered with process monitoring.
class openstack::neutron::agents
  inherits ::openstack::neutron::params {

  include ::platform::kubernetes::params

  if (str2bool($::disable_worker_services) or
      $::platform::kubernetes::params::enabled) {
    $pmon_ensure = absent

    class {'::neutron::agents::l3':
      enabled => false
    }
    class {'::neutron::agents::dhcp':
      enabled => false
    }
    class {'::neutron::agents::metadata':
      enabled => false,
    }
    class {'::neutron::agents::ml2::sriov':
      enabled => false
    }
    class {'::neutron::agents::ml2::ovs':
      enabled => false
    }
  } else {
    $pmon_ensure = link

    class {'::neutron::agents::metadata':
      metadata_workers => $::platform::params::eng_workers_by_4
    }
    include ::neutron::agents::dhcp
    include ::neutron::agents::l3
    include ::neutron::agents::ml2::sriov
    include ::neutron::agents::ml2::ovs
  }

  # String RHS of =~ is interpreted as a regex by Puppet: any ovs-style
  # vswitch type matches.
  if $::platform::params::vswitch_type =~ '^ovs' {
    # Ensure bridges and addresses are configured before agent is started
    Platform::Vswitch::Ovs::Bridge<||> ~> Service['neutron-ovs-agent-service']
    Platform::Vswitch::Ovs::Address<||> ~> Service['neutron-ovs-agent-service']

    # Enable/disable tunnel checksum
    neutron_agent_ovs {
      'agent/tunnel_csum': value => $tunnel_csum;
    }
  }

  # pmon registration links follow $pmon_ensure chosen above.
  file { '/etc/pmon.d/neutron-dhcp-agent.conf':
    ensure => $pmon_ensure,
    target => '/etc/neutron/pmon/neutron-dhcp-agent.conf',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }

  file { '/etc/pmon.d/neutron-metadata-agent.conf':
    ensure => $pmon_ensure,
    target => '/etc/neutron/pmon/neutron-metadata-agent.conf',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }

  file { '/etc/pmon.d/neutron-sriov-nic-agent.conf':
    ensure => $pmon_ensure,
    target => '/etc/neutron/pmon/neutron-sriov-nic-agent.conf',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }
}
# Firewall rules for Neutron: API always open; the BGP speaker port is
# opened only when a router id is configured and explicitly removed
# otherwise.
class openstack::neutron::firewall
  inherits ::openstack::neutron::params {

  platform::firewall::rule { 'neutron-api':
    service_name => 'neutron',
    ports        => $api_port,
  }

  if $bgp_router_id {
    platform::firewall::rule { 'ryu-bgp-port':
      service_name => 'neutron',
      ports        => $bgp_port,
    }
  } else {
    platform::firewall::rule { 'ryu-bgp-port':
      ensure       => absent,
      service_name => 'neutron',
      ports        => $bgp_port,
    }
  }
}

# haproxy frontend for the Neutron API.
class openstack::neutron::haproxy
  inherits ::openstack::neutron::params {

  platform::haproxy::proxy { 'neutron-restapi':
    server_name  => 's-neutron',
    public_port  => $api_port,
    private_port => $api_port,
  }
}
# Keystone identity plus firewall/haproxy wiring for the Neutron API;
# endpoint plumbing is skipped when configure_endpoint is false.
class openstack::neutron::api
  inherits ::openstack::neutron::params {

  include ::platform::params

  if ($::openstack::neutron::params::service_create and
      $::platform::params::init_keystone) {
    include ::neutron::keystone::auth
  }

  if $::openstack::neutron::params::configure_endpoint {
    include ::openstack::neutron::firewall
    include ::openstack::neutron::haproxy
  }
}

# Restarts neutron-server via service management (SM).
class openstack::neutron::server::reload {
  platform::sm::restart {'neutron-server': }
}

# Runtime re-apply of the neutron server manifest; the restart is deferred
# to the post stage.
class openstack::neutron::server::runtime {
  include ::openstack::neutron
  include ::openstack::neutron::server
  include ::openstack::neutron::firewall

  class {'::openstack::neutron::server::reload':
    stage => post
  }
}

View File

@ -1,117 +0,0 @@
# Shared parameters for the openstack::panko* classes.  event_time_to_live
# of '-1' means events are kept indefinitely.
class openstack::panko::params (
  $api_port = 8977,
  $region_name = undef,
  $service_name = 'openstack-panko',
  $service_create = false,
  $event_time_to_live = '-1',
  $service_enabled = true,
) { }
# Configures the Panko event service: client, authtoken, database, event
# retention, and an hourly expirer cron job.
class openstack::panko
  inherits ::openstack::panko::params {

  if $service_enabled {

    include ::platform::params
    include ::panko::client
    include ::panko::keystone::authtoken

    if $::platform::params::init_database {
      include ::panko::db::postgresql
    }

    class { '::panko::db':
    }

    # Event retention; '-1' keeps events forever.
    panko_config {
      'database/event_time_to_live': value => $event_time_to_live;
    }

    # WRS register panko-expirer-active in cron to run once each hour
    cron { 'panko-expirer':
      ensure      => 'present',
      command     => '/usr/bin/panko-expirer-active',
      environment => 'PATH=/bin:/usr/bin:/usr/sbin',
      minute      => 10,
      hour        => '*',
      monthday    => '*',
      user        => 'root',
    }
  }
}
# Firewall rule for the Panko API.
class openstack::panko::firewall
  inherits ::openstack::panko::params {

  platform::firewall::rule { 'panko-api':
    service_name => 'panko',
    ports        => $api_port,
  }
}

# haproxy frontend for the Panko API.
class openstack::panko::haproxy
  inherits ::openstack::panko::params {

  platform::haproxy::proxy { 'panko-restapi':
    server_name  => 's-panko-restapi',
    public_port  => $api_port,
    private_port => $api_port,
  }
}
# Configures the Panko API: keystone identity (always created so subclouds
# can use it), gunicorn-style config file, the API service itself, and the
# firewall/haproxy wiring.
class openstack::panko::api
  inherits ::openstack::panko::params {

  include ::platform::params

  # The panko user and service are always required and they
  # are used by subclouds when the service itself is disabled
  # on System Controller
  # whether it creates the endpoint is determined by
  # panko::keystone::auth::configure_endpoint which is
  # set via sysinv puppet
  if $::openstack::panko::params::service_create and
      $::platform::params::init_keystone {
    include ::panko::keystone::auth
  }

  if $service_enabled {

    $api_workers = $::platform::params::eng_workers_by_2

    include ::platform::network::mgmt::params
    $api_host = $::platform::network::mgmt::params::controller_address
    $url_host = $::platform::network::mgmt::params::controller_address_url

    if $::platform::params::init_database {
      include ::panko::db::postgresql
    }

    # Config file must exist before the API class consumes it (->).
    file { '/usr/share/panko/panko-api.conf':
      ensure  => file,
      content => template('openstack/panko-api.conf.erb'),
      owner   => 'root',
      group   => 'root',
      mode    => '0640',
    }
    -> class { '::panko::api':
      host    => $api_host,
      workers => $api_workers,
      sync_db => $::platform::params::init_database,
    }

    include ::openstack::panko::firewall
    include ::openstack::panko::haproxy
  }
}
# Runtime update of the event retention setting only (no service
# re-declaration).
class openstack::panko::runtime
  inherits ::openstack::panko::params {

  panko_config {
    'database/event_time_to_live': value => $event_time_to_live;
  }
}

View File

@ -1 +0,0 @@
bind='<%= @url_host %>:<%= @api_port %>'

View File

@ -1,2 +0,0 @@
bind='<%= @url_host %>:<%= @api_port %>'
workers=<%= @api_workers %>

View File

@ -1,3 +0,0 @@
bind='<%= @url_host %>:<%= @api_port %>'
workers=<%= @api_workers %>

View File

@ -1,392 +0,0 @@
---
archive_policy_default: ceilometer-low
archive_policies:
- name: ceilometer-low
aggregation_methods:
- mean
back_window: 0
definition:
- granularity: 5 minutes
timespan: 7 days
- name: ceilometer-low-rate
aggregation_methods:
- mean
- rate:mean
back_window: 0
definition:
- granularity: 5 minutes
timespan: 7 days
resources:
- resource_type: identity
metrics:
identity.authenticate.success:
identity.authenticate.pending:
identity.authenticate.failure:
identity.user.created:
identity.user.deleted:
identity.user.updated:
identity.group.created:
identity.group.deleted:
identity.group.updated:
identity.role.created:
identity.role.deleted:
identity.role.updated:
identity.project.created:
identity.project.deleted:
identity.project.updated:
identity.trust.created:
identity.trust.deleted:
identity.role_assignment.created:
identity.role_assignment.deleted:
- resource_type: ceph_account
metrics:
radosgw.objects:
radosgw.objects.size:
radosgw.objects.containers:
radosgw.api.request:
radosgw.containers.objects:
radosgw.containers.objects.size:
- resource_type: instance
metrics:
memory:
memory.usage:
memory.resident:
memory.swap.in:
memory.swap.out:
memory.bandwidth.total:
memory.bandwidth.local:
vcpus:
archive_policy_name: ceilometer-low-rate
vcpu_util:
cpu:
archive_policy_name: ceilometer-low-rate
cpu.delta:
cpu_util:
cpu_l3_cache:
disk.root.size:
disk.ephemeral.size:
disk.read.requests:
archive_policy_name: ceilometer-low-rate
disk.read.requests.rate:
disk.write.requests:
archive_policy_name: ceilometer-low-rate
disk.write.requests.rate:
disk.read.bytes:
archive_policy_name: ceilometer-low-rate
disk.read.bytes.rate:
disk.write.bytes:
archive_policy_name: ceilometer-low-rate
disk.write.bytes.rate:
disk.latency:
disk.iops:
disk.capacity:
disk.allocation:
disk.usage:
compute.instance.booting.time:
perf.cpu.cycles:
perf.instructions:
perf.cache.references:
perf.cache.misses:
attributes:
host: resource_metadata.(instance_host|host)
image_ref: resource_metadata.image_ref
display_name: resource_metadata.display_name
flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id)
flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name)
server_group: resource_metadata.user_metadata.server_group
event_delete: compute.instance.delete.start
event_attributes:
id: instance_id
event_associated_resources:
instance_network_interface: '{"=": {"instance_id": "%s"}}'
instance_disk: '{"=": {"instance_id": "%s"}}'
- resource_type: instance_network_interface
metrics:
network.outgoing.packets.rate:
network.incoming.packets.rate:
network.outgoing.packets:
archive_policy_name: ceilometer-low-rate
network.incoming.packets:
archive_policy_name: ceilometer-low-rate
network.outgoing.packets.drop:
archive_policy_name: ceilometer-low-rate
network.incoming.packets.drop:
archive_policy_name: ceilometer-low-rate
network.outgoing.packets.error:
archive_policy_name: ceilometer-low-rate
network.incoming.packets.error:
archive_policy_name: ceilometer-low-rate
network.outgoing.bytes.rate:
network.incoming.bytes.rate:
network.outgoing.bytes:
archive_policy_name: ceilometer-low-rate
network.incoming.bytes:
archive_policy_name: ceilometer-low-rate
attributes:
name: resource_metadata.vnic_name
instance_id: resource_metadata.instance_id
- resource_type: instance_disk
metrics:
disk.device.read.requests:
archive_policy_name: ceilometer-low-rate
disk.device.read.requests.rate:
disk.device.write.requests:
archive_policy_name: ceilometer-low-rate
disk.device.write.requests.rate:
disk.device.read.bytes:
archive_policy_name: ceilometer-low-rate
disk.device.read.bytes.rate:
disk.device.write.bytes:
archive_policy_name: ceilometer-low-rate
disk.device.write.bytes.rate:
disk.device.latency:
disk.device.read.latency:
disk.device.write.latency:
disk.device.iops:
disk.device.capacity:
disk.device.allocation:
disk.device.usage:
attributes:
name: resource_metadata.disk_name
instance_id: resource_metadata.instance_id
- resource_type: image
metrics:
image.size:
image.download:
image.serve:
attributes:
name: resource_metadata.name
container_format: resource_metadata.container_format
disk_format: resource_metadata.disk_format
event_delete: image.delete
event_attributes:
id: resource_id
- resource_type: ipmi
metrics:
hardware.ipmi.node.power:
hardware.ipmi.node.temperature:
hardware.ipmi.node.inlet_temperature:
hardware.ipmi.node.outlet_temperature:
hardware.ipmi.node.fan:
hardware.ipmi.node.current:
hardware.ipmi.node.voltage:
hardware.ipmi.node.airflow:
hardware.ipmi.node.cups:
hardware.ipmi.node.cpu_util:
hardware.ipmi.node.mem_util:
hardware.ipmi.node.io_util:
hardware.ipmi.temperature:
hardware.ipmi.voltage:
hardware.ipmi.current:
hardware.ipmi.fan:
- resource_type: network
metrics:
bandwidth:
ip.floating:
event_delete: floatingip.delete.end
event_attributes:
id: resource_id
- resource_type: stack
metrics:
stack.create:
stack.update:
stack.delete:
stack.resume:
stack.suspend:
- resource_type: swift_account
metrics:
storage.objects.incoming.bytes:
storage.objects.outgoing.bytes:
storage.api.request:
storage.objects.size:
storage.objects:
storage.objects.containers:
storage.containers.objects:
storage.containers.objects.size:
- resource_type: volume
metrics:
volume:
volume.size:
snapshot.size:
volume.snapshot.size:
volume.backup.size:
attributes:
display_name: resource_metadata.(display_name|name)
volume_type: resource_metadata.volume_type
event_delete: volume.delete.start
event_attributes:
id: resource_id
- resource_type: volume_provider
metrics:
volume.provider.capacity.total:
volume.provider.capacity.free:
volume.provider.capacity.allocated:
volume.provider.capacity.provisioned:
volume.provider.capacity.virtual_free:
- resource_type: volume_provider_pool
metrics:
volume.provider.pool.capacity.total:
volume.provider.pool.capacity.free:
volume.provider.pool.capacity.allocated:
volume.provider.pool.capacity.provisioned:
volume.provider.pool.capacity.virtual_free:
attributes:
provider: resource_metadata.provider
- resource_type: host
metrics:
hardware.cpu.load.1min:
hardware.cpu.load.5min:
hardware.cpu.load.15min:
hardware.cpu.util:
hardware.memory.total:
hardware.memory.used:
hardware.memory.swap.total:
hardware.memory.swap.avail:
hardware.memory.buffer:
hardware.memory.cached:
hardware.network.ip.outgoing.datagrams:
hardware.network.ip.incoming.datagrams:
hardware.system_stats.cpu.idle:
hardware.system_stats.io.outgoing.blocks:
hardware.system_stats.io.incoming.blocks:
attributes:
host_name: resource_metadata.resource_url
- resource_type: host_disk
metrics:
hardware.disk.size.total:
hardware.disk.size.used:
hardware.disk.read.bytes:
hardware.disk.write.bytes:
hardware.disk.read.requests:
hardware.disk.write.requests:
attributes:
host_name: resource_metadata.resource_url
device_name: resource_metadata.device
- resource_type: host_network_interface
metrics:
hardware.network.incoming.bytes:
hardware.network.outgoing.bytes:
hardware.network.outgoing.errors:
attributes:
host_name: resource_metadata.resource_url
device_name: resource_metadata.name
- resource_type: nova_compute
metrics:
compute.node.cpu.frequency:
compute.node.cpu.idle.percent:
compute.node.cpu.idle.time:
compute.node.cpu.iowait.percent:
compute.node.cpu.iowait.time:
compute.node.cpu.kernel.percent:
compute.node.cpu.kernel.time:
compute.node.cpu.percent:
compute.node.cpu.user.percent:
compute.node.cpu.user.time:
attributes:
host_name: resource_metadata.host
- resource_type: manila_share
metrics:
manila.share.size:
attributes:
name: resource_metadata.name
host: resource_metadata.host
status: resource_metadata.status
availability_zone: resource_metadata.availability_zone
protocol: resource_metadata.protocol
- resource_type: switch
metrics:
switch:
switch.ports:
attributes:
controller: resource_metadata.controller
- resource_type: switch_port
metrics:
switch.port:
switch.port.uptime:
switch.port.receive.packets:
switch.port.transmit.packets:
switch.port.receive.bytes:
switch.port.transmit.bytes:
switch.port.receive.drops:
switch.port.transmit.drops:
switch.port.receive.errors:
switch.port.transmit.errors:
switch.port.receive.frame_error:
switch.port.receive.overrun_error:
switch.port.receive.crc_error:
switch.port.collision.count:
attributes:
switch: resource_metadata.switch
port_number_on_switch: resource_metadata.port_number_on_switch
neutron_port_id: resource_metadata.neutron_port_id
controller: resource_metadata.controller
- resource_type: port
metrics:
port:
port.uptime:
port.receive.packets:
port.transmit.packets:
port.receive.bytes:
port.transmit.bytes:
port.receive.drops:
port.receive.errors:
attributes:
controller: resource_metadata.controller
- resource_type: switch_table
metrics:
switch.table.active.entries:
attributes:
controller: resource_metadata.controller
switch: resource_metadata.switch
- resource_type: vswitch_engine
metrics:
vswitch.engine.util:
vswitch.engine.receive.discard:
vswitch.engine.transmit.discard:
attributes:
host: resource_metadata.host
cpu_id: resource_metadata.cpu_id
- resource_type: vswitch_interface_and_port
metrics:
vswitch.interface.receive.errors:
vswitch.interface.transmit.errors:
vswitch.interface.receive.discards:
vswitch.interface.transmit.discards:
vswitch.port.receive.bytes:
vswitch.port.receive.packets:
vswitch.port.receive.util:
vswitch.port.transmit.bytes:
vswitch.port.transmit.packets:
vswitch.port.transmit.util:
vswitch.port.receive.errors:
vswitch.port.receive.missed:
vswitch.port.transmit.errors:
attributes:
host: resource_metadata.host
network_uuid: resource_metadata.network_uuid
network_id: resource_metadata.network_id
link-speed: resource_metadata.link-speed

View File

@ -3,9 +3,6 @@ https_enabled = <%= @enable_https %>
[auth]
lockout_period = <%= @lockout_period %>
lockout_retries = <%= @lockout_retries %>
[optional_tabs]
murano_enabled = <%= @murano_enabled %>
magnum_enabled = <%= @magnum_enabled %>
[deployment]
workers = <%= @workers %>

View File

@ -8,25 +8,14 @@
"admin_or_token_subject": "rule:admin_required or rule:token_subject",
"service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject",
"protected_domains": "'heat':%(target.domain.name)s or 'magnum':%(target.domain.name)s",
"protected_domains": "",
"protected_projects": "'admin':%(target.project.name)s or 'services':%(target.project.name)s",
"protected_admins": "'admin':%(target.user.name)s or 'heat_admin':%(target.user.name)s or 'dcmanager':%(target.user.name)s",
"protected_roles": "'admin':%(target.role.name)s or 'heat_admin':%(target.user.name)s",
"protected_services": [["'aodh':%(target.user.name)s"],
["'barbican':%(target.user.name)s"],
["'ceilometer':%(target.user.name)s"],
["'cinder':%(target.user.name)s"],
["'glance':%(target.user.name)s"],
["'heat':%(target.user.name)s"],
["'neutron':%(target.user.name)s"],
["'nova':%(target.user.name)s"],
"protected_admins": "'admin':%(target.user.name)s or 'dcmanager':%(target.user.name)s",
"protected_roles": "'admin':%(target.role.name)s",
"protected_services": [["'barbican':%(target.user.name)s"],
["'patching':%(target.user.name)s"],
["'sysinv':%(target.user.name)s"],
["'mtce':%(target.user.name)s"],
["'magnum':%(target.user.name)s"],
["'murano':%(target.user.name)s"],
["'panko':%(target.user.name)s"],
["'gnocchi':%(target.user.name)s"],
["'fm':%(target.user.name)s"]],
"identity:delete_service": "rule:admin_required and not rule:protected_services",

View File

@ -1,4 +0,0 @@
HOME=<%= @rabbit_home %>
NODE_PORT=<%= @port %>
RABBITMQ_MNESIA_BASE=<%= @mnesia_base %>
RABBITMQ_NODENAME=<%= @murano_rabbit_node %>

View File

@ -1,18 +0,0 @@
% This file managed by Puppet
% Template Path: rabbitmq/templates/rabbitmq.config
[
{rabbit, [
{tcp_listen_options,
<%= @rabbit_tcp_listen_options %>
},
{disk_free_limit, <%= @disk_free_limit %>},
{heartbeat, <%= @heartbeat %>},
{tcp_listen_options, <%= @tcp_listen_options %>},
{default_user, <<"<%= @default_user %>">>},
{default_pass, <<"<%= @default_pass %>">>}
]},
{kernel, [
]}
].
% EOF

View File

@ -1,30 +0,0 @@
% This file managed by Puppet
% Template Path: rabbitmq/templates/rabbitmq.config
[
{ssl, [{versions, ['<%= @tlsv2 %>', '<%= @tlsv1 %>']}]},
{rabbit, [
{tcp_listen_options,
<%= @rabbit_tcp_listen_options %>
},
{tcp_listeners, []},
{ssl_listeners, [{"<%= @ssl_interface %>", <%= @ssl_port %>}]},
{ssl_options, [
{cacertfile,"<%= @kombu_ssl_ca_certs %>"},
{certfile,"<%= @kombu_ssl_certfile %>"},
{keyfile,"<%= @kombu_ssl_keyfile %>"},
{verify,verify_none},
{fail_if_no_peer_cert,<%= @fail_if_no_peer_cert %>}
,{versions, ['<%= @tlsv2 %>', '<%= @tlsv1 %>']}
,{ciphers,<%= @rabbit_cipher_list %>}
]},
{disk_free_limit, <%= @disk_free_limit %>},
{heartbeat, <%= @heartbeat %>},
{tcp_listen_options, <%= @tcp_listen_options %>},
{default_user, <<"<%= @default_user %>">>},
{default_pass, <<"<%= @default_pass %>">>}
]},
{kernel, [
]}
].
% EOF

View File

@ -1,24 +0,0 @@
unset OS_SERVICE_TOKEN
export OS_ENDPOINT_TYPE=internalURL
export CINDER_ENDPOINT_TYPE=internalURL
export OS_USERNAME=<%= @admin_username %>
export OS_PASSWORD=`TERM=linux <%= @keyring_file %> 2>/dev/null`
export OS_AUTH_TYPE=password
export OS_AUTH_URL=<%= @identity_auth_url %>
export OS_PROJECT_NAME=<%= @admin_project_name %>
export OS_USER_DOMAIN_NAME=<%= @admin_user_domain %>
export OS_PROJECT_DOMAIN_NAME=<%= @admin_project_domain %>
export OS_IDENTITY_API_VERSION=<%= @identity_api_version %>
export OS_REGION_NAME=<%= @identity_region %>
export OS_KEYSTONE_REGION_NAME=<%= @keystone_identity_region %>
export OS_INTERFACE=internal
if [ ! -z "${OS_PASSWORD}" ]; then
export PS1='[\u@\h \W(keystone_$OS_USERNAME)]\$ '
else
echo 'Openstack Admin credentials can only be loaded from the active controller.'
export PS1='\h:\w\$ '
fi

View File

@ -1,12 +0,0 @@
unset OS_SERVICE_TOKEN
export OS_ENDPOINT_TYPE=internalURL
export CINDER_ENDPOINT_TYPE=internalURL
export OS_AUTH_URL=<%= @identity_auth_url %>
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=<%= @admin_user_domain %>
export OS_PROJECT_DOMAIN_NAME=<%= @admin_project_domain %>
export OS_IDENTITY_API_VERSION=<%= @identity_api_version %>
export OS_REGION_NAME=<%= @identity_region %>
export OS_KEYSTONE_REGION_NAME=<%= @keystone_identity_region %>

View File

@ -1,3 +0,0 @@
bind='<%= @url_host %>:<%= @api_port %>'
workers=<%= @api_workers %>

View File

@ -1,106 +0,0 @@
---
sources:
- name: meter_source
meters:
- "*"
sinks:
- meter_sink
- csv_sink
- name: cpu_source
meters:
- "cpu"
sinks:
- cpu_sink
- cpu_delta_sink
- vcpu_sink
- name: disk_source
meters:
- "disk.read.bytes"
- "disk.read.requests"
- "disk.write.bytes"
- "disk.write.requests"
- "disk.device.read.bytes"
- "disk.device.read.requests"
- "disk.device.write.bytes"
- "disk.device.write.requests"
sinks:
- disk_sink
- name: network_source
meters:
- "network.incoming.bytes"
- "network.incoming.packets"
- "network.outgoing.bytes"
- "network.outgoing.packets"
sinks:
- network_sink
sinks:
- name: meter_sink
transformers:
publishers:
- gnocchi://
- name: cpu_sink
transformers:
- name: "rate_of_change"
parameters:
target:
name: "cpu_util"
unit: "%"
type: "gauge"
max: 100
scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
publishers:
- gnocchi://
- name: cpu_delta_sink
transformers:
- name: "delta"
parameters:
target:
name: "cpu.delta"
growth_only: True
publishers:
- gnocchi://
- name: vcpu_sink
transformers:
- name: "rate_of_change"
parameters:
target:
name: "vcpu_util"
unit: "%"
type: "gauge"
max: 100
scale: "100.0 / (10**9 * (resource_metadata.vcpu_number or 1))"
publishers:
- gnocchi://
- name: disk_sink
transformers:
- name: "rate_of_change"
parameters:
source:
map_from:
name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
unit: "(B|request)"
target:
map_to:
name: "\\1.\\2.\\3.rate"
unit: "\\1/s"
type: "gauge"
publishers:
- gnocchi://
- name: network_sink
transformers:
- name: "rate_of_change"
parameters:
source:
map_from:
name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
unit: "(B|packet)"
target:
map_to:
name: "network.\\1.\\2.rate"
unit: "\\1/s"
type: "gauge"
publishers:
- gnocchi://
- name: csv_sink
publishers:
- csvfile:///opt/cgcs/ceilometer/csv/pm.csv?max_bytes=10000000&backup_count=5&compress=True&enabled=True

View File

@ -1,68 +0,0 @@
---
sources:
- name: instance_pollster
interval: <%= @instance_polling_interval %>
meters:
- disk.read.bytes
- disk.read.bytes.rate
- disk.read.requests
- disk.read.requests.rate
- disk.write.bytes
- disk.write.bytes.rate
- disk.write.requests
- disk.write.requests.rate
- disk.capacity
- disk.allocation
- disk.usage
- name: instance_cpu_pollster
interval: <%= @instance_cpu_polling_interval %>
meters:
- cpu
- name: instance_disk_pollster
interval: <%= @instance_disk_polling_interval %>
meters:
- disk.device.read.requests
- disk.device.read.requests.rate
- disk.device.write.requests
- disk.device.write.requests.rate
- disk.device.read.bytes
- disk.device.read.bytes.rate
- disk.device.write.bytes
- disk.device.write.bytes.rate
- disk.device.capacity
- disk.device.allocation
- disk.device.usage
- name: ipmi_pollster
interval: <%= @ipmi_polling_interval %>
meters:
- hardware.ipmi.node.power
- hardware.ipmi.node.temperature
- hardware.ipmi.node.outlet_temperature
- hardware.ipmi.node.airflow
- hardware.ipmi.node.cups
- hardware.ipmi.node.cpu_util
- hardware.ipmi.node.mem_util
- hardware.ipmi.node.io_util
- hardware.ipmi.temperature
- hardware.ipmi.voltage
- hardware.ipmi.current
- hardware.ipmi.fan
- name: ceph_pollster
interval: <%= @ceph_polling_interval %>
meters:
- radosgw.objects
- radosgw.objects.size
- radosgw.objects.containers
- radosgw.api.request
- radosgw.containers.objects
- radosgw.containers.objects.size
- name: image_pollster
interval: <%= @image_polling_interval %>
meters:
- image.size
- name: volume_pollster
interval: <%= @volume_polling_interval %>
meters:
- volume.size
- volume.snapshot.size
- volume.backup.size

View File

@ -22,6 +22,11 @@ class platform::client
group => 'root',
content => template('platform/openrc.admin.erb'),
}
-> file {'/etc/bash_completion.d/openstack':
ensure => 'present',
mode => '0644',
content => generate('/usr/bin/openstack', 'complete'),
}
}
class platform::client::credentials::params (

View File

@ -322,7 +322,6 @@ class platform::compute::pmqos (
class platform::compute {
Class[$name] -> Class['::platform::vswitch']
Class[$name] -> Class['::nova::compute']
require ::platform::compute::grub::audit
require ::platform::compute::hugetlbf

View File

@ -47,7 +47,6 @@ class platform::dcorch
class platform::dcorch::firewall
inherits ::platform::dcorch::params {
if $::platform::params::distributed_cloud_role =='systemcontroller' {
include ::openstack::cinder::params
platform::firewall::rule { 'dcorch-api':
service_name => 'dcorch',
ports => $api_port,
@ -64,11 +63,9 @@ class platform::dcorch::firewall
service_name => 'dcorch-neutron-api-proxy',
ports => $neutron_api_proxy_port,
}
if $::openstack::cinder::params::service_enabled {
platform::firewall::rule { 'dcorch-cinder-api-proxy':
service_name => 'dcorch-cinder-api-proxy',
ports => $cinder_api_proxy_port,
}
platform::firewall::rule { 'dcorch-cinder-api-proxy':
service_name => 'dcorch-cinder-api-proxy',
ports => $cinder_api_proxy_port,
}
platform::firewall::rule { 'dcorch-patch-api-proxy':
service_name => 'dcorch-patch-api-proxy',
@ -85,7 +82,6 @@ class platform::dcorch::firewall
class platform::dcorch::haproxy
inherits ::platform::dcorch::params {
if $::platform::params::distributed_cloud_role =='systemcontroller' {
include ::openstack::cinder::params
platform::haproxy::proxy { 'dcorch-neutron-api-proxy':
server_name => 's-dcorch-neutron-api-proxy',
public_port => $neutron_api_proxy_port,
@ -101,12 +97,10 @@ class platform::dcorch::haproxy
public_port => $sysinv_api_proxy_port,
private_port => $sysinv_api_proxy_port,
}
if $::openstack::cinder::params::service_enabled {
platform::haproxy::proxy { 'dcorch-cinder-api-proxy':
server_name => 's-cinder-dc-api-proxy',
public_port => $cinder_api_proxy_port,
private_port => $cinder_api_proxy_port,
}
platform::haproxy::proxy { 'dcorch-cinder-api-proxy':
server_name => 's-cinder-dc-api-proxy',
public_port => $cinder_api_proxy_port,
private_port => $cinder_api_proxy_port,
}
platform::haproxy::proxy { 'dcorch-patch-api-proxy':
server_name => 's-dcorch-patch-api-proxy',

View File

@ -48,21 +48,6 @@ class platform::dns::dnsmasq {
$infra_subnet_netmask = $::platform::network::infra::params::subnet_prefixlen
}
include ::openstack::ironic::params
$ironic_tftp_dir_version = $::platform::params::software_version
$ironic_tftpboot_dir = $::openstack::ironic::params::ironic_tftpboot_dir
case $::hostname {
$::platform::params::controller_0_hostname: {
$ironic_tftp_interface = $::openstack::ironic::params::controller_0_if
}
$::platform::params::controller_1_hostname: {
$ironic_tftp_interface = $::openstack::ironic::params::controller_1_if
}
default: {
$ironic_tftp_interface = undef
}
}
include ::platform::kubernetes::params
$service_domain = $::platform::kubernetes::params::service_domain
$dns_service_ip = $::platform::kubernetes::params::dns_service_ip

View File

@ -238,13 +238,9 @@ class platform::drbd::extension (
) inherits ::platform::drbd::extension::params {
include ::platform::params
include ::openstack::cinder::params
include ::platform::drbd::cgcs::params
if ($::platform::params::system_mode != 'simplex' and
'lvm' in $::openstack::cinder::params::enabled_backends) {
$resync_after = $::openstack::cinder::params::drbd_resource
} elsif str2bool($::is_primary_disk_rotational) {
if str2bool($::is_primary_disk_rotational) {
$resync_after = $::platform::drbd::cgcs::params::resource_name
} else {
$resync_after = undef
@ -448,14 +444,9 @@ class platform::drbd(
$service_enable = false,
$service_ensure = 'stopped',
) {
if (str2bool($::is_initial_config_primary) or
('lvm' in $openstack::cinder::params::enabled_backends and
str2bool($::is_standalone_controller) and str2bool($::is_node_cinder_lvm_config))
if (str2bool($::is_initial_config_primary)
){
# Enable DRBD in two cases:
# 1) At config_controller,
# 2) When cinder volumes disk is replaced on a standalone controller
# (e.g. AIO SX).
# Enable DRBD at config_controller
class { '::drbd':
service_enable => true,
service_ensure => 'running',

View File

@ -200,8 +200,6 @@ class platform::filesystem::img_conversions::params (
class platform::filesystem::img_conversions
inherits ::platform::filesystem::img_conversions::params {
include ::openstack::cinder::params
include ::openstack::glance::params
platform::filesystem { $lv_name:
lv_name => $lv_name,
@ -308,8 +306,6 @@ class platform::filesystem::docker::runtime {
class platform::filesystem::img_conversions::runtime {
include ::platform::filesystem::img_conversions::params
include ::openstack::cinder::params
include ::openstack::glance::params
$lv_name = $::platform::filesystem::img_conversions::params::lv_name
$lv_size = $::platform::filesystem::img_conversions::params::lv_size
$devmapper = $::platform::filesystem::img_conversions::params::devmapper

View File

@ -142,15 +142,6 @@ class platform::haproxy::runtime {
include ::platform::dcorch::haproxy
}
include ::openstack::keystone::haproxy
include ::openstack::neutron::haproxy
include ::openstack::glance::haproxy
include ::openstack::cinder::haproxy
include ::openstack::heat::haproxy
include ::openstack::murano::haproxy
include ::openstack::magnum::haproxy
include ::openstack::ironic::haproxy
include ::openstack::panko::haproxy
include ::openstack::gnocchi::haproxy
include ::openstack::swift::haproxy
include ::openstack::barbican::haproxy

View File

@ -25,9 +25,6 @@ class platform::mtce::params (
class platform::mtce
inherits ::platform::mtce::params {
include ::openstack::ceilometer::params
$ceilometer_port = $::openstack::ceilometer::params::api_port
include ::platform::client::credentials::params
$keyring_directory = $::platform::client::credentials::params::keyring_directory

View File

@ -197,22 +197,9 @@ class platform::postgresql::upgrade
-> class {'::postgresql::server':
}
include ::aodh::db::postgresql
include ::barbican::db::postgresql
include ::cinder::db::postgresql
include ::glance::db::postgresql
include ::gnocchi::db::postgresql
include ::heat::db::postgresql
include ::murano::db::postgresql
include ::magnum::db::postgresql
include ::neutron::db::postgresql
include ::nova::db::postgresql
include ::nova::db::postgresql_api
include ::panko::db::postgresql
include ::sysinv::db::postgresql
include ::keystone::db::postgresql
include ::ironic::db::postgresql
include ::fm::db::postgresql
}

View File

@ -102,9 +102,6 @@ class platform::sm
$amqp_server_port = $::platform::amqp::params::port
$rabbit_node_name = $::platform::amqp::params::node
$rabbit_mnesia_base = "/var/lib/rabbitmq/${platform_sw_version}/mnesia"
$murano_rabbit_node_name = "murano-${rabbit_node_name}"
$murano_rabbit_mnesia_base = "/var/lib/rabbitmq/murano/${platform_sw_version}/mnesia"
$murano_rabbit_config_file = '/etc/rabbitmq/murano-rabbitmq'
include ::platform::ldap::params
$ldapserver_remote = $::platform::ldap::params::ldapserver_remote
@ -112,10 +109,6 @@ class platform::sm
# This variable is used also in create_sm_db.sql.
# please change that one as well when modifying this variable
$rabbit_pid = '/var/run/rabbitmq/rabbitmq.pid'
$murano_rabbit_env_config_file = '/etc/rabbitmq/murano-rabbitmq-env.conf'
$murano_rabbit_pid = '/var/run/rabbitmq/murano-rabbit.pid'
$murano_rabbit_dist_port = 25673
$rabbitmq_server = '/usr/lib/rabbitmq/bin/rabbitmq-server'
$rabbitmqctl = '/usr/lib/rabbitmq/bin/rabbitmqctl'
@ -180,62 +173,10 @@ class platform::sm
$os_user_domain_name = $::platform::client::params::admin_user_domain
$os_project_domain_name = $::platform::client::params::admin_project_domain
# Nova
$db_server_port = '5432'
include ::openstack::nova::params
$novnc_console_port = $::openstack::nova::params::nova_novnc_port
# Heat
include ::openstack::heat::params
$heat_api_cfn_port = $::openstack::heat::params::cfn_port
$heat_api_cloudwatch_port = $::openstack::heat::params::cloudwatch_port
$heat_api_port = $::openstack::heat::params::api_port
# Neutron
include ::openstack::neutron::params
$neutron_region_name = $::openstack::neutron::params::region_name
$neutron_plugin_config = '/etc/neutron/plugin.ini'
$neutron_sriov_plugin_config = '/etc/neutron/plugins/ml2/ml2_conf_sriov.ini'
# Cinder
include ::openstack::cinder::params
$cinder_service_enabled = $::openstack::cinder::params::service_enabled
$cinder_region_name = $::openstack::cinder::params::region_name
$cinder_backends = $::openstack::cinder::params::enabled_backends
$cinder_drbd_resource = $::openstack::cinder::params::drbd_resource
$cinder_vg_name = $::openstack::cinder::params::cinder_vg_name
# Glance
include ::openstack::glance::params
$glance_region_name = $::openstack::glance::params::region_name
$glance_cached = $::openstack::glance::params::glance_cached
# Murano
include ::openstack::murano::params
$disable_murano_agent = $::openstack::murano::params::disable_murano_agent
# Magnum
include ::openstack::magnum::params
# Ironic
include ::openstack::ironic::params
$ironic_tftp_ip = $::openstack::ironic::params::tftp_server
$ironic_controller_0_nic = $::openstack::ironic::params::controller_0_if
$ironic_controller_1_nic = $::openstack::ironic::params::controller_1_if
$ironic_netmask = $::openstack::ironic::params::netmask
$ironic_tftproot = $::openstack::ironic::params::ironic_tftpboot_dir
# Ceph-Rados-Gateway
include ::platform::ceph::params
$ceph_configured = $::platform::ceph::params::service_enabled
# Gnocchi
include ::openstack::gnocchi::params
# Panko
include ::openstack::panko::params
if $system_mode == 'simplex' {
$hostunit = '0'
$management_my_unit_ip = $::platform::network::mgmt::params::controller0_address
@ -282,16 +223,6 @@ class platform::sm
shell => '/bin/sh'
}
# Workaround for the time being to prevent SM from enabling the openstack
# services when kubernetes is enabled to avoid making changes to individual
# openstack manifests
$heat_service_enabled = false
$murano_configured = false
$ironic_configured = false
$magnum_configured = false
$gnocchi_enabled = false
$panko_enabled = false
# lint:ignore:140chars
if str2bool($::is_virtual) {
@ -503,36 +434,8 @@ class platform::sm
}
$configure_keystone = false
}
if $glance_region_name != $region_2_name {
$configure_glance = false
exec { 'Deprovision OpenStack - Glance Registry (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services glance-registry',
}
-> exec { 'Deprovision OpenStack - Glance Registry (service)':
command => 'sm-deprovision service glance-registry',
}
-> exec { 'Deprovision OpenStack - Glance API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services glance-api',
}
-> exec { 'Deprovision OpenStack - Glance API (service)':
command => 'sm-deprovision service glance-api',
}
} else {
$configure_glance = true
if $glance_cached {
exec { 'Deprovision OpenStack - Glance Registry (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services glance-registry',
}
-> exec { 'Deprovision OpenStack - Glance Registry (service)':
command => 'sm-deprovision service glance-registry',
}
}
}
} else {
$configure_keystone = true
$configure_glance = false
}
if $configure_keystone {
@ -556,173 +459,53 @@ class platform::sm
}
}
if $configure_glance {
if !$glance_cached {
exec { 'Configure OpenStack - Glance Registry':
command => "sm-configure service_instance glance-registry glance-registry \"config=/etc/glance/glance-registry.conf,user=root,os_username=${os_username},os_project_name=${os_project_name},os_user_domain_name=${os_user_domain_name},os_project_domain_name=${os_project_domain_name},keystone_get_token_url=${os_auth_url}/tokens\"",
}
-> exec { 'Provision OpenStack - Glance Registry (service-group-member)':
command => 'sm-provision service-group-member cloud-services glance-registry',
}
-> exec { 'Provision OpenStack - Glance Registry (service)':
command => 'sm-provision service glance-registry',
}
}
exec { 'Configure OpenStack - Glance API':
command => "sm-configure service_instance glance-api glance-api \"config=/etc/glance/glance-api.conf,user=root,os_username=${os_username},os_project_name=${os_project_name},os_user_domain_name=${os_user_domain_name},os_project_domain_name=${os_project_domain_name},os_auth_url=${os_auth_url}\"",
}
-> exec { 'Provision OpenStack - Glance API (service-group-member)':
command => 'sm-provision service-group-member cloud-services glance-api',
}
-> exec { 'Provision OpenStack - Glance API (service)':
command => 'sm-provision service glance-api',
}
} else {
# Deprovision Glance API and Glance Registry incase of a kubernetes config
exec { 'Deprovision OpenStack - Glance Registry (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services glance-registry',
}
-> exec { 'Deprovision OpenStack - Glance Registry(service)':
command => 'sm-deprovision service glance-registry',
}
exec { 'Deprovision OpenStack - Glance API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services glance-api',
}
-> exec { 'Deprovision OpenStack - Glance API(service)':
command => 'sm-deprovision service glance-api',
}
# Glance
exec { 'Deprovision OpenStack - Glance Registry (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services glance-registry',
}
-> exec { 'Deprovision OpenStack - Glance Registry(service)':
command => 'sm-deprovision service glance-registry',
}
if $cinder_service_enabled {
exec { 'Configure OpenStack - Cinder API':
command => "sm-configure service_instance cinder-api cinder-api \"config=/etc/cinder/cinder.conf,user=root,os_username=${os_username},os_project_name=${os_project_name},os_user_domain_name=${os_user_domain_name},os_project_domain_name=${os_project_domain_name},keystone_get_token_url=${os_auth_url}/tokens\"",
}
-> exec { 'Provision OpenStack - Cinder API (service-group-member)':
command => 'sm-provision service-group-member cloud-services cinder-api',
}
-> exec { 'Provision OpenStack - Cinder API (service)':
command => 'sm-provision service cinder-api',
}
exec { 'Deprovision OpenStack - Glance API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services glance-api',
}
-> exec { 'Deprovision OpenStack - Glance API(service)':
command => 'sm-deprovision service glance-api',
}
exec { 'Configure OpenStack - Cinder Scheduler':
command => "sm-configure service_instance cinder-scheduler cinder-scheduler \"config=/etc/cinder/cinder.conf,user=root,amqp_server_port=${amqp_server_port}\"",
}
-> exec { 'Provision OpenStack - Cinder Scheduler (service-group-member)':
command => 'sm-provision service-group-member cloud-services cinder-scheduler',
}
-> exec { 'Provision OpenStack - Cinder Scheduler (service)':
command => 'sm-provision service cinder-scheduler',
}
exec { 'Configure OpenStack - Cinder Volume':
command => "sm-configure service_instance cinder-volume cinder-volume \"config=/etc/cinder/cinder.conf,user=root,amqp_server_port=${amqp_server_port},multibackend=true\"",
}
-> exec { 'Provision OpenStack - Cinder Volume (service-group-member)':
command => 'sm-provision service-group-member cloud-services cinder-volume',
}
-> exec { 'Configure Cinder Volume in SM':
command => 'sm-provision service cinder-volume',
}
exec { 'Configure OpenStack - Cinder Backup':
command => "sm-configure service_instance cinder-backup cinder-backup \"config=/etc/cinder/cinder.conf,user=root,amqp_server_port=${amqp_server_port}\"",
}
-> exec { 'Provision OpenStack - Cinder Backup (service-group-member)':
command => 'sm-provision service-group-member cloud-services cinder-backup',
}
-> exec { 'Provision OpenStack - Cinder Backup (service)':
command => 'sm-provision service cinder-backup',
}
if 'lvm' in $cinder_backends {
# Cinder DRBD
exec { 'Configure Cinder LVM in SM (service-group-member drbd-cinder)':
command => 'sm-provision service-group-member controller-services drbd-cinder',
}
-> exec { 'Configure Cinder LVM in SM (service drbd-cinder)':
command => 'sm-provision service drbd-cinder',
}
# Cinder LVM
-> exec { 'Configure Cinder LVM in SM (service-group-member cinder-lvm)':
command => 'sm-provision service-group-member controller-services cinder-lvm',
}
-> exec { 'Configure Cinder LVM in SM (service cinder-lvm)':
command => 'sm-provision service cinder-lvm',
}
# TGTd
-> exec { 'Configure Cinder LVM in SM (service-group-member iscsi)':
command => 'sm-provision service-group-member controller-services iscsi',
}
-> exec { 'Configure Cinder LVM in SM (service iscsi)':
command => 'sm-provision service iscsi',
}
-> exec { 'Configure Cinder DRBD service instance':
command => "sm-configure service_instance drbd-cinder drbd-cinder:${hostunit} drbd_resource=${cinder_drbd_resource}",
}
exec { 'Configure Cinder LVM service instance':
command => "sm-configure service_instance cinder-lvm cinder-lvm \"rmon_rsc_name=volume-storage,volgrpname=${cinder_vg_name}\"",
}
exec { 'Configure iscsi service instance':
command => "sm-configure service_instance iscsi iscsi \"\"",
}
# Cinder IP
exec { 'Configure Cinder LVM in SM (service-group-member cinder-ip)':
command => 'sm-provision service-group-member controller-services cinder-ip',
}
-> exec { 'Configure Cinder LVM in SM (service cinder-ip)':
command => 'sm-provision service cinder-ip',
}
if $system_mode == 'duplex-direct' or $system_mode == 'simplex' {
exec { 'Configure Cinder IP service instance':
command => "sm-configure service_instance cinder-ip cinder-ip \"ip=${cinder_ip_param_ip},cidr_netmask=${cinder_ip_param_mask},nic=${cinder_ip_interface},arp_count=7,dc=yes\"",
}
} else {
exec { 'Configure Cinder IP service instance':
command => "sm-configure service_instance cinder-ip cinder-ip \"ip=${cinder_ip_param_ip},cidr_netmask=${cinder_ip_param_mask},nic=${cinder_ip_interface},arp_count=7\"",
}
}
}
} else {
exec { 'Deprovision OpenStack - Cinder API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services cinder-api',
}
-> exec { 'Deprovision OpenStack - Cinder API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service cinder-api',
}
-> exec { 'Deprovision OpenStack - Cinder Scheduler (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services cinder-scheduler',
}
-> exec { 'Deprovision OpenStack - Cinder Scheduler (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service cinder-scheduler',
}
-> exec { 'Deprovision OpenStack - Cinder Volume (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services cinder-volume',
}
-> exec { 'Deprovision OpenStack - Cinder Volume (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service cinder-volume',
}
-> exec { 'Deprovision OpenStack - Cinder Backup (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services cinder-backup',
}
-> exec { 'Deprovision OpenStack - Cinder Backup (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service cinder-backup',
}
# Cinder
exec { 'Deprovision OpenStack - Cinder API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services cinder-api',
}
-> exec { 'Deprovision OpenStack - Cinder API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service cinder-api',
}
-> exec { 'Deprovision OpenStack - Cinder Scheduler (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services cinder-scheduler',
}
-> exec { 'Deprovision OpenStack - Cinder Scheduler (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service cinder-scheduler',
}
-> exec { 'Deprovision OpenStack - Cinder Volume (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services cinder-volume',
}
-> exec { 'Deprovision OpenStack - Cinder Volume (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service cinder-volume',
}
-> exec { 'Deprovision OpenStack - Cinder Backup (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services cinder-backup',
}
-> exec { 'Deprovision OpenStack - Cinder Backup (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service cinder-backup',
}
# Re-using cinder-ip for cluster-host-ip for now
@ -745,30 +528,6 @@ class platform::sm
}
}
# TODO: revisit region mode
if $region_config {
if $neutron_region_name != $region_2_name {
$configure_neturon = false
exec { 'Deprovision OpenStack - Neutron Server (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services neutron-server',
}
-> exec { 'Deprovision OpenStack - Neutron Server (service)':
command => 'sm-deprovision service neutron-server',
}
} else {
$configure_neturon = true
}
} else {
$configure_neturon = false
}
if $configure_neturon {
exec { 'Configure OpenStack - Neutron Server':
command => "sm-configure service_instance neutron-server neutron-server \"config=/etc/neutron/neutron.conf,plugin_config=${neutron_plugin_config},sriov_plugin_config=${neutron_sriov_plugin_config},user=root,os_username=${os_username},os_project_name=${os_project_name},os_user_domain_name=${os_user_domain_name},os_project_domain_name=${os_project_domain_name},keystone_get_token_url=${os_auth_url}/tokens\"",
}
}
# TODO: this entire section needs to be removed from SM.
# After these are removed from SM, this entire section of
# deprovision calls will not be needed
@ -840,91 +599,62 @@ class platform::sm
command => 'sm-deprovision service neutron-server',
}
if $heat_service_enabled {
exec { 'Configure OpenStack - Heat Engine':
command => "sm-configure service_instance heat-engine heat-engine \"config=/etc/heat/heat.conf,user=root,database_server_port=${db_server_port},amqp_server_port=${amqp_server_port}\"",
}
# Heat
exec { 'Deprovision OpenStack - Heat Engine (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services heat-engine',
}
-> exec { 'Deprovision OpenStack - Heat Engine(service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service heat-engine',
}
exec { 'Configure OpenStack - Heat API':
command => "sm-configure service_instance heat-api heat-api \"config=/etc/heat/heat.conf,user=root,server_port=${heat_api_port}\"",
}
exec { 'Deprovision OpenStack - Heat API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services heat-api',
}
-> exec { 'Deprovision OpenStack - Heat API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service heat-api',
}
exec { 'Configure OpenStack - Heat API CFN':
command => "sm-configure service_instance heat-api-cfn heat-api-cfn \"config=/etc/heat/heat.conf,user=root,server_port=${heat_api_cfn_port}\"",
}
exec { 'Deprovision OpenStack - Heat API CFN (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services heat-api-cfn',
}
-> exec { 'Deprovision OpenStack - Heat API CFN (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service heat-api-cfn',
}
exec { 'Configure OpenStack - Heat API CloudWatch':
command => "sm-configure service_instance heat-api-cloudwatch heat-api-cloudwatch \"config=/etc/heat/heat.conf,user=root,server_port=${heat_api_cloudwatch_port}\"",
}
} else {
exec { 'Deprovision OpenStack - Heat Engine (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services heat-engine',
}
-> exec { 'Deprovision OpenStack - Heat Engine(service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service heat-engine',
}
exec { 'Deprovision OpenStack - Heat API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services heat-api',
}
-> exec { 'Deprovision OpenStack - Heat API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service heat-api',
}
exec { 'Deprovision OpenStack - Heat API CFN (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services heat-api-cfn',
}
-> exec { 'Deprovision OpenStack - Heat API CFN (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service heat-api-cfn',
}
exec { 'Deprovision OpenStack - Heat API CloudWatch (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services heat-api-cloudwatch',
}
-> exec { 'Deprovision OpenStack - Heat API CloudWatch (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service heat-api-cloudwatch',
}
exec { 'Deprovision OpenStack - Heat API CloudWatch (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services heat-api-cloudwatch',
}
-> exec { 'Deprovision OpenStack - Heat API CloudWatch (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service heat-api-cloudwatch',
}
# Gnocchi
if $gnocchi_enabled {
exec { 'Configure OpenStack - Gnocchi API':
command => "sm-configure service_instance gnocchi-api gnocchi-api \"config=/etc/gnocchi/gnocchi.conf\"",
}
exec { 'Configure OpenStack - Gnocchi metricd':
command => "sm-configure service_instance gnocchi-metricd gnocchi-metricd \"config=/etc/gnocchi/gnocchi.conf\"",
}
} else {
exec { 'Deprovision OpenStack - Gnocchi API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services gnocchi-api',
}
-> exec { 'Deprovision OpenStack - Gnocchi API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service gnocchi-api',
}
exec { 'Deprovision OpenStack - Gnocchi metricd (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services gnocchi-metricd',
}
-> exec { 'Deprovision OpenStack - Gnocchi metricd (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service gnocchi-metricd',
}
exec { 'Deprovision OpenStack - Gnocchi API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services gnocchi-api',
}
-> exec { 'Deprovision OpenStack - Gnocchi API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service gnocchi-api',
}
exec { 'Deprovision OpenStack - Gnocchi metricd (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services gnocchi-metricd',
}
-> exec { 'Deprovision OpenStack - Gnocchi metricd (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service gnocchi-metricd',
}
# AODH (not enabled)
# AODH
exec { 'Deprovision OpenStack - AODH API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services aodh-api',
@ -962,64 +692,15 @@ class platform::sm
}
# Panko
if $panko_enabled {
exec { 'Configure OpenStack - Panko API':
command => "sm-configure service_instance panko-api panko-api \"config=/etc/panko/panko.conf\"",
}
} else {
exec { 'Deprovision OpenStack - Panko API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services panko-api',
}
-> exec { 'Deprovision OpenStack - Panko API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service panko-api',
}
exec { 'Deprovision OpenStack - Panko API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services panko-api',
}
# Murano
exec { 'Configure OpenStack - Murano API':
command => "sm-configure service_instance murano-api murano-api \"config=/etc/murano/murano.conf\"",
-> exec { 'Deprovision OpenStack - Panko API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service panko-api',
}
exec { 'Configure OpenStack - Murano Engine':
command => "sm-configure service_instance murano-engine murano-engine \"config=/etc/murano/murano.conf\"",
}
# Magnum
exec { 'Configure OpenStack - Magnum API':
command => "sm-configure service_instance magnum-api magnum-api \"config=/etc/magnum/magnum.conf\"",
}
exec { 'Configure OpenStack - Magnum Conductor':
command => "sm-configure service_instance magnum-conductor magnum-conductor \"config=/etc/magnum/magnum.conf\"",
}
# Ironic
exec { 'Configure OpenStack - Ironic API':
command => "sm-configure service_instance ironic-api ironic-api \"config=/etc/ironic/ironic.conf\"",
}
exec { 'Configure OpenStack - Ironic Conductor':
command => "sm-configure service_instance ironic-conductor ironic-conductor \"config=/etc/ironic/ironic.conf,tftproot=${ironic_tftproot}\"",
}
exec { 'Configure OpenStack - Nova Compute':
command => "sm-configure service_instance nova-compute nova-compute \"config=/etc/nova/nova-ironic.conf\"",
}
exec { 'Configure OpenStack - Nova Serialproxy':
command => "sm-configure service_instance nova-serialproxy nova-serialproxy \"config=/etc/nova/nova-ironic.conf\"",
}
#exec { 'Configure Power Management Conductor':
# command => "sm-configure service_instance power-mgmt-conductor power-mgmt-conductor \"config=/etc/power_mgmt/power-mgmt-conductor.ini\"",
#}
#exec { 'Configure Power Management API':
# command => "sm-configure service_instance power-mgmt-api power-mgmt-api \"config=/etc/power_mgmt/power-mgmt-api.ini\"",
#}
exec { 'Configure NFS Management':
command => "sm-configure service_instance nfs-mgmt nfs-mgmt \"exports=${nfs_server_mgmt_exports},mounts=${nfs_server_mgmt_mounts}\"",
}
@ -1250,42 +931,14 @@ class platform::sm
}
}
exec { 'Configure Murano Rabbit':
command => "sm-configure service_instance murano-rabbit murano-rabbit \"server=${rabbitmq_server},ctl=${rabbitmqctl},nodename=${murano_rabbit_node_name},mnesia_base=${murano_rabbit_mnesia_base},ip=${oam_ip_param_ip},config_file=${murano_rabbit_config_file},env_config_file=${murano_rabbit_env_config_file},pid_file=${murano_rabbit_pid},dist_port=${murano_rabbit_dist_port}\"",
}
# optionally bring up/down Murano and murano agent's rabbitmq
if $disable_murano_agent {
# Murano
exec { 'Deprovision Murano Rabbitmq (service-group-member)':
command => 'sm-deprovision service-group-member controller-services murano-rabbit',
}
-> exec { 'Deprovision Murano Rabbitmq (service)':
command => 'sm-deprovision service murano-rabbit',
}
} else {
exec { 'Provision Murano Rabbitmq (service-group-member)':
command => 'sm-provision service-group-member controller-services murano-rabbit',
}
-> exec { 'Provision Murano Rabbitmq (service)':
command => 'sm-provision service murano-rabbit',
}
}
if $murano_configured {
exec { 'Provision OpenStack - Murano API (service-group-member)':
command => 'sm-provision service-group-member cloud-services murano-api',
}
-> exec { 'Provision OpenStack - Murano API (service)':
command => 'sm-provision service murano-api',
}
-> exec { 'Provision OpenStack - Murano Engine (service-group-member)':
command => 'sm-provision service-group-member cloud-services murano-engine',
}
-> exec { 'Provision OpenStack - Murano Engine (service)':
command => 'sm-provision service murano-engine',
}
} else {
exec { 'Deprovision OpenStack - Murano API (service-group-member)':
-> exec { 'Deprovision OpenStack - Murano API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services murano-api',
}
-> exec { 'Deprovision OpenStack - Murano API (service)':
@ -1297,23 +950,8 @@ class platform::sm
-> exec { 'Deprovision OpenStack - Murano Engine (service)':
command => 'sm-deprovision service murano-engine',
}
}
# optionally bring up/down Magnum
if $magnum_configured {
exec { 'Provision OpenStack - Magnum API (service-group-member)':
command => 'sm-provision service-group-member cloud-services magnum-api',
}
-> exec { 'Provision OpenStack - Magnum API (service)':
command => 'sm-provision service magnum-api',
}
-> exec { 'Provision OpenStack - Magnum Conductor (service-group-member)':
command => 'sm-provision service-group-member cloud-services magnum-conductor',
}
-> exec { 'Provision OpenStack - Magnum Conductor (service)':
command => 'sm-provision service magnum-conductor',
}
} else {
# Magnum
exec { 'Deprovision OpenStack - Magnum API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services magnum-api',
}
@ -1326,88 +964,37 @@ class platform::sm
-> exec { 'Deprovision OpenStack - Magnum Conductor (service)':
command => 'sm-deprovision service magnum-conductor',
}
# Ironic
exec { 'Deprovision OpenStack - Ironic API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services ironic-api',
}
# optionally bring up/down Ironic
if $ironic_configured {
exec { 'Provision OpenStack - Ironic API (service-group-member)':
command => 'sm-provision service-group-member cloud-services ironic-api',
}
-> exec { 'Provision OpenStack - Ironic API (service)':
command => 'sm-provision service ironic-api',
}
-> exec { 'Provision OpenStack - Ironic Conductor (service-group-member)':
command => 'sm-provision service-group-member cloud-services ironic-conductor',
}
-> exec { 'Provision OpenStack - Ironic Conductor (service)':
command => 'sm-provision service ironic-conductor',
}
-> exec { 'Provision OpenStack - Nova Compute (service-group-member)':
command => 'sm-provision service-group-member cloud-services nova-compute',
}
-> exec { 'Provision OpenStack - Nova Compute (service)':
command => 'sm-provision service nova-compute',
}
-> exec { 'Provision OpenStack - Nova Serialproxy (service-group-member)':
command => 'sm-provision service-group-member cloud-services nova-serialproxy',
}
-> exec { 'Provision OpenStack - Nova Serialproxy (service)':
command => 'sm-provision service nova-serialproxy',
}
if $ironic_tftp_ip != undef {
case $::hostname {
$controller_0_hostname: {
exec { 'Configure Ironic TFTP IP service instance':
command => "sm-configure service_instance ironic-tftp-ip ironic-tftp-ip \"ip=${ironic_tftp_ip},cidr_netmask=${ironic_netmask},nic=${ironic_controller_0_nic},arp_count=7\"",
}
}
$controller_1_hostname: {
exec { 'Configure Ironic TFTP IP service instance':
command => "sm-configure service_instance ironic-tftp-ip ironic-tftp-ip \"ip=${ironic_tftp_ip},cidr_netmask=${ironic_netmask},nic=${ironic_controller_1_nic},arp_count=7\"",
}
}
default: {
}
}
exec { 'Provision Ironic TFTP Floating IP (service-group-member)':
command => 'sm-provision service-group-member controller-services ironic-tftp-ip',
}
-> exec { 'Provision Ironic TFTP Floating IP (service)':
command => 'sm-provision service ironic-tftp-ip',
}
}
} else {
exec { 'Deprovision OpenStack - Ironic API (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services ironic-api',
}
-> exec { 'Deprovision OpenStack - Ironic API (service)':
command => 'sm-deprovision service ironic-api',
}
-> exec { 'Deprovision OpenStack - Ironic Conductor (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services ironic-conductor',
}
-> exec { 'Deprovision OpenStack - Ironic Conductor (service)':
command => 'sm-deprovision service ironic-conductor',
}
-> exec { 'Deprovision OpenStack - Nova Compute (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-compute',
}
-> exec { 'Deprovision OpenStack - Nova Compute (service)':
command => 'sm-deprovision service nova-compute',
}
-> exec { 'Deprovision OpenStack - Nova Serialproxy (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-serialproxy',
}
-> exec { 'Deprovision OpenStack - Nova Serialproxy (service)':
command => 'sm-deprovision service nova-serialproxy',
}
-> exec { 'Provision Ironic TFTP Floating IP (service-group-member)':
command => 'sm-deprovision service-group-member controller-services ironic-tftp-ip',
}
-> exec { 'Provision Ironic TFTP Floating IP (service)':
command => 'sm-deprovision service ironic-tftp-ip',
}
-> exec { 'Deprovision OpenStack - Ironic API (service)':
command => 'sm-deprovision service ironic-api',
}
-> exec { 'Deprovision OpenStack - Ironic Conductor (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services ironic-conductor',
}
-> exec { 'Deprovision OpenStack - Ironic Conductor (service)':
command => 'sm-deprovision service ironic-conductor',
}
-> exec { 'Deprovision OpenStack - Nova Compute (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-compute',
}
-> exec { 'Deprovision OpenStack - Nova Compute (service)':
command => 'sm-deprovision service nova-compute',
}
-> exec { 'Deprovision OpenStack - Nova Serialproxy (service-group-member)':
command => 'sm-deprovision service-group-member cloud-services nova-serialproxy',
}
-> exec { 'Deprovision OpenStack - Nova Serialproxy (service)':
command => 'sm-deprovision service nova-serialproxy',
}
-> exec { 'Provision Ironic TFTP Floating IP (service-group-member)':
command => 'sm-deprovision service-group-member controller-services ironic-tftp-ip',
}
-> exec { 'Provision Ironic TFTP Floating IP (service)':
command => 'sm-deprovision service ironic-tftp-ip',
}
if $ceph_configured {
@ -1587,17 +1174,15 @@ class platform::sm
-> exec { 'Configure OpenStack - DCOrch-patch-api-proxy':
command => "sm-configure service_instance dcorch-patch-api-proxy dcorch-patch-api-proxy \"\"",
}
if $cinder_service_enabled {
notice('Enable cinder-api-proxy')
exec { 'Provision DCOrch-Cinder-Api-Proxy (service-group-member dcorch-cinder-api-proxy)':
command => 'sm-provision service-group-member distributed-cloud-services dcorch-cinder-api-proxy',
}
-> exec { 'Provision DCOrch-Cinder-Api-Proxy in SM (service dcorch-cinder-api-proxy)':
command => 'sm-provision service dcorch-cinder-api-proxy',
}
-> exec { 'Configure OpenStack - DCOrch-cinder-api-proxy':
command => "sm-configure service_instance dcorch-cinder-api-proxy dcorch-cinder-api-proxy \"\"",
}
exec { 'Provision DCOrch-Cinder-Api-Proxy (service-group-member dcorch-cinder-api-proxy)':
command => 'sm-provision service-group-member distributed-cloud-services dcorch-cinder-api-proxy',
}
-> exec { 'Provision DCOrch-Cinder-Api-Proxy in SM (service dcorch-cinder-api-proxy)':
command => 'sm-provision service dcorch-cinder-api-proxy',
}
-> exec { 'Configure OpenStack - DCOrch-cinder-api-proxy':
command => "sm-configure service_instance dcorch-cinder-api-proxy dcorch-cinder-api-proxy \"\"",
}
}

View File

@ -6,9 +6,6 @@ interface=<%= @mgmt_interface %>
<%- if @infra_interface != nil -%>
interface=<%= @infra_interface %>
<%- end -%>
<%- if @ironic_tftp_interface != nil -%>
interface=<%= @ironic_tftp_interface %>
<%- end -%>
bind-interfaces
# Serve addresses from the pxeboot subnet
@ -92,9 +89,6 @@ tftp-root=/pxeboot,<%= @pxeboot_interface %>
<%- else -%>
tftp-root=/pxeboot,<%= @mgmt_interface %>
<%- end -%>
<%- if @ironic_tftp_interface != nil -%>
tftp-root=<%= @ironic_tftpboot_dir %>,<%= @ironic_tftp_interface %>
<%- end -%>
dhcp-boot=tag:bios,tag:pxeboot,pxelinux.0,<%= @pxeboot_hostname %>,<%= @pxeboot_controller_address %>
dhcp-boot=tag:bios,tag:mgmt,pxelinux.0,<%= @mgmt_hostname %>,<%= @mgmt_controller_address %>

View File

@ -10,12 +10,6 @@
#
################################################################################
rewrite r_rewrite_set{
set("<%= @system_name %> aodh-api.log ${HOST}", value("HOST") condition(filter(f_aodhapi)));
set("<%= @system_name %> aodh-dbsync.log ${HOST}", value("HOST") condition(filter(f_aodhdbsync)));
set("<%= @system_name %> aodh-evaluator.log ${HOST}", value("HOST") condition(filter(f_aodhevaluator)));
set("<%= @system_name %> aodh-expirer.log ${HOST}", value("HOST") condition(filter(f_aodhexpirer)));
set("<%= @system_name %> aodh-listener.log ${HOST}", value("HOST") condition(filter(f_aodhlistener)));
set("<%= @system_name %> aodh-notifier.log ${HOST}", value("HOST") condition(filter(f_aodhnotifier)));
set("<%= @system_name %> auth.log ${HOST}", value("HOST") condition(filter(f_auth)));
set("<%= @system_name %> barbican-api.log ${HOST}", value("HOST") condition(filter(f_barbicanapi)));
set("<%= @system_name %> barbican-dbsync.log ${HOST}", value("HOST") condition(filter(f_barbicandbsync)));
@ -23,11 +17,6 @@ rewrite r_rewrite_set{
set("<%= @system_name %> barbican-worker.log ${HOST}", value("HOST") condition(filter(f_barbicanworker)));
set("<%= @system_name %> barbican-cleaner.log ${HOST}", value("HOST") condition(filter(f_barbicancleaner)));
set("<%= @system_name %> bash.log ${HOST}", value("HOST") condition(filter(f_bash)));
set("<%= @system_name %> ceilometer-agent-notification.log ${HOST}", value("HOST") condition(filter(f_ceilometeragentnotification)));
set("<%= @system_name %> ceilometer-upgrade.log ${HOST}", value("HOST") condition(filter(f_ceilometerupgrade)));
set("<%= @system_name %> cinder-api.log ${HOST}", value("HOST") condition(filter(f_cinderapi)));
set("<%= @system_name %> cinder-scheduler.log ${HOST}", value("HOST") condition(filter(f_cinderscheduler)));
set("<%= @system_name %> cinder-volume.log ${HOST}", value("HOST") condition(filter(f_cindervolume)));
set("<%= @system_name %> cron.log ${HOST}", value("HOST") condition(filter(f_cron)));
set("<%= @system_name %> daemon.log ${HOST}", value("HOST") condition(filter(f_daemon)));
set("<%= @system_name %> daemon-ocf.log ${HOST}", value("HOST") condition(filter(f_daemon_ocf)));
@@ -37,56 +26,32 @@ rewrite r_rewrite_set{
set("<%= @system_name %> fm-manager.log ${HOST}", value("HOST") condition(filter(f_fm_manager)));
set("<%= @system_name %> ima.log ${HOST}", value("HOST") condition(filter(f_ima)));
set("<%= @system_name %> fsmond.log ${HOST}", value("HOST") condition(filter(f_fsmon)));
set("<%= @system_name %> glance-api.log ${HOST}", value("HOST") condition(filter(f_glanceapi)));
set("<%= @system_name %> glance-registry.log ${HOST}", value("HOST") condition(filter(f_glanceregistry)));
set("<%= @system_name %> glance-registry-api.log ${HOST}", value("HOST") condition(filter(f_glanceregistryrest)));
set("<%= @system_name %> guestAgent.log ${HOST}", value("HOST") condition(filter(f_guestagent)));
set("<%= @system_name %> guestServer.log ${HOST}", value("HOST") condition(filter(f_guestserver)));
set("<%= @system_name %> hbsAgent.log ${HOST}", value("HOST") condition(filter(f_hbsagent)));
set("<%= @system_name %> hbsClient.log ${HOST}", value("HOST") condition(filter(f_hbsclient)));
set("<%= @system_name %> heat-api-cfn.log ${HOST}", value("HOST") condition(filter(f_heatapicfn)));
set("<%= @system_name %> heat-api-cloudwatch.log ${HOST}", value("HOST") condition(filter(f_heatapicloud)));
set("<%= @system_name %> heat-api.log ${HOST}", value("HOST") condition(filter(f_heatapi)));
set("<%= @system_name %> heat-engine.log ${HOST}", value("HOST") condition(filter(f_heatengine)));
set("<%= @system_name %> horizon.log ${HOST}", value("HOST") condition(filter(f_horizon)));
set("<%= @system_name %> hostwd.log ${HOST}", value("HOST") condition(filter(f_hostw)));
set("<%= @system_name %> hwmond.log ${HOST}", value("HOST") condition(filter(f_hwmon)));
set("<%= @system_name %> ironic-api.log ${HOST}", value("HOST") condition(filter(f_ironicapi)));
set("<%= @system_name %> ironic-conductor.log ${HOST}", value("HOST") condition(filter(f_ironicconductor)));
set("<%= @system_name %> kern.log ${HOST}", value("HOST") condition(filter(f_kern)));
set("<%= @system_name %> keystone-api.log ${HOST}", value("HOST") condition(filter(f_keystoneapi)));
set("<%= @system_name %> keystone-all.log ${HOST}", value("HOST") condition(filter(f_keystoneall)));
set("<%= @system_name %> libvirtd.log ${HOST}", value("HOST") condition(filter(f_libvirtd)));
set("<%= @system_name %> local4.log ${HOST}", value("HOST") condition(filter(f_local4)));
set("<%= @system_name %> lpr.log ${HOST}", value("HOST") condition(filter(f_lpr)));
set("<%= @system_name %> magnum-api.log ${HOST}", value("HOST") condition(filter(f_magnumapi)));
set("<%= @system_name %> magnum-conductor.log ${HOST}", value("HOST") condition(filter(f_magnumconductor)));
set("<%= @system_name %> mail.log ${HOST}", value("HOST") condition(filter(f_mail)));
set("<%= @system_name %> mtcAgent_alarm.log ${HOST}", value("HOST") condition(filter(f_mtcagentalarm)));
set("<%= @system_name %> mtcAgent_api.log ${HOST}", value("HOST") condition(filter(f_mtcagentapi)));
set("<%= @system_name %> mtcAgent_event.log ${HOST}", value("HOST") condition(filter(f_mtcagentevent)));
set("<%= @system_name %> mtcAgent.log ${HOST}", value("HOST") condition(filter(f_mtcagent)));
set("<%= @system_name %> mtcClient.log ${HOST}", value("HOST") condition(filter(f_mtcclient)));
set("<%= @system_name %> murano-api.log ${HOST}", value("HOST") condition(filter(f_muranoapi)));
set("<%= @system_name %> murano-engine.log ${HOST}", value("HOST") condition(filter(f_muranoengine)));
set("<%= @system_name %> news.crit ${HOST}", value("HOST") condition(filter(f_newscrit)));
set("<%= @system_name %> news.err ${HOST}", value("HOST") condition(filter(f_newserr)));
set("<%= @system_name %> news.notice ${HOST}", value("HOST") condition(filter(f_newsnotice)));
set("<%= @system_name %> nfv-vim-api.log ${HOST}", value("HOST") condition(filter(f_vim_api)));
set("<%= @system_name %> nfv-vim.log ${HOST}", value("HOST") condition(filter(f_vim)));
set("<%= @system_name %> nfv-vim-webserver.log ${HOST}", value("HOST") condition(filter(f_vim_webserver)));
set("<%= @system_name %> nova-api.log ${HOST}", value("HOST") condition(filter(f_novaapi)));
set("<%= @system_name %> nova-compute.log ${HOST}", value("HOST") condition(filter(f_novacompute)));
set("<%= @system_name %> nova-conductor.log ${HOST}", value("HOST") condition(filter(f_novaconductor)));
set("<%= @system_name %> nova-consoleauth.log ${HOST}", value("HOST") condition(filter(f_novaconsole)));
set("<%= @system_name %> nova-manage.log ${HOST}", value("HOST") condition(filter(f_novamanage)));
set("<%= @system_name %> nova-scheduler.log ${HOST}", value("HOST") condition(filter(f_novascheduler)));
set("<%= @system_name %> nova-placement-api.log ${HOST}", value("HOST") condition(filter(f_novaplacementapi)));
set("<%= @system_name %> neutron-api.log ${HOST}", value("HOST") condition(filter(f_neutronapi)));
set("<%= @system_name %> openstack.log ${HOST}", value("HOST") condition(filter(f_local2)));
set("<%= @system_name %> panko-api.log ${HOST}", value("HOST") condition(filter(f_pankoapi)));
set("<%= @system_name %> panko-dbsync.log ${HOST}", value("HOST") condition(filter(f_pankodbsync)));
set("<%= @system_name %> panko-expirer.log ${HOST}", value("HOST") condition(filter(f_pankoexpirer)));
set("<%= @system_name %> platform.log ${HOST}", value("HOST") condition(filter(f_local1)));
set("<%= @system_name %> pmond.log ${HOST}", value("HOST") condition(filter(f_pmon)));
set("<%= @system_name %> postgres.log ${HOST}", value("HOST") condition(filter(f_local0)));
@@ -94,7 +59,6 @@ rewrite r_rewrite_set{
set("<%= @system_name %> rmond_notify.log ${HOST}", value("HOST") condition(filter(f_rmon_notify)));
set("<%= @system_name %> sm.log ${HOST}", value("HOST") condition(filter(f_local3)));
set("<%= @system_name %> sysinv-api.log ${HOST}", value("HOST") condition(filter(f_sysinvapi)));
set("<%= @system_name %> nova-api-proxy.log ${HOST}", value("HOST") condition(filter(f_novaapiproxy)));
set("<%= @system_name %> sysinv.log ${HOST}", value("HOST") condition(filter(f_sysinv)));
set("<%= @system_name %> syslog ${HOST}", value("HOST") condition(filter(f_syslog)));
set("<%= @system_name %> user.log ${HOST}", value("HOST") condition(filter(f_user)));