Removing unused puppet plugins from sysinv

nova, neutron, swift, and heat are not installed on bare metal, so they
do not need to be configured through puppet plugins.

The storage hiera data previously calculated in the nova plugin has been
moved to the storage puppet plugin.
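
For context, a sysinv puppet plugin is a class registered under the
systemconfig.puppet_plugins entry point that returns dicts of hiera
key/value pairs. A minimal sketch of the shape shared by the plugins
touched in this change (the method names come from the code below; the
class name, keys, and values are purely illustrative):

from sysinv.puppet import base

class ExamplePuppet(base.BasePuppet):
    """Hypothetical plugin contributing hiera data for one service."""

    def get_system_config(self):
        # system-wide hiera data, regenerated whenever config changes
        return {'example::params::region_name': 'RegionOne'}

    def get_host_config(self, host):
        # per-host hiera data, e.g. storage or compute settings
        return {'example::params::host_uuid': host.uuid}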

Change-Id: Icb1cf333292e17230f871b5227bde9e2ca8ad287
Story: 2004515
Task: 36183
Signed-off-by: Al Bailey <Al.Bailey@windriver.com>
Al Bailey 2019-08-07 13:47:51 -05:00
parent c2f8ada350
commit 1d24b7f50e
11 changed files with 74 additions and 1103 deletions


@@ -75,26 +75,11 @@ def get_db_credentials(shared_services, from_release):
{'barbican': {'hiera_user_key': 'barbican::db::postgresql::user',
'keyring_password_key': 'barbican',
},
'heat': {'hiera_user_key': 'heat::db::postgresql::user',
'keyring_password_key': 'heat',
},
'nova': {'hiera_user_key': 'nova::db::postgresql::user',
'keyring_password_key': 'nova',
},
'nova_api': {'hiera_user_key': 'nova::db::postgresql_api::user',
'keyring_password_key': 'nova-api',
},
'sysinv': {'hiera_user_key': 'sysinv::db::postgresql::user',
'keyring_password_key': 'sysinv',
},
}
if sysinv_constants.SERVICE_TYPE_VOLUME not in shared_services:
db_credential_keys.update(
{'cinder': {'hiera_user_key': 'cinder::db::postgresql::user',
'keyring_password_key': 'cinder',
}})
if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
db_credential_keys.update(
{'keystone': {'hiera_user_key':
@@ -509,21 +494,6 @@ def migrate_databases(from_release, shared_services, db_credentials,
# Create minimal config files for each OpenStack service so they can
# run their database migration.
with open("/etc/heat/heat-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(db_credentials, 'heat'))
with open("/etc/nova/nova-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(db_credentials, 'nova'))
f.write("[api_database]\n")
f.write(get_connection_string(db_credentials, 'nova_api'))
if sysinv_constants.SERVICE_TYPE_VOLUME not in shared_services:
with open("/etc/cinder/cinder-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(db_credentials, 'cinder'))
if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
with open("/etc/keystone/keystone-dbsync.conf", "w") as f:
f.write("[database]\n")
@@ -538,26 +508,8 @@ def migrate_databases(from_release, shared_services, db_credentials,
('barbican',
'barbican-manage --config-file /etc/barbican/barbican-dbsync.conf ' +
'db upgrade'),
# Migrate heat
('heat',
'heat-manage --config-file /etc/heat/heat-dbsync.conf db_sync'),
# Migrate nova
('nova',
'nova-manage --config-file /etc/nova/nova-dbsync.conf db sync'),
# Migrate nova_api (new in R3)
('nova',
'nova-manage --config-file /etc/nova/nova-dbsync.conf api_db sync'),
]
if sysinv_constants.SERVICE_TYPE_VOLUME not in shared_services:
migrate_commands += [
# Migrate cinder to latest version
('cinder',
'cinder-manage --config-file /etc/cinder/cinder-dbsync.conf ' +
'db sync'),
]
if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
# To avoid a deadlock during keystone contract we will use offline
# migration for simplex upgrades. Other upgrades will have to use
@@ -603,26 +555,6 @@ def migrate_databases(from_release, shared_services, db_credentials,
(cmd[1], ex.returncode))
raise
# We need to run nova's online DB migrations to complete any DB changes.
# This needs to be done before the computes are upgraded. In other words
# as controller-1 is being upgraded
try:
output = subprocess.check_output(
['nova-manage', '--config-file', '/etc/nova/nova-dbsync.conf',
'db', 'online_data_migrations'])
if 'Error' in output:
LOG.exception("Error detected running nova "
"online_data_migrations. Output %s", output)
raise Exception("Error detected running nova "
"online_data_migrations.")
else:
LOG.info(
"Done running nova online_data_migrations. Output: %s", output)
except subprocess.CalledProcessError as e:
LOG.exception("Nonzero return value running nova "
"online_data_migrations. Output: %s", e.output)
raise
# The database entry for controller-1 will be set to whatever it was when
# the sysinv database was dumped on controller-0. Update the state and
# from/to load to what it should be when it becomes active.
@@ -1115,17 +1047,6 @@ def persist_platform_data(staging_dir):
raise
def update_cinder_state():
""" The backing store for cinder volumes and snapshots is not
restored, so their status must be set to error.
"""
conn = psycopg2.connect("dbname=cinder user=postgres")
with conn:
with conn.cursor() as cur:
cur.execute("UPDATE VOLUMES SET STATUS='error';")
cur.execute("UPDATE SNAPSHOTS SET STATUS='error';")
def get_simplex_metadata(archive, staging_dir):
"""Gets the metadata from the archive"""
@@ -1350,8 +1271,6 @@ def upgrade_controller_simplex(backup_file):
utils.execute_migration_scripts(
from_release, to_release, utils.ACTION_MIGRATE)
update_cinder_state()
# Generate "regular" manifests
LOG.info("Generating manifests for %s" %
sysinv_constants.CONTROLLER_0_HOSTNAME)
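
The heat and nova (service, command) tuples removed above fed the same
loop that still runs the remaining migrations. That loop body is not part
of this hunk, so the sketch below is a hedged reconstruction inferred from
the visible tuples and the cmd[1]/ex.returncode references, not the exact
source:

import logging
import subprocess

LOG = logging.getLogger(__name__)

for cmd in migrate_commands:  # the (service, command) tuples built above
    try:
        # run the dbsync command for this service, raising on failure
        subprocess.check_output(cmd[1].split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        LOG.exception("Command %s failed with return code: %d" %
                      (cmd[1], ex.returncode))
        raise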


@@ -51,17 +51,13 @@ systemconfig.puppet_plugins =
011_ceph = sysinv.puppet.ceph:CephPuppet
012_device = sysinv.puppet.device:DevicePuppet
013_storage = sysinv.puppet.storage:StoragePuppet
014_nova = sysinv.puppet.nova:NovaPuppet
015_neutron = sysinv.puppet.neutron:NeutronPuppet
016_horizon = sysinv.puppet.horizon:HorizonPuppet
022_heat = sysinv.puppet.heat:HeatPuppet
027_dcmanager = sysinv.puppet.dcmanager:DCManagerPuppet
028_dcorch = sysinv.puppet.dcorch:DCOrchPuppet
029_dcdbsync = sysinv.puppet.dcdbsync:DCDBsyncPuppet
030_kubernetes = sysinv.puppet.kubernetes:KubernetesPuppet
031_smapi = sysinv.puppet.smapi:SmPuppet
032_fm = sysinv.puppet.fm:FmPuppet
033_swift = sysinv.puppet.swift:SwiftPuppet
034_barbican = sysinv.puppet.barbican:BarbicanPuppet
035_dockerdistribution = sysinv.puppet.dockerdistribution:DockerDistributionPuppet
036_pciirqaffinity = sysinv.puppet.pci_irq_affinity:PciIrqAffinityPuppet
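
The plugins are discovered by entry-point name, so deleting the 014_nova,
015_neutron, 022_heat, and 033_swift entries is enough to stop those
plugins from being loaded at all. A generic sketch of how such a namespace
is typically consumed with stevedore (illustrative only; this is not the
exact sysinv loader):

from stevedore import extension

manager = extension.ExtensionManager(namespace='systemconfig.puppet_plugins')
# Extensions keep their entry-point names, so the numeric prefixes
# (011_ceph, 013_storage, ...) give a stable evaluation order.
for ext in sorted(manager.extensions, key=lambda e: e.name):
    plugin_cls = ext.plugin  # e.g. sysinv.puppet.storage:StoragePuppet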


@@ -386,6 +386,10 @@ SB_DEFAULT_NAMES = {
SB_TYPE_EXTERNAL: 'shared_services'
}
# Service names
SERVICE_NAME_NOVA = 'nova'
SERVICE_NAME_NEUTRON = 'neutron'
# Storage backends services
SB_SVC_CINDER = 'cinder'
SB_SVC_GLANCE = 'glance'


@@ -1,177 +0,0 @@
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.puppet import openstack
from sysinv.common import constants
class HeatPuppet(openstack.OpenstackBasePuppet):
"""Class to encapsulate puppet operations for heat configuration"""
SERVICE_NAME = 'heat'
SERVICE_PORT = 8004
SERVICE_PORT_CFN = 8000
SERVICE_PORT_CLOUDWATCH = 8003
SERVICE_PATH = 'v1/%(tenant_id)s'
SERVICE_PATH_WAITCONDITION = 'v1/waitcondition'
DEFAULT_DOMAIN_NAME = 'heat'
DEFAULT_STACK_ADMIN = 'heat_admin'
SERVICE_NAME_DOMAIN = 'heat-domain'
def get_static_config(self):
dbuser = self._get_database_username(self.SERVICE_NAME)
return {
'heat::db::postgresql::user': dbuser,
}
def get_secure_static_config(self):
dbpass = self._get_database_password(self.SERVICE_NAME)
kspass = self._get_service_password(self.SERVICE_NAME)
dkspass = self._get_service_password(self.SERVICE_NAME_DOMAIN)
return {
'heat::db::postgresql::password': dbpass,
'heat::keystone::auth::password': kspass,
'heat::keystone::auth_cfn::password': kspass,
'heat::keystone::authtoken::password': kspass,
'heat::keystone::domain::domain_password': dkspass,
'heat::engine::auth_encryption_key':
self._generate_random_password(length=32),
'openstack::heat::params::domain_pwd': dkspass,
}
def get_system_config(self):
ksuser = self._get_service_user_name(self.SERVICE_NAME)
config = {
'heat::keystone_ec2_uri': self._operator.keystone.get_auth_url(),
'heat::region_name': self.get_region_name(),
'heat::engine::heat_metadata_server_url':
self._get_metadata_url(),
'heat::engine::heat_waitcondition_server_url':
self._get_waitcondition_url(),
'heat::engine::heat_watch_server_url':
self._get_cloudwatch_url(),
'heat::keystone::domain::domain_name': self._get_stack_domain(),
'heat::keystone::domain::domain_admin': self._get_stack_admin(),
'heat::keystone::auth::region': self.get_region_name(),
'heat::keystone::auth::public_url': self.get_public_url(),
'heat::keystone::auth::internal_url': self.get_internal_url(),
'heat::keystone::auth::admin_url': self.get_admin_url(),
'heat::keystone::auth::auth_name': ksuser,
'heat::keystone::auth::tenant': self._get_service_tenant_name(),
'heat::keystone::auth_cfn::region':
self.get_region_name(),
'heat::keystone::auth_cfn::public_url':
self.get_public_url_cfn(),
'heat::keystone::auth_cfn::internal_url':
self.get_internal_url_cfn(),
'heat::keystone::auth_cfn::admin_url':
self.get_admin_url_cfn(),
'heat::keystone::auth_cfn::auth_name': ksuser,
'heat::keystone::auth_cfn::tenant':
self._get_service_tenant_name(),
'heat::keystone::authtoken::auth_url':
self._keystone_identity_uri(),
'heat::keystone::authtoken::auth_uri':
self._keystone_auth_uri(),
'heat::keystone::authtoken::user_domain_name':
self._get_service_user_domain_name(),
'heat::keystone::authtoken::project_domain_name':
self._get_service_project_domain_name(),
'heat::keystone::authtoken::project_name':
self._get_service_tenant_name(),
'heat::keystone::authtoken::username': ksuser,
'openstack::heat::params::domain_name': self._get_stack_domain(),
'openstack::heat::params::domain_admin': self._get_stack_admin(),
'openstack::heat::params::region_name': self.get_region_name(),
'openstack::heat::params::domain_pwd':
self._get_service_password(self.SERVICE_NAME_DOMAIN),
'openstack::heat::params::service_tenant':
self._get_service_tenant_name(),
'openstack::heat::params::service_create':
self._to_create_services(),
}
if (self._distributed_cloud_role() ==
constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER):
config.update({'openstack::heat::params::service_enabled': False,
'heat::keystone::auth::configure_endpoint': False,
'heat::keystone::auth_cfn::configure_endpoint':
False})
return config
def get_secure_system_config(self):
config = {
'heat::database_connection':
self._format_database_connection(self.SERVICE_NAME),
}
return config
def get_public_url(self):
return self._format_public_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_internal_url(self):
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_admin_url(self):
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_public_url_cfn(self):
return self._format_public_endpoint(self.SERVICE_PORT_CFN,
path=self.SERVICE_PATH)
def get_internal_url_cfn(self):
return self._format_private_endpoint(self.SERVICE_PORT_CFN,
path=self.SERVICE_PATH)
def get_admin_url_cfn(self):
return self._format_private_endpoint(self.SERVICE_PORT_CFN,
path=self.SERVICE_PATH)
def _get_metadata_url(self):
return self._format_public_endpoint(self.SERVICE_PORT_CFN)
def get_region_name(self):
return self._get_service_region_name(self.SERVICE_NAME)
def _get_waitcondition_url(self):
return self._format_public_endpoint(
self.SERVICE_PORT_CFN, path=self.SERVICE_PATH_WAITCONDITION)
def _get_cloudwatch_url(self):
return self._format_public_endpoint(self.SERVICE_PORT_CLOUDWATCH)
def _get_stack_domain(self):
if self._region_config():
service_config = self._get_service_config(self.SERVICE_NAME)
if service_config is not None:
return service_config.capabilities.get('admin_domain_name')
return self.DEFAULT_DOMAIN_NAME
def _get_stack_admin(self):
if self._region_config():
service_config = self._get_service_config(self.SERVICE_NAME)
if service_config is not None:
return service_config.capabilities.get('admin_user_name')
return self.DEFAULT_STACK_ADMIN


@@ -45,8 +45,8 @@ class SystemInventoryPuppet(openstack.OpenstackBasePuppet):
def get_system_config(self):
ksuser = self._get_service_user_name(self.SERVICE_NAME)
neutron_region_name = self._operator.neutron.get_region_name()
nova_region_name = self._operator.nova.get_region_name()
neutron_region_name = self._get_service_region_name(constants.SERVICE_NAME_NEUTRON)
nova_region_name = self._get_service_region_name(constants.SERVICE_NAME_NOVA)
barbican_region_name = self._operator.barbican.get_region_name()
return {


@@ -1,195 +0,0 @@
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants
from sysinv.common import utils
from sysinv.puppet import interface
from sysinv.puppet import openstack
from oslo_log import log
LOG = log.getLogger(__name__)
class NeutronPuppet(openstack.OpenstackBasePuppet):
"""Class to encapsulate puppet operations for neutron configuration"""
SERVICE_NAME = 'neutron'
SERVICE_PORT = 9696
def get_static_config(self):
dbuser = self._get_database_username(self.SERVICE_NAME)
return {
'neutron::keystone::authtoken::user_domain_name':
self._get_service_user_domain_name(),
'neutron::keystone::authtoken::project_domain_name':
self._get_service_project_domain_name(),
'neutron::keystone::authtoken::project_name':
self._get_service_tenant_name(),
'neutron::server::notifications::user_domain_name':
self._get_service_user_domain_name(),
'neutron::server::notifications::project_domain_name':
self._get_service_project_domain_name(),
'neutron::server::notifications::project_name':
self._get_service_tenant_name(),
'neutron::db::postgresql::user': dbuser,
}
def get_secure_static_config(self):
dbpass = self._get_database_password(self.SERVICE_NAME)
kspass = self._get_service_password(self.SERVICE_NAME)
return {
'neutron::keystone::auth::password': kspass,
'neutron::keystone::authtoken::password': kspass,
'neutron::db::postgresql::password': dbpass,
'neutron::server::notifications::password':
self._get_service_password(
self._operator.nova.SERVICE_NAME),
'neutron::agents::metadata::shared_secret':
self._get_service_password(
self._operator.nova.SERVICE_METADATA),
}
def get_system_config(self):
neutron_nova_region_name = \
self._get_service_region_name(self._operator.nova.SERVICE_NAME)
ksuser = self._get_service_user_name(self.SERVICE_NAME)
config = {
'neutron::server::notifications::auth_url':
self._keystone_identity_uri(),
'neutron::server::notifications::tenant_name':
self._get_service_tenant_name(),
'neutron::server::notifications::project_name':
self._get_service_tenant_name(),
'neutron::server::notifications::region_name':
neutron_nova_region_name,
'neutron::server::notifications::username':
self._get_service_user_name(self._operator.nova.SERVICE_NAME),
'neutron::server::notifications::project_domain_name':
self._get_service_project_domain_name(),
'neutron::server::notifications::user_domain_name':
self._get_service_user_domain_name(),
'neutron::agents::metadata::metadata_ip':
self._get_management_address(),
'neutron::keystone::authtoken::auth_url':
self._keystone_identity_uri(),
'neutron::keystone::authtoken::auth_uri':
self._keystone_auth_uri(),
'neutron::keystone::authtoken::username': ksuser,
'neutron::keystone::authtoken::project_name':
self._get_service_tenant_name(),
'neutron::keystone::authtoken::user_domain_name':
self._get_service_user_domain_name(),
'neutron::keystone::authtoken::project_domain_name':
self._get_service_project_domain_name(),
'neutron::keystone::authtoken::region_name':
self._keystone_region_name(),
'neutron::keystone::auth::public_url': self.get_public_url(),
'neutron::keystone::auth::internal_url': self.get_internal_url(),
'neutron::keystone::auth::admin_url': self.get_admin_url(),
'neutron::keystone::auth::region': self._region_name(),
'neutron::keystone::auth::auth_name': ksuser,
'neutron::keystone::auth::tenant': self._get_service_tenant_name(),
'neutron::bind_host': self._get_management_address(),
'openstack::neutron::params::region_name':
self.get_region_name(),
'openstack::neutron::params::service_create':
self._to_create_services(),
}
# no need to configure neutron endpoint as the proxy provides
# the endpoints in SystemController
if (self._distributed_cloud_role() ==
constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER):
config.update({
'neutron::keystone::auth::configure_endpoint': False,
'openstack::neutron::params::configure_endpoint': False,
})
config.update(self._get_sdn_controller_config())
return config
def get_secure_system_config(self):
config = {
'neutron::server::database_connection':
self._format_database_connection(self.SERVICE_NAME),
}
return config
def _get_sdn_controller_config(self):
if not self._sdn_enabled():
return {}
controller_config = {}
for controller in self.dbapi.sdn_controller_get_list():
# skip SDN controllers that are in disabled state
if controller.state != constants.SDN_CONTROLLER_STATE_ENABLED:
continue
# openstack::neutron::sdn::controller puppet resource parameters
name = 'sdn_controller_%d' % controller.id
config = {
'transport': controller.transport.lower(),
'ip_address': str(controller.ip_address),
'port': controller.port,
}
controller_config.update({name: config})
return {
'openstack::neutron::odl::params::controller_config':
controller_config
}
def get_host_config(self, host):
if (constants.CONTROLLER not in utils.get_personalities(host) and
constants.WORKER not in utils.get_personalities(host)):
return {}
device_mappings = []
for iface in self.context['interfaces'].values():
if (iface['ifclass'] in [constants.INTERFACE_CLASS_PCI_SRIOV]):
port = interface.get_interface_port(self.context, iface)
datanets = interface.get_interface_datanets(
self.context, iface)
for dnet in datanets:
device_mappings.append(
"%s:%s" % (dnet['name'], port['name']))
LOG.debug("get_host_config device_mappings=%s" %
device_mappings)
config = {
'neutron::agents::ml2::sriov::physical_device_mappings':
device_mappings,
}
return config
def get_public_url(self):
return self._format_public_endpoint(self.SERVICE_PORT)
def get_internal_url(self):
return self._format_private_endpoint(self.SERVICE_PORT)
def get_admin_url(self):
return self._format_private_endpoint(self.SERVICE_PORT)
def get_region_name(self):
return self._get_service_region_name(self.SERVICE_NAME)
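
As a worked example of the SDN hiera structure built above, a single
enabled controller with id 1, transport "TLS", address 192.168.204.2, and
port 6640 (all values hypothetical) would have produced:

config = {
    'openstack::neutron::odl::params::controller_config': {
        'sdn_controller_1': {
            'transport': 'tls',
            'ip_address': '192.168.204.2',
            'port': 6640,
        }
    }
}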


@@ -272,7 +272,3 @@ class NfvPuppet(openstack.OpenstackBasePuppet):
def get_admin_url(self):
return self._format_private_endpoint(self.SERVICE_PORT)
def _get_nova_endpoint_url(self):
return self._format_private_endpoint(
self._operator.nova.SERVICE_API_PORT)


@@ -1,580 +0,0 @@
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import json
import os
import re
import shutil
import subprocess
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils
from sysinv.puppet import openstack
from sysinv.puppet import interface
from oslo_log import log
LOG = log.getLogger(__name__)
SCHEDULER_FILTERS_COMMON = [
'RetryFilter',
'ComputeFilter',
'BaremetalFilter',
'AvailabilityZoneFilter',
'AggregateInstanceExtraSpecsFilter',
'RamFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter',
'CoreFilter',
'VCpuModelFilter',
'NUMATopologyFilter',
'ServerGroupAffinityFilter',
'ServerGroupAntiAffinityFilter',
'PciPassthroughFilter',
'DiskFilter',
'AggregateProviderNetworkFilter',
]
SCHEDULER_FILTERS_STANDARD = [
]
DEFAULT_NOVA_PCI_ALIAS = [
{"vendor_id": constants.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
"product_id": constants.NOVA_PCI_ALIAS_QAT_DH895XCC_PF_DEVICE,
"name": constants.NOVA_PCI_ALIAS_QAT_DH895XCC_PF_NAME},
{"vendor_id": constants.NOVA_PCI_ALIAS_QAT_VF_VENDOR,
"product_id": constants.NOVA_PCI_ALIAS_QAT_DH895XCC_VF_DEVICE,
"name": constants.NOVA_PCI_ALIAS_QAT_DH895XCC_VF_NAME},
{"vendor_id": constants.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
"product_id": constants.NOVA_PCI_ALIAS_QAT_C62X_PF_DEVICE,
"name": constants.NOVA_PCI_ALIAS_QAT_C62X_PF_NAME},
{"vendor_id": constants.NOVA_PCI_ALIAS_QAT_VF_VENDOR,
"product_id": constants.NOVA_PCI_ALIAS_QAT_C62X_VF_DEVICE,
"name": constants.NOVA_PCI_ALIAS_QAT_C62X_VF_NAME},
{"name": constants.NOVA_PCI_ALIAS_GPU_NAME}
]
class NovaPuppet(openstack.OpenstackBasePuppet):
"""Class to encapsulate puppet operations for nova configuration"""
SERVICE_NAME = 'nova'
SERVICE_PORT = 8774
SERVICE_PATH = 'v2.1/%(tenant_id)s'
SERVICE_API_NAME = 'nova-api'
SERVICE_API_PORT = 18774
DATABASE_NOVA_API = 'nova_api'
SERVICE_METADATA = 'nova-metadata'
PLACEMENT_NAME = 'placement'
PLACEMENT_PORT = 8778
SERIALPROXY_PORT = 6083
def get_static_config(self):
dbuser = self._get_database_username(self.SERVICE_NAME)
api_dbuser = self._get_database_username(self.SERVICE_API_NAME)
return {
'nova::db::postgresql::user': dbuser,
'nova::db::postgresql_api::user': api_dbuser,
}
def get_secure_static_config(self):
ssh_config_dir = os.path.join(self.CONFIG_WORKDIR, 'ssh_config')
migration_key = os.path.join(ssh_config_dir, 'nova_migration_key')
system_host_key = os.path.join(ssh_config_dir, 'system_host_key')
# Generate the keys.
if os.path.exists(ssh_config_dir):
shutil.rmtree(ssh_config_dir)
os.makedirs(ssh_config_dir)
try:
cmd = ['ssh-keygen', '-t', 'rsa', '-b' '2048', '-N', '',
'-f', migration_key]
with open(os.devnull, "w") as fnull:
subprocess.check_call(cmd, stdout=fnull, stderr=fnull)
except subprocess.CalledProcessError:
raise exception.SysinvException('Failed to generate nova rsa key')
# Generate an ecdsa key for the system, which will be used on all
# controller/worker nodes. When external ssh connections to the
# controllers are made, this key will be stored in the known_hosts file
# and allow connections after the controller swacts. The ecdsa key
# has precedence over the rsa key, which is why we use ecdsa.
try:
cmd = ['ssh-keygen', '-t', 'ecdsa', '-b', '256', '-N', '',
'-f', system_host_key]
with open(os.devnull, "w") as fnull:
subprocess.check_call(cmd, stdout=fnull, stderr=fnull)
except subprocess.CalledProcessError:
raise exception.SysinvException(
'Failed to generate nova ecdsa key')
# Read the public/private migration keys
with open(migration_key) as fp:
migration_private = fp.read().strip()
with open('%s.pub' % migration_key) as fp:
migration_header, migration_public, _ = fp.read().strip().split()
# Read the public/private host keys
with open(system_host_key) as fp:
host_private = fp.read().strip()
with open('%s.pub' % system_host_key) as fp:
host_header, host_public, _ = fp.read().strip().split()
# Add our pre-generated system host key to /etc/ssh/ssh_known_hosts
ssh_keys = {
'system_host_key': {
'ensure': 'present',
'name': '*',
'host_aliases': [],
'type': host_header,
'key': host_public
}
}
dbpass = self._get_database_password(self.SERVICE_NAME)
kspass = self._get_service_password(self.SERVICE_NAME)
kspass_placement = self._get_service_password(self.PLACEMENT_NAME)
api_dbpass = self._get_database_password(self.SERVICE_API_NAME)
return {
'nova::db::postgresql::password': dbpass,
'nova::db::postgresql_api::password': api_dbpass,
'nova::keystone::auth::password': kspass,
'nova::keystone::auth_placement::password': kspass_placement,
'nova::keystone::authtoken::password': kspass,
'nova::api::neutron_metadata_proxy_shared_secret':
self._get_service_password(self.SERVICE_METADATA),
'nova_api_proxy::config::admin_password': kspass,
'nova::network::neutron::neutron_password':
self._get_neutron_password(),
'nova::placement::password': self._get_placement_password(),
'openstack::nova::compute::ssh_keys': ssh_keys,
'openstack::nova::compute::host_key_type': 'ssh-ecdsa',
'openstack::nova::compute::host_private_key': host_private,
'openstack::nova::compute::host_public_key': host_public,
'openstack::nova::compute::host_public_header': host_header,
'openstack::nova::compute::migration_key_type': 'ssh-rsa',
'openstack::nova::compute::migration_private_key':
migration_private,
'openstack::nova::compute::migration_public_key':
migration_public,
}
def get_system_config(self):
system = self._get_system()
scheduler_filters = SCHEDULER_FILTERS_COMMON
if system.system_type == constants.TIS_STD_BUILD:
scheduler_filters.extend(SCHEDULER_FILTERS_STANDARD)
ksuser = self._get_service_user_name(self.SERVICE_NAME)
config = {
'nova::keystone::auth::region': self._region_name(),
'nova::keystone::auth::public_url': self.get_public_url(),
'nova::keystone::auth::internal_url': self.get_internal_url(),
'nova::keystone::auth::admin_url': self.get_admin_url(),
'nova::keystone::auth::auth_name': ksuser,
'nova::keystone::auth::tenant': self._get_service_tenant_name(),
'nova::keystone::auth_placement::region':
self._region_name(),
'nova::keystone::auth_placement::public_url':
self.get_placement_public_url(),
'nova::keystone::auth_placement::internal_url':
self.get_placement_internal_url(),
'nova::keystone::auth_placement::admin_url':
self.get_placement_admin_url(),
'nova::keystone::auth_placement::auth_name':
self._get_service_user_name(self.PLACEMENT_NAME),
'nova::keystone::auth_placement::tenant':
self._get_service_tenant_name(),
'nova::keystone::authtoken::auth_url':
self._keystone_identity_uri(),
'nova::keystone::authtoken::auth_uri':
self._keystone_auth_uri(),
'nova::keystone::authtoken::region_name':
self._keystone_region_name(),
'nova::keystone::authtoken::project_name':
self._get_service_tenant_name(),
'nova::keystone::authtoken::user_domain_name':
self._get_service_user_domain_name(),
'nova::keystone::authtoken::project_domain_name':
self._get_service_project_domain_name(),
'nova::keystone::authtoken::username': ksuser,
'nova::network::neutron::neutron_url':
self._operator.neutron.get_internal_url(),
'nova::network::neutron::neutron_auth_url':
self._keystone_identity_uri(),
'nova::network::neutron::neutron_username':
self._get_neutron_user_name(),
'nova::network::neutron::neutron_region_name':
self._operator.neutron.get_region_name(),
'nova::network::neutron::neutron_project_name':
self._get_service_tenant_name(),
'nova::network::neutron::neutron_user_domain_name':
self._get_service_user_domain_name(),
'nova::network::neutron::neutron_project_domain_name':
self._get_service_project_domain_name(),
'nova::placement::auth_url':
self._keystone_identity_uri(),
'nova::placement::username':
self._get_placement_user_name(),
'nova::placement::os_region_name':
self.get_placement_region_name(),
'nova::placement::project_name':
self._get_service_tenant_name(),
'nova::scheduler::filter::scheduler_default_filters':
scheduler_filters,
'nova::vncproxy::host': self._get_management_address(),
'nova::serialproxy::serialproxy_host': self._get_management_address(),
'nova::api::api_bind_address': self._get_management_address(),
'nova::api::metadata_listen': self._get_management_address(),
'nova::api::compute_link_prefix':
self._get_compute_url(),
'openstack::nova::params::region_name':
self.get_region_name(),
'nova_api_proxy::config::osapi_compute_listen':
self._get_management_address(),
'nova_api_proxy::config::osapi_proxy_listen':
self._get_management_address(),
'nova_api_proxy::config::admin_user': ksuser,
'nova_api_proxy::config::user_domain_name':
self._get_service_user_domain_name(),
'nova_api_proxy::config::project_domain_name':
self._get_service_project_domain_name(),
'nova_api_proxy::config::admin_tenant_name':
self._get_service_tenant_name(),
'nova_api_proxy::config::auth_uri':
self._keystone_auth_uri(),
'nova_api_proxy::config::identity_uri':
self._keystone_identity_uri(),
'nova::compute::vncproxy_host':
self._get_oam_address(),
# NOTE(knasim): since the HAPROXY frontend for the
# VNC proxy is always over HTTP, the reverse path proxy
# should always be over HTTP, despite the public protocol
'nova::compute::vncproxy_protocol':
self._get_private_protocol(),
'nova::pci::aliases': self._get_pci_alias(),
'openstack::nova::params::service_create': self._to_create_services(),
'nova::compute::serial::base_url':
self._get_nova_serial_baseurl(),
'nova::compute::serial::proxyclient_address':
self._get_management_address(),
}
# no need to configure nova endpoint as the proxy provides
# the endpoints in SystemController
if (self._distributed_cloud_role() ==
constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER):
config.update({
'nova::keystone::auth::configure_endpoint': False,
'nova::keystone::auth_placement::configure_endpoint': False,
'openstack::nova::params::configure_endpoint': False,
})
return config
def get_secure_system_config(self):
config = {
'nova::database_connection':
self._format_database_connection(self.SERVICE_NAME),
'nova::api_database_connection':
self._format_database_connection(
self.SERVICE_API_NAME, database=self.DATABASE_NOVA_API),
}
return config
def get_host_config(self, host):
config = {}
if constants.WORKER in host.subfunctions:
# nova storage and compute configuration is required for hosts
# with a compute function only
config.update(self._get_compute_config(host))
config.update(self._get_storage_config(host))
return config
def get_public_url(self):
return self._format_public_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_internal_url(self):
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_admin_url(self):
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_region_name(self):
return self._get_service_region_name(self.SERVICE_NAME)
def get_placement_public_url(self):
return self._format_public_endpoint(self.PLACEMENT_PORT)
def get_placement_internal_url(self):
return self._format_private_endpoint(self.PLACEMENT_PORT)
def get_placement_admin_url(self):
return self._format_private_endpoint(self.PLACEMENT_PORT)
def get_placement_region_name(self):
return self._get_service_region_name(self.PLACEMENT_NAME)
def _get_compute_url(self):
return self._format_public_endpoint(self.SERVICE_PORT)
def _get_neutron_password(self):
return self._get_service_password(self._operator.neutron.SERVICE_NAME)
def _get_placement_password(self):
return self._get_service_password(self.PLACEMENT_NAME)
def _get_neutron_user_name(self):
return self._get_service_user_name(self._operator.neutron.SERVICE_NAME)
def _get_placement_user_name(self):
return self._get_service_user_name(self.PLACEMENT_NAME)
def _get_pci_alias(self):
alias_config = DEFAULT_NOVA_PCI_ALIAS[:]
return alias_config
def _get_compute_config(self, host):
return {
'nova::compute::enabled': self._enable_nova_compute(),
'nova::compute::libvirt::manage_libvirt_services':
self._enable_nova_compute(),
'nova::migration::libvirt::configure_libvirt':
self._enable_nova_compute(),
'nova::compute::compute_reserved_vm_memory_2M':
self._get_reserved_memory_2M(host),
'nova::compute::compute_reserved_vm_memory_1G':
self._get_reserved_memory_1G(host),
'nova::compute::vcpu_pin_set':
self._get_vcpu_pin_set(host),
'nova::compute::shared_pcpu_map':
self._get_shared_pcpu_map(host),
'openstack::nova::compute::pci::pci_pt_whitelist':
self._get_pci_pt_whitelist(host),
'openstack::nova::compute::pci::pci_sriov_whitelist':
self._get_pci_sriov_whitelist(host),
'openstack::nova::compute::iscsi_initiator_name':
host.iscsi_initiator_name
}
def _get_storage_config(self, host):
pvs = self.dbapi.ipv_get_by_ihost(host.id)
final_pvs = []
adding_pvs = []
removing_pvs = []
for pv in pvs:
if (pv.lvm_vg_name == constants.LVG_NOVA_LOCAL and
pv.pv_state != constants.PV_ERR):
pv_path = pv.disk_or_part_device_path
if (pv.pv_type == constants.PV_TYPE_PARTITION and
'-part' not in pv.disk_or_part_device_path and
'-part' not in pv.lvm_vg_name):
# add the disk partition to the disk path
partition_number = re.match('.*?([0-9]+)$',
pv.lvm_pv_name).group(1)
pv_path += "-part%s" % partition_number
if (pv.pv_state == constants.PV_ADD):
adding_pvs.append(pv_path)
final_pvs.append(pv_path)
elif(pv.pv_state == constants.PV_DEL):
removing_pvs.append(pv_path)
else:
final_pvs.append(pv_path)
global_filter, update_filter = self._get_lvm_global_filter(host)
values = {
'platform::worker::storage::final_pvs': final_pvs,
'platform::worker::storage::adding_pvs': adding_pvs,
'platform::worker::storage::removing_pvs': removing_pvs,
'platform::worker::storage::lvm_global_filter': global_filter,
'platform::worker::storage::lvm_update_filter': update_filter}
# If NOVA is a service on a ceph-external backend, use the ephemeral_pool
# and ceph_conf file that are stored in that DB entry.
# If NOVA is not on any ceph-external backend, it must be on the internal
# ceph backend with default "ephemeral" pool and default "/etc/ceph/ceph.conf"
# config file
sb_list = self.dbapi.storage_backend_get_list_by_type(
backend_type=constants.SB_TYPE_CEPH_EXTERNAL)
if sb_list:
for sb in sb_list:
if constants.SB_SVC_NOVA in sb.services:
ceph_ext_obj = self.dbapi.storage_ceph_external_get(sb.id)
images_rbd_pool = sb.capabilities.get('ephemeral_pool')
images_rbd_ceph_conf = \
constants.CEPH_CONF_PATH + os.path.basename(ceph_ext_obj.ceph_conf)
values.update({'openstack::nova::storage::images_rbd_pool':
images_rbd_pool,
'openstack::nova::storage::images_rbd_ceph_conf':
images_rbd_ceph_conf, })
return values
# TODO(oponcea): Make lvm global_filter generic
def _get_lvm_global_filter(self, host):
# Always include the global LVM devices in the final list of devices
filtered_disks = self._operator.storage.get_lvm_devices()
removing_disks = []
# add nova-local filter
pvs = self.dbapi.ipv_get_by_ihost(host.id)
for pv in pvs:
if pv.lvm_vg_name == constants.LVG_NOVA_LOCAL:
if pv.pv_state == constants.PV_DEL:
removing_disks.append(pv.disk_or_part_device_path)
else:
filtered_disks.append(pv.disk_or_part_device_path)
elif pv.lvm_vg_name == constants.LVG_CINDER_VOLUMES:
if constants.CINDER_DRBD_DEVICE not in filtered_disks:
filtered_disks.append(constants.CINDER_DRBD_DEVICE)
# The global filters contain only the final disks, while the update
# filter contains the transient list of removing disks as well
global_filter = self._operator.storage.format_lvm_filter(
list(set(filtered_disks)))
update_filter = self._operator.storage.format_lvm_filter(
list(set(removing_disks + filtered_disks)))
return global_filter, update_filter
def _get_reserved_memory_2M(self, host):
host_memory = self.dbapi.imemory_get_by_ihost(host.id)
memory_nodes = []
for memory in host_memory:
if isinstance(memory.vm_hugepages_nr_2M_pending, int):
memory_node = "\"node%d:%dkB:%d\"" % (
memory.numa_node, 1024 * 2, # 2M pages
memory.vm_hugepages_nr_2M_pending)
memory_nodes.append(memory_node)
return "(%s)" % ' '.join(memory_nodes)
def _get_reserved_memory_1G(self, host):
host_memory = self.dbapi.imemory_get_by_ihost(host.id)
memory_nodes = []
for memory in host_memory:
if isinstance(memory.vm_hugepages_nr_1G_pending, int):
memory_node = "\"node%d:%dkB:%d\"" % (
memory.numa_node, 1024 * 1024, # 1G pages
memory.vm_hugepages_nr_1G_pending)
memory_nodes.append(memory_node)
return "(%s)" % ' '.join(memory_nodes)
def _get_vcpu_pin_set(self, host):
vm_cpus = self._get_host_cpu_list(
host, function=constants.APPLICATION_FUNCTION, threads=True)
cpu_list = [c.cpu for c in vm_cpus]
return "\"%s\"" % utils.format_range_set(cpu_list)
def _get_shared_pcpu_map(self, host):
shared_cpus = self._get_host_cpu_list(
host, function=constants.SHARED_FUNCTION, threads=True)
cpu_map = {c.numa_node: c.cpu for c in shared_cpus}
return "\"%s\"" % ','.join(
"%r:%r" % (node, cpu) for node, cpu in cpu_map.items())
def _get_pci_pt_whitelist(self, host):
# Process all configured PCI passthrough interfaces and add them to
# the list of devices to whitelist
devices = []
for iface in self.context['interfaces'].values():
if iface['ifclass'] in [constants.INTERFACE_CLASS_PCI_PASSTHROUGH]:
port = interface.get_interface_port(self.context, iface)
dnames = interface._get_datanetwork_names(self.context, iface)
device = {
'address': port['pciaddr'],
'physical_network': dnames
}
LOG.debug("_get_pci_pt_whitelist device=%s" % device)
devices.append(device)
# Process all enabled PCI devices configured for PT and SRIOV and
# add them to the list of devices to whitelist.
# Since we are now properly initializing the qat driver and
# restarting sysinv, we need to add VF devices to the regular
# whitelist instead of the sriov whitelist
pci_devices = self.dbapi.pci_device_get_by_host(host.id)
for pci_device in pci_devices:
if pci_device.enabled:
device = {
'address': pci_device.pciaddr,
}
devices.append(device)
return json.dumps(devices)
def _get_pci_sriov_whitelist(self, host):
# Process all configured SRIOV passthrough interfaces and add them to
# the list of devices to whitelist
devices = []
for iface in self.context['interfaces'].values():
if iface['ifclass'] in [constants.INTERFACE_CLASS_PCI_SRIOV]:
port = interface.get_interface_port(self.context, iface)
dnames = interface._get_datanetwork_names(self.context, iface)
device = {
'address': port['pciaddr'],
'physical_network': dnames,
'sriov_numvfs': iface['sriov_numvfs']
}
LOG.debug("_get_pci_sriov_whitelist device=%s" % device)
devices.append(device)
return json.dumps(devices) if devices else None
def _get_nova_serial_baseurl(self):
oam_addr = self._format_url_address(self._get_oam_address())
ws_protocol = 'ws'
url = "%s://%s:%s" % (ws_protocol, str(oam_addr), str(self.SERIALPROXY_PORT))
return url
def _enable_nova_compute(self):
return False
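
Although the whole plugin is removed, the reserved-memory helpers above
illustrate the hiera string format it produced: hugepage reservations are
encoded as quoted node:size:count strings. As a worked example (the host
and the pending page counts are hypothetical), _get_reserved_memory_2M and
_get_reserved_memory_1G would return:

# 512 x 2M pages pending on NUMA node 0, 4 x 1G pages pending on node 1
reserved_2m = "(%s)" % ('"node%d:%dkB:%d"' % (0, 1024 * 2, 512))
reserved_1g = "(%s)" % ('"node%d:%dkB:%d"' % (1, 1024 * 1024, 4))
assert reserved_2m == '("node0:2048kB:512")'
assert reserved_1g == '("node1:1048576kB:4")'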


@@ -188,9 +188,6 @@ class OpenstackBasePuppet(base.BasePuppet):
return service_config.capabilities.get(stype)
return None
def _get_swift_service_user_domain_name(self):
return self._operator.keystone.get_swift_service_user_domain()
def _get_service_user_domain_name(self):
return self._operator.keystone.get_service_user_domain()


@@ -5,9 +5,9 @@
#
import json
import re
from sysinv.common import constants
from sysinv.puppet import base
@@ -24,6 +24,8 @@ class StoragePuppet(base.BasePuppet):
config.update(self._get_partition_config(host))
config.update(self._get_lvm_config(host))
config.update(self._get_host_fs_config(host))
if constants.WORKER in host.subfunctions:
config.update(self._get_worker_config(host))
return config
def _get_filesystem_config(self):
@@ -253,3 +255,68 @@ class StoragePuppet(base.BasePuppet):
'platform::filesystem::kubelet::params::lv_size': fs.size
})
return config
def _get_worker_config(self, host):
pvs = self.dbapi.ipv_get_by_ihost(host.id)
final_pvs = []
adding_pvs = []
removing_pvs = []
for pv in pvs:
if (pv.lvm_vg_name == constants.LVG_NOVA_LOCAL and
pv.pv_state != constants.PV_ERR):
pv_path = pv.disk_or_part_device_path
if (pv.pv_type == constants.PV_TYPE_PARTITION and
'-part' not in pv.disk_or_part_device_path and
'-part' not in pv.lvm_vg_name):
# add the disk partition to the disk path
partition_number = re.match('.*?([0-9]+)$',
pv.lvm_pv_name).group(1)
pv_path += "-part%s" % partition_number
if (pv.pv_state == constants.PV_ADD):
adding_pvs.append(pv_path)
final_pvs.append(pv_path)
elif(pv.pv_state == constants.PV_DEL):
removing_pvs.append(pv_path)
else:
final_pvs.append(pv_path)
global_filter, update_filter = self._get_lvm_global_filter(host)
values = {
'platform::worker::storage::final_pvs': final_pvs,
'platform::worker::storage::adding_pvs': adding_pvs,
'platform::worker::storage::removing_pvs': removing_pvs,
'platform::worker::storage::lvm_global_filter': global_filter,
'platform::worker::storage::lvm_update_filter': update_filter}
return values
# TODO(oponcea): Make lvm global_filter generic
def _get_lvm_global_filter(self, host):
# Always include the global LVM devices in the final list of devices
filtered_disks = self._operator.storage.get_lvm_devices()
removing_disks = []
# add nova-local filter
pvs = self.dbapi.ipv_get_by_ihost(host.id)
for pv in pvs:
if pv.lvm_vg_name == constants.LVG_NOVA_LOCAL:
if pv.pv_state == constants.PV_DEL:
removing_disks.append(pv.disk_or_part_device_path)
else:
filtered_disks.append(pv.disk_or_part_device_path)
elif pv.lvm_vg_name == constants.LVG_CINDER_VOLUMES:
if constants.CINDER_DRBD_DEVICE not in filtered_disks:
filtered_disks.append(constants.CINDER_DRBD_DEVICE)
# The global filters contain only the final disks, while the update
# filter contains the transient list of removing disks as well
global_filter = self._operator.storage.format_lvm_filter(
list(set(filtered_disks)))
update_filter = self._operator.storage.format_lvm_filter(
list(set(removing_disks + filtered_disks)))
return global_filter, update_filter
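
As a usage sketch, for a hypothetical worker host with a single nova-local
physical volume on /dev/sdb in the "adding" state, the new
_get_worker_config above would produce hiera data along these lines
(the device path and filter contents are illustrative; real values come
from the host's PV records and format_lvm_filter):

values = {
    'platform::worker::storage::final_pvs': ['/dev/sdb'],
    'platform::worker::storage::adding_pvs': ['/dev/sdb'],
    'platform::worker::storage::removing_pvs': [],
    # both filters are built by format_lvm_filter over the same devices,
    # so they only differ while a PV is in the removing state
    'platform::worker::storage::lvm_global_filter': '<filter over final PVs>',
    'platform::worker::storage::lvm_update_filter': '<filter over final + removing PVs>',
}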


@@ -1,56 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.puppet import openstack
class SwiftPuppet(openstack.OpenstackBasePuppet):
"""Class to encapsulate puppet operations for Swift configuration"""
SERVICE_NAME = 'swift'
SERVICE_PORT = 8080
SERVICE_PATH = 'v1/AUTH_%(tenant_id)s'
def get_secure_static_config(self):
kspass = self._get_service_password(self.SERVICE_NAME)
return {
'swift::keystone::auth::password': kspass,
'swift::proxy::authtoken::password': kspass,
}
def get_system_config(self):
ksuser = self._get_service_user_name(self.SERVICE_NAME)
config = {
'openstack::swift::params::api_host':
self._get_management_address(),
'swift::keystone::auth::region':
self._get_service_region_name(self.SERVICE_NAME),
'swift::keystone::auth::auth_name': ksuser,
'swift::keystone::auth::tenant': self._get_service_tenant_name(),
'swift::keystone::auth::public_url': self.get_public_url(),
'swift::keystone::auth::internal_url': self.get_internal_url(),
'swift::keystone::auth::admin_url': self.get_admin_url(),
'swift::proxy::authtoken::auth_uri': self._keystone_auth_uri(),
'swift::proxy::authtoken::auth_url': self._keystone_identity_uri(),
'swift::proxy::authtoken::project_name':
self._get_service_tenant_name(),
'swift::proxy::authtoken::username': ksuser,
}
return config
def get_public_url(self):
return self._format_public_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_internal_url(self):
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_admin_url(self):
return self._format_private_endpoint(self.SERVICE_PORT)