Removing unused cinder references from bare metal

- Removed the sysinv puppet cinder operator.
- Updated all puppet operators that referenced cinder; those
operators are likely to be removed as well.
- Removed all cinder service parameters.
- Cleaned up some requirements warnings from tox.
- Removed any unused constants that seemed related to cinder
that were not being referenced anywhere in the code.

Note: the code related to cinder storage backend remains.
This will be cleaned up as part of shared services and
distributed cloud cleanup and re-implementation.

Change-Id: I3861f5e48d2fd89fdfd33b9c1431d8fdc7ed05ce
Story: 2004764
Task: 33614
Signed-off-by: Al Bailey <Al.Bailey@windriver.com>
This commit is contained in:
Al Bailey 2019-06-27 08:42:57 -05:00
parent 4207053b9c
commit 8834edaac1
14 changed files with 15 additions and 2458 deletions

View File

@ -28,6 +28,7 @@ keystonemiddleware>=4.12.0 # Apache-2.0
stevedore>=0.10
websockify>=0.8.0 # LGPLv3
pecan>=1.0.0
retrying!=1.3.0 # Apache-2.0
six>=1.4.1
jsonpatch>=1.1
WSME>=0.5b2
@ -40,4 +41,3 @@ ruamel.yaml>=0.13.14 # MIT
docker # Apache-2.0
kubernetes # Apache-2.0
Django<2,>=1.11.20 # BSD

View File

@ -54,7 +54,6 @@ systemconfig.puppet_plugins =
014_nova = sysinv.puppet.nova:NovaPuppet
015_neutron = sysinv.puppet.neutron:NeutronPuppet
016_horizon = sysinv.puppet.horizon:HorizonPuppet
019_cinder = sysinv.puppet.cinder:CinderPuppet
022_heat = sysinv.puppet.heat:HeatPuppet
025_ironic = sysinv.puppet.ironic:IronicPuppet
027_dcmanager = sysinv.puppet.dcmanager:DCManagerPuppet

View File

@ -8,7 +8,6 @@
#
import copy
import netaddr
import pecan
from fm_api import constants as fm_constants
from fm_api import fm_api
@ -18,8 +17,6 @@ import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from six.moves.urllib.parse import urlparse
from sysinv.api.controllers.v1 import address_pool
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
@ -137,21 +134,6 @@ class ServiceParameterController(rest.RestController):
def __init__(self, parent=None, **kwargs):
self._parent = parent
# Add additional hpe3par backends
for i in range(2, constants.SERVICE_PARAM_MAX_HPE3PAR + 1):
section = "{0}{1}".format(constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR, i)
service_parameter.SERVICE_PARAMETER_SCHEMA[constants.SERVICE_TYPE_CINDER][section] = {
service_parameter.SERVICE_PARAM_MANDATORY:
service_parameter.CINDER_HPE3PAR_PARAMETER_MANDATORY,
service_parameter.SERVICE_PARAM_PROTECTED:
service_parameter.CINDER_HPE3PAR_PARAMETER_PROTECTED,
service_parameter.SERVICE_PARAM_OPTIONAL:
service_parameter.CINDER_HPE3PAR_PARAMETER_OPTIONAL,
service_parameter.SERVICE_PARAM_VALIDATOR:
service_parameter.CINDER_HPE3PAR_PARAMETER_VALIDATOR,
service_parameter.SERVICE_PARAM_RESOURCE:
service_parameter.CINDER_HPE3PAR_PARAMETER_RESOURCE,
}
def _get_service_parameter_collection(self, marker=None, limit=None,
sort_key=None, sort_dir=None,
@ -184,19 +166,6 @@ class ServiceParameterController(rest.RestController):
parms = [p for p in parms if not
p.service == constants.SERVICE_TYPE_CEPH]
# filter out cinder state
parms = [p for p in parms if not (
p.service == constants.SERVICE_TYPE_CINDER and (
p.section == constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX_STATE or
p.section == constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR_STATE or
p.section == constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND_STATE))]
# filter out cinder multipath state
parms = [p for p in parms if not(
p.service == constants.SERVICE_TYPE_CINDER and
p.section == constants.SERVICE_PARAM_SECTION_CINDER_DEFAULT and
p.name == constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH_STATE)]
# Before we can return the service parameter collection,
# we need to ensure that the list does not contain any
# "protected" service parameters which may need to be
@ -586,19 +555,6 @@ class ServiceParameterController(rest.RestController):
self._check_parameter_syntax(parameter)
if parameter['service'] == constants.SERVICE_TYPE_CINDER:
if (parameter['name'] ==
constants.SERVICE_PARAM_CINDER_EMC_VNX_ENABLED):
if (parameter['value'].lower() == 'false' and
old_parameter['value'].lower() == 'true'):
if not pecan.request.rpcapi.validate_emc_removal(
pecan.request.context):
msg = _(
"Unable to modify service parameter. Can not "
"disable %s while in use"
% constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX)
raise wsme.exc.ClientSideError(msg)
updated_parameter = pecan.request.dbapi.service_parameter_update(
uuid, updates)
@ -645,11 +601,6 @@ class ServiceParameterController(rest.RestController):
msg = _("Ceph backend is required.")
raise wsme.exc.ClientSideError(msg)
if parameter.service == constants.SERVICE_TYPE_CINDER:
if parameter.name == 'data_san_ip':
msg = _("Parameter '%s' is readonly." % parameter.name)
raise wsme.exc.ClientSideError(msg)
if parameter.section == \
constants.SERVICE_PARAM_SECTION_PLATFORM_MAINTENANCE:
msg = _("Platform Maintenance Parameter '%s' is required." %
@ -698,220 +649,6 @@ class ServiceParameterController(rest.RestController):
constants.SERVICE_PARAM_SECTION_IDENTITY_LDAP))
raise wsme.exc.ClientSideError(msg)
@staticmethod
def _service_parameter_apply_semantic_check_cinder_default():
"""Semantic checks for the Cinder Service Type: DEFAULT parameters """
# Look up the optional DEFAULT/default_volume_type parameter; there
# must be at most one matching row in the service_parameter table.
try:
volume_type = pecan.request.dbapi.service_parameter_get_one(
service=constants.SERVICE_TYPE_CINDER,
section=constants.SERVICE_PARAM_SECTION_CINDER_DEFAULT,
name=constants.SERVICE_PARAM_CINDER_DEFAULT_VOLUME_TYPE)
except exception.MultipleResults:
msg = (_('Unable to apply service parameters. Multiple parameters '
'found for %s/%s/%s. Ensure only one parameter is '
'provided.') % (
constants.SERVICE_TYPE_CINDER,
constants.SERVICE_PARAM_SECTION_CINDER_DEFAULT,
constants.SERVICE_PARAM_CINDER_DEFAULT_VOLUME_TYPE))
raise wsme.exc.ClientSideError(msg)
except exception.NotFound:
# not required to be set
volume_type = None
# If a default volume type was configured, confirm it is one of the
# volume types cinder actually knows about (queried over RPC).
if volume_type:
try:
volume_types = pecan.request.rpcapi.get_cinder_volume_type_names(
pecan.request.context)
except rpc_common.RemoteError as e:
# Surface the conductor-side failure to the API client.
raise wsme.exc.ClientSideError(str(e.value))
if volume_type.value not in volume_types:
msg = (_('Unable to apply service parameters. Cannot set "%s" '
'to value "%s". This is not a valid cinder volume '
'type. Acceptable values are: [%s].') % (
constants.SERVICE_PARAM_CINDER_DEFAULT_VOLUME_TYPE,
volume_type.value, ','.join(volume_types)))
raise wsme.exc.ClientSideError(msg)
@staticmethod
def _service_parameter_apply_semantic_check_cinder_emc_vnx():
"""Semantic checks for the Cinder Service Type: EMC VNX backend """
# The 'enabled' flag decides which set of checks applies below.
feature_enabled = pecan.request.dbapi.service_parameter_get_one(
service=constants.SERVICE_TYPE_CINDER,
section=constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX,
name=constants.SERVICE_PARAM_CINDER_EMC_VNX_ENABLED)
if feature_enabled.value.lower() == 'true':
# Feature enabled: every mandatory EMC VNX parameter must exist.
for name in service_parameter.CINDER_EMC_VNX_PARAMETER_REQUIRED_ON_FEATURE_ENABLED:
try:
pecan.request.dbapi.service_parameter_get_one(
service=constants.SERVICE_TYPE_CINDER,
section=constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX,
name=name)
except exception.NotFound:
msg = _("Unable to apply service parameters. "
"Missing service parameter '%s' for service '%s' "
"in section '%s'." % (name,
constants.SERVICE_TYPE_CINDER,
constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX))
raise wsme.exc.ClientSideError(msg)
else:
# Feature disabled: the conductor must confirm (over RPC) that no
# EMC volumes still exist before the backend may be turned off.
if not pecan.request.rpcapi.validate_emc_removal(
pecan.request.context):
msg = _("Unable to apply service parameters. Can not disable "
"%s while in use. Remove any EMC volumes."
% constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX)
raise wsme.exc.ClientSideError(msg)
@staticmethod
def _emc_vnx_ip_addresses_reservation():
"""Reserve the provided IP addresses """
# To keep the EMC IP addresses information between service_parameter
# db and addresses db in-sync. So that sysinv won't assign these IP
# addresses to someone else
#
# service_parameter | addresses
# ------------------------------------------------------------
# san_ip | controller-emc-vnx-san-ip-<network-type>
# (user provides) |
# ------------------------------------------------------------
# san_secondary_ip | controller-emc-vnx-san-
# (user provides) | secondary-ip-<network-type>
# ------------------------------------------------------------
# data_san_ip | controller-emc-vnx-data-san-ip-
# | <network-type> (generated internally)
# ------------------------------------------------------------
#
# controller-emc-vnx-san-ip and controller-emc-vnx-san-secondary-ip
# are in 'control_network' network and controller-emc-vnx-data-san-ip
# is in 'data_network' network.
feature_enabled = service_parameter._emc_vnx_get_param_from_name(
constants.SERVICE_PARAM_CINDER_EMC_VNX_ENABLED)
data_san_ip_param = service_parameter._emc_vnx_get_param_from_name(
service_parameter.CINDER_EMC_VNX_DATA_SAN_IP)
prev_data_san_ip_db = service_parameter._emc_vnx_get_address_db(
service_parameter.CINDER_EMC_VNX_DATA_SAN_IP,
control_network=False)[0]
# Always remove the reserved control IP addresses out of network
# because of the following scenarios:
# * feature turned off need to delete
# * user modifies 'control_network' parameter from e.g. infra to oam
# And later will be re-added if necessary
prev_san_ip_db, prev_control_network_type = \
service_parameter._emc_vnx_get_address_db(
service_parameter.CINDER_EMC_VNX_SAN_IP, control_network=True)
service_parameter._emc_vnx_db_destroy_address(prev_san_ip_db)
prev_san_secondary_ip_db = service_parameter._emc_vnx_get_address_db(
service_parameter.CINDER_EMC_VNX_SAN_SECONDARY_IP,
network_type=prev_control_network_type)[0]
service_parameter._emc_vnx_db_destroy_address(prev_san_secondary_ip_db)
# Enabling emc_vnx feature, we need to
if feature_enabled.value.lower() == 'true':
# Control IP, user will provide san_ip and san_secondary_ip
# (optional). Here we just save these IP addresses into
# 'control_network' network
control_network_param = \
service_parameter._emc_vnx_get_param_from_name(
service_parameter.CINDER_EMC_VNX_CONTROL_NETWORK)
# Don't reserve address for oam network
if control_network_param.value != constants.NETWORK_TYPE_OAM:
try:
pool_uuid = pecan.request.dbapi.network_get_by_type(
control_network_param.value).pool_uuid
pool = pecan.request.dbapi.address_pool_get(pool_uuid)
service_parameter._emc_vnx_save_address_from_param(
service_parameter.CINDER_EMC_VNX_SAN_IP,
control_network_param.value, pool)
service_parameter._emc_vnx_save_address_from_param(
service_parameter.CINDER_EMC_VNX_SAN_SECONDARY_IP,
control_network_param.value, pool)
except exception.NetworkTypeNotFound:
msg = _("Unable to apply service parameters. "
"Cannot find specified EMC control "
"network '%s'" % control_network_param.value)
raise wsme.exc.ClientSideError(msg)
except exception.AddressPoolNotFound:
msg = _("Unable to apply service parameters. "
"Network '%s' has no address pool associated" %
control_network_param.value)
raise wsme.exc.ClientSideError(msg)
# Data IP, we need to assign an IP address out of 'data_network'
# network set it to readonly service parameter 'data-san-ip'.
#
# User can change the data_network (e.g from infra to mgnt)
# which means we need to remove the existing and assign new IP
# from new data_network
data_network_param = service_parameter._emc_vnx_get_param_from_name(
service_parameter.CINDER_EMC_VNX_DATA_NETWORK)
try:
data_network_db = pecan.request.dbapi.network_get_by_type(
data_network_param.value)
except exception.NetworkTypeNotFound:
msg = _("Unable to apply service parameters. "
"Cannot find specified EMC data network '%s'" % (
data_network_param.value))
raise wsme.exc.ClientSideError(msg)
# If addresses db already contains the address and a new request
# comes in with a different network we first need to delete the
# existing one
if (prev_data_san_ip_db and prev_data_san_ip_db.pool_uuid !=
data_network_db.pool_uuid):
service_parameter._emc_vnx_destroy_data_san_address(
data_san_ip_param, prev_data_san_ip_db)
# Clearing the param forces a fresh allocation below.
data_san_ip_param = None
if not data_san_ip_param:
try:
assigned_address = (
address_pool.AddressPoolController.assign_address(
None, data_network_db.pool_uuid,
service_parameter._emc_vnx_format_address_name_db(
service_parameter.CINDER_EMC_VNX_DATA_SAN_IP,
data_network_param.value)))
pecan.request.dbapi.service_parameter_create({
'service': constants.SERVICE_TYPE_CINDER,
'section':
constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX,
'name': service_parameter.CINDER_EMC_VNX_DATA_SAN_IP,
'value': assigned_address.address})
except exception.AddressPoolExhausted:
msg = _("Unable to apply service parameters. "
"The address pool '%s' in Data EMC network '%s' "
"is full" % (data_network_db.pool_uuid,
data_network_param.value))
raise wsme.exc.ClientSideError(msg)
except exception.AddressNotFound:
msg = _("Unable to apply service parameters. "
"Cannot add generated '%s' address into "
"pool '%s'" % (service_parameter.CINDER_EMC_VNX_DATA_SAN_IP,
data_network_db.pool_uuid))
raise wsme.exc.ClientSideError(msg)
except exception.ServiceParameterAlreadyExists:
# If can not add assigned data san ip address into
# service parameter then need to release it too
service_parameter._emc_vnx_db_destroy_address(
assigned_address)
msg = _("Unable to apply service parameters. "
"Cannot add generated '%s' address '%s' "
"into service parameter '%s'" % (
service_parameter.CINDER_EMC_VNX_DATA_SAN_IP,
assigned_address.address,
data_san_ip_param.value))
raise wsme.exc.ClientSideError(msg)
else:
# Need to remove the reserved Data IP addresses out of network
service_parameter._emc_vnx_destroy_data_san_address(
data_san_ip_param, prev_data_san_ip_db)
@staticmethod
def _service_parameter_apply_semantic_check_mtce():
"""Semantic checks for the Platform Maintenance Service Type """
@ -984,32 +721,6 @@ class ServiceParameterController(rest.RestController):
if service == constants.SERVICE_TYPE_IDENTITY:
self._service_parameter_apply_semantic_check_identity()
if service == constants.SERVICE_TYPE_CINDER:
# Make sure one of the internal cinder configs is enabled so that
# we know cinder is operational in this region
if not StorageBackendConfig.is_service_enabled(pecan.request.dbapi,
constants.SB_SVC_CINDER,
filter_shared=True):
msg = _("Cannot apply Cinder configuration. Cinder is not "
"currently enabled on either the %s or %s backends."
% (constants.SB_TYPE_LVM, constants.SB_TYPE_CEPH))
raise wsme.exc.ClientSideError(msg)
self._service_parameter_apply_semantic_check_cinder_default()
self._service_parameter_apply_semantic_check_cinder_emc_vnx()
self._emc_vnx_ip_addresses_reservation()
self._service_parameter_apply_semantic_check_cinder_hpe3par(constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR)
self._hpe3par_reserve_ip_addresses(constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR)
for i in range(2, constants.SERVICE_PARAM_MAX_HPE3PAR + 1):
section = "{0}{1}".format(constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR, i)
self._service_parameter_apply_semantic_check_cinder_hpe3par(section)
self._hpe3par_reserve_ip_addresses(section)
self._service_parameter_apply_semantic_check_cinder_hpelefthand()
self._hpelefthand_reserve_ip_addresses()
if service == constants.SERVICE_TYPE_PLATFORM:
self._service_parameter_apply_semantic_check_mtce()
@ -1039,308 +750,3 @@ class ServiceParameterController(rest.RestController):
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.exception(e)
@staticmethod
def _hpe3par_reserve_ip_addresses(section):
    """Reserve the HPE 3PAR addresses of *section* in the addresses DB.

    We need to keep the address information between service_parameter
    db and addresses db in-sync so that sysinv won't assign the IP
    addresses to someone else.
    Create an entry in the addresses db for each service parameter.

    Service Parameter | Address DB Entry Name
    ---------------------------------------------------------------
    hpe3par_api_url   | <section>-api-ip
    ---------------------------------------------------------------
    hpe3par_iscsi_ips | <section>-iscsi-ip<n>
    ---------------------------------------------------------------
    san_ip            | <section>-san-ip
    ---------------------------------------------------------------

    :param section: the hpe3par service-parameter section being applied
                    (base 'hpe3par' or one of the 'hpe3par<N>' sections).
    :raises wsme.exc.ClientSideError: if an address cannot be reserved.
    """
    # Remove current addresses. They will be re-added below if the
    # feature is enabled.
    name = section + "-api-ip"
    try:
        addr = pecan.request.dbapi.address_get_by_name(name)
        LOG.debug("Removing address %s" % name)
        pecan.request.dbapi.address_destroy(addr.uuid)
    except exception.AddressNotFoundByName:
        pass
    # The iscsi addresses are numbered consecutively from 0; stop the
    # cleanup scan at the first missing name.
    i = 0
    while True:
        name = section + "-iscsi-ip" + str(i)
        try:
            addr = pecan.request.dbapi.address_get_by_name(name)
            LOG.debug("Removing address %s" % name)
            pecan.request.dbapi.address_destroy(addr.uuid)
            i += 1
        except exception.AddressNotFoundByName:
            break
    name = section + "-san-ip"
    try:
        addr = pecan.request.dbapi.address_get_by_name(name)
        LOG.debug("Removing address %s" % name)
        pecan.request.dbapi.address_destroy(addr.uuid)
    except exception.AddressNotFoundByName:
        pass
    # Nothing to reserve when this backend section is disabled.
    enabled = pecan.request.dbapi.service_parameter_get_one(
        service=constants.SERVICE_TYPE_CINDER,
        section=section,
        name="enabled")
    if enabled.value.lower() == 'false':
        return
    #
    # Add the hpe3par-api-ip address.
    #
    api_url = pecan.request.dbapi.service_parameter_get_one(
        service=constants.SERVICE_TYPE_CINDER,
        section=section,
        name="hpe3par_api_url")
    url = urlparse(api_url.value)
    ip = netaddr.IPAddress(url.hostname)
    pool = service_parameter._get_network_pool_from_ip_address(
        ip, service_parameter.HPE_DATA_NETWORKS)
    # Is the address in one of the supported network pools? If so,
    # reserve it.
    if pool is not None:
        try:
            name = section + "-api-ip"
            address = {'address': str(ip),
                       'prefix': pool['prefix'],
                       'family': pool['family'],
                       'enable_dad': constants.IP_DAD_STATES[pool['family']],
                       'address_pool_id': pool['id'],
                       'interface_id': None,
                       'name': name}
            LOG.debug("Reserving address %s" % name)
            pecan.request.dbapi.address_create(address)
        except exception.AddressAlreadyExists:
            msg = _("Unable to apply service parameters. "
                    "Unable to save address '%s' ('%s') into "
                    "pool '%s'" % (name, str(ip), pool['name']))
            raise wsme.exc.ClientSideError(msg)
    #
    # Add the hpe3par-iscsi-ip addresses.
    #
    # Bug fix: look the iscsi IPs up in the section being processed.
    # This was previously hard-coded to the base
    # SERVICE_PARAM_SECTION_CINDER_HPE3PAR section, so the secondary
    # hpe3par<N> sections always had the base section's iscsi addresses
    # reserved instead of their own.
    iscsi_ips = pecan.request.dbapi.service_parameter_get_one(
        service=constants.SERVICE_TYPE_CINDER,
        section=section,
        name="hpe3par_iscsi_ips")
    addrs = iscsi_ips.value.split(',')
    i = 0
    for addr in addrs:
        ipstr = addr.split(':')
        ip = netaddr.IPAddress(ipstr[0])
        pool = service_parameter._get_network_pool_from_ip_address(
            ip, service_parameter.HPE_DATA_NETWORKS)
        # Is the address in one of the supported network pools? If so,
        # reserve it.
        if pool is not None:
            try:
                name = section + "-iscsi-ip" + str(i)
                address = {'address': str(ip),
                           'prefix': pool['prefix'],
                           'family': pool['family'],
                           'enable_dad': constants.IP_DAD_STATES[pool['family']],
                           'address_pool_id': pool['id'],
                           'interface_id': None,
                           'name': name}
                LOG.debug("Reserving address %s" % name)
                pecan.request.dbapi.address_create(address)
            except exception.AddressAlreadyExists:
                msg = _("Unable to apply service parameters. "
                        "Unable to save address '%s' ('%s') into "
                        "pool '%s'" % (name, str(ip), pool['name']))
                raise wsme.exc.ClientSideError(msg)
            # Increment only when an address was actually reserved so
            # the -iscsi-ip<n> names stay consecutive (the cleanup scan
            # above stops at the first gap).
            i += 1
    #
    # Optionally add the hpe3par-san-ip address.
    #
    try:
        san_ip = pecan.request.dbapi.service_parameter_get_one(
            service=constants.SERVICE_TYPE_CINDER,
            section=section,
            name="san_ip")
    except exception.NotFound:
        # san_ip is optional; nothing more to do.
        return
    ip = netaddr.IPAddress(san_ip.value)
    pool = service_parameter._get_network_pool_from_ip_address(
        ip, service_parameter.HPE_DATA_NETWORKS)
    # Is the address in one of the supported network pools? If so,
    # reserve it.
    if pool is not None:
        try:
            name = section + "-san-ip"
            address = {'address': str(ip),
                       'prefix': pool['prefix'],
                       'family': pool['family'],
                       'enable_dad': constants.IP_DAD_STATES[pool['family']],
                       'address_pool_id': pool['id'],
                       'interface_id': None,
                       'name': name}
            LOG.debug("Reserving address %s" % name)
            pecan.request.dbapi.address_create(address)
        except exception.AddressAlreadyExists:
            msg = _("Unable to apply service parameters. "
                    "Unable to save address '%s' ('%s') into "
                    "pool '%s'" % (name, str(ip), pool['name']))
            raise wsme.exc.ClientSideError(msg)
@staticmethod
def _hpelefthand_reserve_ip_addresses():
"""
We need to keep the address information between service_parameter
db and addresses db in-sync so that sysinv won't assign the IP
addresses to someone else.
Create an entry in the addresses db for each service parameter.
Service Parameter | Address DB Entry Name
---------------------------------------------------------------
hpelefthand_api_url | hpelefthand-api-ip
---------------------------------------------------------------
"""
#
# Remove current addresses. They will be added below if the
# feature is enabled.
#
name = "hpelefthand-api-ip"
try:
addr = pecan.request.dbapi.address_get_by_name(name)
LOG.debug("Removing address %s" % name)
pecan.request.dbapi.address_destroy(addr.uuid)
except exception.AddressNotFoundByName:
pass
# Nothing to reserve when the hpelefthand backend is disabled.
enabled = pecan.request.dbapi.service_parameter_get_one(
service=constants.SERVICE_TYPE_CINDER,
section=constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND,
name="enabled")
if enabled.value.lower() == 'false':
return
#
# Add the hpelefthand-api-ip address.
#
api_url = pecan.request.dbapi.service_parameter_get_one(
service=constants.SERVICE_TYPE_CINDER,
section=constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND,
name="hpelefthand_api_url")
url = urlparse(api_url.value)
ip = netaddr.IPAddress(url.hostname)
pool = service_parameter._get_network_pool_from_ip_address(ip, service_parameter.HPE_DATA_NETWORKS)
# Only reserve the address when it falls inside one of the supported
# network pools.
if pool is not None:
try:
address = {'address': str(ip),
'prefix': pool['prefix'],
'family': pool['family'],
'enable_dad': constants.IP_DAD_STATES[pool['family']],
'address_pool_id': pool['id'],
'interface_id': None,
'name': name}
LOG.debug("Reserving address %s" % name)
pecan.request.dbapi.address_create(address)
except exception.AddressAlreadyExists:
msg = _("Unable to apply service parameters. "
"Unable to save address '%s' ('%s') into "
"pool '%s'" % (name, str(ip), pool['name']))
raise wsme.exc.ClientSideError(msg)
@staticmethod
def _service_parameter_apply_semantic_check_cinder_hpe3par(section):
"""Semantic checks for the Cinder Service Type """
# The section's SAN change-status 'enabled' flag decides which set of
# checks applies below.
feature_enabled = pecan.request.dbapi.service_parameter_get_one(
service=constants.SERVICE_TYPE_CINDER,
section=section,
name=constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED)
if feature_enabled.value.lower() == 'true':
# Client library installed? If not fail.
if not service_parameter._rpm_pkg_is_installed('python-3parclient'):
msg = _("Unable to apply service parameters. "
"Missing client library python-3parclient.")
raise wsme.exc.ClientSideError(msg)
# All mandatory hpe3par parameters must be present in this section.
for name in service_parameter.CINDER_HPE3PAR_PARAMETER_REQUIRED:
try:
pecan.request.dbapi.service_parameter_get_one(
service=constants.SERVICE_TYPE_CINDER,
section=section,
name=name)
except exception.NotFound:
msg = _("Unable to apply service parameters. "
"Missing service parameter '%s' for service '%s' "
"in section '%s'." % (name,
constants.SERVICE_TYPE_CINDER,
section))
raise wsme.exc.ClientSideError(msg)
else:
# Disabling: the conductor must confirm (over RPC) that no HPE3PAR
# volumes remain in this section before it may be turned off.
if not pecan.request.rpcapi.validate_hpe3par_removal(
pecan.request.context, section):
msg = _("Unable to apply service parameters. Can not disable "
"%s while in use. Remove any HPE3PAR volumes."
% section)
raise wsme.exc.ClientSideError(msg)
@staticmethod
def _service_parameter_apply_semantic_check_cinder_hpelefthand():
"""Semantic checks for the Cinder Service Type """
# The SAN change-status 'enabled' flag decides which set of checks
# applies below.
feature_enabled = pecan.request.dbapi.service_parameter_get_one(
service=constants.SERVICE_TYPE_CINDER,
section=constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND,
name=constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED)
if feature_enabled.value.lower() == 'true':
# Client library installed? If not fail.
if not service_parameter._rpm_pkg_is_installed('python-lefthandclient'):
msg = _("Unable to apply service parameters. "
"Missing client library python-lefthandclient.")
raise wsme.exc.ClientSideError(msg)
# All mandatory hpelefthand parameters must be present.
for name in service_parameter.CINDER_HPELEFTHAND_PARAMETER_REQUIRED:
try:
pecan.request.dbapi.service_parameter_get_one(
service=constants.SERVICE_TYPE_CINDER,
section=constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND,
name=name)
except exception.NotFound:
msg = _("Unable to apply service parameters. "
"Missing service parameter '%s' for service '%s' "
"in section '%s'." % (name,
constants.SERVICE_TYPE_CINDER,
constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND))
raise wsme.exc.ClientSideError(msg)
else:
# Disabling: the conductor must confirm (over RPC) that no
# HPELEFTHAND volumes remain before it may be turned off.
if not pecan.request.rpcapi.validate_hpelefthand_removal(
pecan.request.context):
msg = _("Unable to apply service parameters. Can not disable "
"%s while in use. Remove any HPELEFTHAND volumes."
% constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND)
raise wsme.exc.ClientSideError(msg)

View File

@ -220,36 +220,6 @@ class StorageBackendController(rest.RestController):
sort_key=sort_key,
sort_dir=sort_dir)
# TODO: External backend case for emc_vnx, hpe3par, hpelefthand will be
# handled in a separate task
# If cinder is not configured yet, calling cinder_has_external_backend() will
# timeout. If any of these loosely coupled backend exists, create an external
# backend with services set to cinder if external backend is not created yet.
# if api_helper.is_svc_enabled(storage_backends, constants.SB_SVC_CINDER):
# try:
# if pecan.request.rpcapi.cinder_has_external_backend(pecan.request.context):
#
# # Check if external backend already exists.
# need_soft_ext_sb = True
# for s_b in storage_backends:
# if s_b.backend == constants.SB_TYPE_EXTERNAL:
# if s_b.services is None:
# s_b.services = [constants.SB_SVC_CINDER]
# elif constants.SB_SVC_CINDER not in s_b.services:
# s_b.services.append(constants.SB_SVC_CINDER)
# need_soft_ext_sb = False
# break
#
# if need_soft_ext_sb:
# ext_sb = StorageBackend()
# ext_sb.backend = constants.SB_TYPE_EXTERNAL
# ext_sb.state = constants.SB_STATE_CONFIGURED
# ext_sb.task = constants.SB_TASK_NONE
# ext_sb.services = [constants.SB_SVC_CINDER]
# storage_backends.extend([ext_sb])
# except Timeout:
# LOG.exception("Timeout while getting external backend list!")
return StorageBackendCollection\
.convert_with_links(storage_backends,
limit,

View File

@ -478,9 +478,9 @@ def _discover_and_validate_rbd_provisioner_capabilities(caps_dict, storage_ceph)
# Check for a valid RBD StorageClass name
name = caps_dict[constants.K8S_RBD_PROV_STORAGECLASS_NAME]
if not r.match(name):
msg = _("Invalid RBD StorageClass name '%s'. %s" %
(name, msg_help))
raise wsme.exc.ClientSideError(msg)
msg = _("Invalid RBD StorageClass name '%s'. %s" %
(name, msg_help))
raise wsme.exc.ClientSideError(msg)
# Check the uniqueness of RBD StorageClass name in DB.
if constants.K8S_RBD_PROV_STORAGECLASS_NAME in caps_dict:

View File

@ -363,10 +363,9 @@ def _apply_backend_changes(op, sb_obj):
pecan.request.rpcapi.update_external_cinder_config)
else:
# If no service is specified or glance is the only service, this is a DB
# only change => Set the state to configured
pecan.request.dbapi.storage_external_update(
sb_obj.uuid,
# If no service is specified or glance or swift is the only service
# this is a DB only change => Set the state to configured
pecan.request.dbapi.storage_external_update(sb_obj.uuid,
{'state': constants.SB_STATE_CONFIGURED})
# update shared_services

View File

@ -181,13 +181,6 @@ PATCH_DEFAULT_TIMEOUT_IN_SECS = 6
# ihost field attributes
IHOST_STOR_FUNCTION = 'stor_function'
# idisk stor function
IDISK_STOR_FUNCTION = 'stor_function'
IDISK_STOR_FUNC_ROOT = 'rootfs'
# idisk device functions
IDISK_DEV_FUNCTION = 'device_function'
IDISK_DEV_FUNC_CINDER = 'cinder_device'
# ihost config_status field values
CONFIG_STATUS_OUT_OF_DATE = "Config out-of-date"
CONFIG_STATUS_REINSTALL = "Reinstall required"
@ -933,28 +926,6 @@ SERVICE_PARAM_SECTION_IDENTITY_IDENTITY = 'identity'
SERVICE_PARAM_SECTION_IDENTITY_LDAP = 'ldap'
SERVICE_PARAM_SECTION_IDENTITY_CONFIG = 'config'
SERVICE_PARAM_SECTION_CINDER_DEFAULT = 'DEFAULT'
SERVICE_PARAM_CINDER_DEFAULT_VOLUME_TYPE = 'default_volume_type'
SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH = 'multipath'
SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH_STATE = 'multipath.state'
SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH_STATE_DISABLED = 'false'
SERVICE_PARAM_SECTION_CINDER_EMC_VNX = 'emc_vnx'
SERVICE_PARAM_CINDER_EMC_VNX_ENABLED = 'enabled'
SERVICE_PARAM_SECTION_CINDER_EMC_VNX_STATE = 'emc_vnx.state'
SERVICE_PARAM_MAX_HPE3PAR = 12
SERVICE_PARAM_SECTION_CINDER_HPE3PAR = 'hpe3par'
SERVICE_PARAM_SECTION_CINDER_HPE3PAR_STATE = 'hpe3par.state'
SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND = 'hpelefthand'
SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND_STATE = 'hpelefthand.state'
SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS = 'status'
SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_DISABLING = 'disabling'
SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_DISABLED = 'disabled'
SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED = 'enabled'
SERVICE_PARAM_IDENTITY_CONFIG_TOKEN_EXPIRATION = 'token_expiration'
SERVICE_PARAM_IDENTITY_CONFIG_TOKEN_EXPIRATION_DEFAULT = 3600

View File

@ -7,11 +7,9 @@
# coding=utf-8
#
import json
import ldap
import ldapurl
import netaddr
import os
import pecan
import re
import rpm
@ -36,15 +34,6 @@ SERVICE_PARAMETER_DATA_FORMAT_SKIP = 'skip'
IDENTITY_CONFIG_TOKEN_EXPIRATION_MIN = 3600
IDENTITY_CONFIG_TOKEN_EXPIRATION_MAX = 14400
EMC_VNX_CONTROL_NETWORK_TYPES = [
constants.NETWORK_TYPE_MGMT,
constants.NETWORK_TYPE_OAM,
]
EMC_VNX_DATA_NETWORK_TYPES = [
constants.NETWORK_TYPE_MGMT,
]
def _validate_boolean(name, value):
if value.lower() not in ['true', 'false']:
@ -246,123 +235,11 @@ def _validate_ip_address(name, value):
"Parameter '%s' must be an IP address." % name))
def _validate_emc_vnx_iscsi_initiators(name, value):
    """Check that an iscsi_initiators value is a valid JSON mapping.

    The value must be a JSON object mapping each hostname to a list of
    IP addresses. An example of a valid iscsi_initiators string:
    {"worker-0": ["10.0.0.1", "10.0.0.2"], "worker-1": ["10.0.0.3"]}

    :param name: the service parameter name, used in the error message.
    :param value: the raw parameter value to validate.
    :raises wsme.exc.ClientSideError: if the value is malformed.
    """
    try:
        iscsi_initiators = json.loads(value)
        if not isinstance(iscsi_initiators, dict):
            raise ValueError
        for hostname, initiators_ips in iscsi_initiators.items():
            if not isinstance(initiators_ips, list):
                raise ValueError
            for ip in initiators_ips:
                if not cutils.is_valid_ip(ip):
                    raise ValueError
    except ValueError:
        # json.loads raises ValueError for malformed JSON, so one handler
        # covers both parse errors and the structural checks above.
        # (Error message grammar fixed: was "an dict of IP addresses lists".)
        raise wsme.exc.ClientSideError(_(
            "Parameter '%s' must be a dict of IP address lists." % name))
def _validate_emc_vnx_storage_vnx_security_file_dir(name, value):
    """Validate that the security_file_dir parameter names an existing path."""
    path_exists = os.path.exists(value)
    if path_exists:
        return
    raise wsme.exc.ClientSideError(_(
        "Parameter '%s' must be an existing path" % name))
def _validate_emc_vnx_storage_vnx_authentication_type(name, value):
    """Validate that the VNX authentication type is an allowed value."""
    allowed_types = ['global', 'local', 'ldap']
    _validate_value_in_set(name, value, allowed_types)
def _validate_read_only(name, value):
    """Reject any attempt to set a read-only service parameter."""
    msg = _("Parameter '%s' is readonly" % name)
    raise wsme.exc.ClientSideError(msg)
def _validate_emc_vnx_control_network_type(name, value):
    """Validate the EMC VNX control network type against the supported set."""
    _validate_value_in_set(name, value, EMC_VNX_CONTROL_NETWORK_TYPES)
def _validate_emc_vnx_data_network_type(name, value):
    """Validate the EMC VNX data network type against the supported set."""
    _validate_value_in_set(name, value, EMC_VNX_DATA_NETWORK_TYPES)
def _validate_hpe_api_url(name, value):
# Validate an HPE backend API URL: the host part must be a routable,
# non-loopback, non-localhost address. Raises ClientSideError otherwise.
url = urlparse(value)
# Host is a literal IP address: it must parse and must not be loopback.
if cutils.is_valid_ip(url.hostname):
try:
ip_addr = netaddr.IPNetwork(url.hostname)
except netaddr.core.AddrFormatError:
raise wsme.exc.ClientSideError(_(
"Invalid URL address '%s' for '%s'" % (value, name)))
if ip_addr.is_loopback():
raise wsme.exc.ClientSideError(_(
"URL '%s' must not be loopback for '%s'" % (value, name)))
# Host is a name: reject anything containing the localhost hostname.
elif url.hostname:
if constants.LOCALHOST_HOSTNAME in url.hostname.lower():
raise wsme.exc.ClientSideError(_(
"URL '%s' must not be localhost for '%s'" % (value, name)))
# No host component at all: the URL is unusable.
else:
raise wsme.exc.ClientSideError(_(
"Invalid URL address '%s' for '%s'" % (value, name)))
def _validate_hpe3par_iscsi_ips(name, value):
    """
    Validate list of IP addresses with an optional port number.
    For example:
        "10.10.220.253:3261,10.10.222.234"

    Each address must belong to one of the supported network pools
    (HPE_DATA_NETWORKS). Raises ClientSideError on any violation.
    """
    # str.split(',') never returns an empty list (an empty string yields
    # ['']), so the former `len(ip_addrs) == 0` check could never fire.
    # Test the raw string instead so an empty value gets a clear error.
    if not value.strip():
        raise wsme.exc.ClientSideError(_(
            "No IP addresses provided for '%s'" % name))
    for ip_addr in value.split(','):
        ipstr = ip_addr.split(':')
        if len(ipstr) == 1:
            _validate_ip_address(name, ipstr[0])
        elif len(ipstr) == 2:
            _validate_ip_address(name, ipstr[0])
            # Validate the optional port number.
            try:
                port = int(ipstr[1])
            except ValueError:
                raise wsme.exc.ClientSideError(_(
                    "Invalid port number '%s' for '%s'" % (ipstr[1], name)))
            if port < 0 or port > 65535:
                raise wsme.exc.ClientSideError(_(
                    "Port number '%d' must be between 0 and 65535 in '%s'" %
                    (port, name)))
        else:
            # More than one ':' means a malformed entry.
            raise wsme.exc.ClientSideError(_(
                "Invalid IP address '%s' in '%s'" % (ipstr, name)))
        # Address must be in one of the supported network's pools.
        ip = netaddr.IPAddress(ipstr[0])
        pool = _get_network_pool_from_ip_address(ip, HPE_DATA_NETWORKS)
        if pool is None:
            raise wsme.exc.ClientSideError(_(
                "Invalid IP address '%s' in '%s'" % (ipstr[0], name)))
def _validate_pci_alias(name, value):
allowed = ['vendor_id', 'product_id', 'class_id', 'name', 'device_id']
disallowed_names = [constants.NOVA_PCI_ALIAS_QAT_DH895XCC_PF_NAME,
@ -443,95 +320,6 @@ def _get_network_pool_from_ip_address(ip, networks):
return None
def _emc_vnx_get_param_from_name(param_name):
    """Fetch the emc_vnx cinder service parameter named *param_name*.

    Returns the parameter DB object, or None when it does not exist.
    """
    dbapi = pecan.request.dbapi
    try:
        param = dbapi.service_parameter_get_one(
            service=constants.SERVICE_TYPE_CINDER,
            section=constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX,
            name=param_name)
    except exception.NotFound:
        param = None
    return param
def _emc_vnx_format_address_name_db(name, network_type):
    """Build the DB address name for an emc_vnx controller address."""
    hostname = 'controller-emc-vnx-{0}'.format(name.replace('_', '-'))
    return cutils.format_address_name(hostname, network_type)
def _emc_vnx_get_address_db(address_name, network_type=None,
                            control_network=True):
    """Look up the emc_vnx address DB entry for *address_name*.

    Searches the given network type when provided, otherwise all control
    (or data) network types. Returns (address, network_type), or
    (None, None) when no matching address exists.
    """
    if network_type:
        candidates = [network_type]
    elif control_network:
        candidates = EMC_VNX_CONTROL_NETWORK_TYPES
    else:
        candidates = EMC_VNX_DATA_NETWORK_TYPES

    for net_type in candidates:
        db_name = _emc_vnx_format_address_name_db(address_name, net_type)
        try:
            return pecan.request.dbapi.address_get_by_name(db_name), net_type
        except exception.AddressNotFoundByName:
            continue
    return None, None
def _emc_vnx_db_destroy_address(address_db):
    """Destroy the given address DB entry; a None argument is a no-op."""
    if not address_db:
        return
    try:
        pecan.request.dbapi.address_destroy(address_db.uuid)
    except exception.AddressNotFound:
        raise wsme.exc.ClientSideError(_(
            "Unable to apply service parameters. "
            "Cannot destroy address '%s'" % address_db.address))
def _emc_vnx_save_address_from_param(address_param_name, network_type, pool):
    """Persist the IP held by a service parameter into the address table.

    Does nothing when the named service parameter is not defined.
    """
    db_name = _emc_vnx_format_address_name_db(address_param_name,
                                              network_type)
    param = _emc_vnx_get_param_from_name(address_param_name)
    if not param:
        return

    family = pool['family']
    # Building the record cannot raise AddressNotFound, so only the DB
    # create call needs to sit inside the try block.
    values = {
        'address': param.value,
        'prefix': pool['prefix'],
        'family': family,
        'enable_dad': constants.IP_DAD_STATES[family],
        'address_pool_id': pool['id'],
        'interface_id': None,
        'name': db_name,
    }
    try:
        pecan.request.dbapi.address_create(values)
    except exception.AddressNotFound:
        raise wsme.exc.ClientSideError(_(
            "Unable to apply service parameters. "
            "Unable to save address '%s' ('%s') into "
            "pool '%s'" % (address_param_name, param.value, pool['name'])))
def _emc_vnx_destroy_data_san_address(data_san_addr_param, data_san_db):
    """Remove the data-san address record and its service parameter.

    Either argument may be None, in which case that half is skipped.
    """
    if data_san_db:
        try:
            pecan.request.dbapi.address_destroy(data_san_db.uuid)
        except exception.AddressNotFound:
            raise wsme.exc.ClientSideError(_(
                "Unable to apply service parameters. "
                "Cannot destroy address '%s'" % data_san_db.uuid))
    if data_san_addr_param:
        try:
            pecan.request.dbapi.service_parameter_destroy_uuid(
                data_san_addr_param.uuid)
        except exception.NotFound:
            raise wsme.exc.ClientSideError(_(
                "Unable to apply service parameters. "
                "Cannot delete the service parameter "
                "data-san-ip '%s'" % data_san_addr_param.uuid))
def _validate_worker_boot_timeout(name, value):
_validate_range(name, value,
SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_MIN,
@ -1078,285 +866,6 @@ NETWORK_DEFAULT_PARAMETER_DATA_FORMAT = {
constants.SERVICE_PARAM_NAME_DEFAULT_SERVICE_PLUGINS: SERVICE_PARAMETER_DATA_FORMAT_ARRAY,
}
#
# Cinder DEFAULT service parameters
#
CINDER_DEFAULT_PARAMETER_MANDATORY = [
]
CINDER_DEFAULT_PARAMETER_PROTECTED = []
# If the lists:
#
# * CINDER_DEFAULT_PARAMETER_PROTECTED
# * CINDER_DEFAULT_PARAMETER_REQUIRED
# * CINDER_DEFAULT_PARAMETER_OPTIONAL
#
# are changed, we must update the
# SP_CINDER_DEFAULT_ALL_SUPPORTED_PARAMS list in
# packstack/plugins/cinder_250.py.
CINDER_DEFAULT_PARAMETER_REQUIRED = []
CINDER_DEFAULT_PARAMETER_OPTIONAL = (
CINDER_DEFAULT_PARAMETER_REQUIRED +
CINDER_DEFAULT_PARAMETER_PROTECTED + [
constants.SERVICE_PARAM_CINDER_DEFAULT_VOLUME_TYPE,
constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH,
]
)
CINDER_DEFAULT_PARAMETER_VALIDATOR = {
# Mandatory parameters
# Required parameters
# Optional parameters
constants.SERVICE_PARAM_CINDER_DEFAULT_VOLUME_TYPE: _validate_not_empty,
constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH: _validate_no_update,
}
CINDER_DEFAULT_PARAMETER_RESOURCE = {
# Mandatory parameters
# Required parameters
# Optional parameters
constants.SERVICE_PARAM_CINDER_DEFAULT_VOLUME_TYPE: None,
constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH: None,
}
CINDER_EMC_VNX_SAN_IP = 'san_ip'
CINDER_EMC_VNX_SAN_SECONDARY_IP = 'san_secondary_ip'
CINDER_EMC_VNX_DATA_SAN_IP = 'data_san_ip'
CINDER_EMC_VNX_CONTROL_NETWORK = 'control_network'
CINDER_EMC_VNX_DATA_NETWORK = 'data_network'
# Cinder emc_vnx Service Parameters
CINDER_EMC_VNX_PARAMETER_MANDATORY = [
constants.SERVICE_PARAM_CINDER_EMC_VNX_ENABLED,
]
# If the list CINDER_EMC_VNX_PARAMETER_PROTECTED,
# CINDER_EMC_VNX_PARAMETER_REQUIRED_ON_FEATURE_ENABLED,
# and CINDER_EMC_VNX_PARAMETER_OPTIONAL are changed. We must
# update the SP_CINDER_EMC_VNX_ALL_SUPPORTED_PARAMS list in
# packstack/plugins/cinder_250.py as well.
CINDER_EMC_VNX_PARAMETER_REQUIRED_ON_FEATURE_ENABLED = [
CINDER_EMC_VNX_CONTROL_NETWORK, CINDER_EMC_VNX_DATA_NETWORK,
CINDER_EMC_VNX_SAN_IP,
]
CINDER_EMC_VNX_PARAMETER_PROTECTED = [
'san_login', 'san_password',
]
CINDER_EMC_VNX_PARAMETER_OPTIONAL = (
CINDER_EMC_VNX_PARAMETER_REQUIRED_ON_FEATURE_ENABLED +
CINDER_EMC_VNX_PARAMETER_PROTECTED + [
'storage_vnx_pool_names', 'storage_vnx_security_file_dir',
CINDER_EMC_VNX_SAN_SECONDARY_IP, 'iscsi_initiators',
'storage_vnx_authentication_type', 'initiator_auto_deregistration',
'default_timeout', 'ignore_pool_full_threshold',
'max_luns_per_storage_group', 'destroy_empty_storage_group',
'force_delete_lun_in_storagegroup', 'io_port_list',
'check_max_pool_luns_threshold',
CINDER_EMC_VNX_DATA_SAN_IP,
]
)
CINDER_EMC_VNX_PARAMETER_VALIDATOR = {
# Mandatory parameters
constants.SERVICE_PARAM_CINDER_EMC_VNX_ENABLED:
_validate_boolean,
# Required parameters
'san_ip': _validate_ip_address,
# Optional parameters
'storage_vnx_pool_names': _validate_not_empty,
'san_login': _validate_not_empty,
'san_password': _validate_not_empty,
'storage_vnx_security_file_dir':
_validate_emc_vnx_storage_vnx_security_file_dir,
'san_secondary_ip': _validate_ip_address,
'iscsi_initiators': _validate_emc_vnx_iscsi_initiators,
'storage_vnx_authentication_type':
_validate_emc_vnx_storage_vnx_authentication_type,
'initiator_auto_deregistration': _validate_boolean,
'default_timeout': _validate_integer,
'ignore_pool_full_threshold': _validate_boolean,
'max_luns_per_storage_group': _validate_integer,
'destroy_empty_storage_group': _validate_boolean,
'force_delete_lun_in_storagegroup': _validate_boolean,
'io_port_list': _validate_not_empty,
'check_max_pool_luns_threshold': _validate_boolean,
'control_network': _validate_emc_vnx_control_network_type,
'data_network': _validate_emc_vnx_data_network_type,
'data_san_ip': _validate_read_only,
}
CINDER_EMC_VNX_PARAMETER_RESOURCE = {
# Mandatory parameters
constants.SERVICE_PARAM_CINDER_EMC_VNX_ENABLED: None,
# Required parameters
'san_ip': None,
# Optional parameters
'storage_vnx_pool_names': None,
'san_login': None,
'san_password': None,
'storage_vnx_security_file_dir': None,
'san_secondary_ip': None,
'iscsi_initiators': None,
'storage_vnx_authentication_type': None,
'initiator_auto_deregistration': None,
'default_timeout': None,
'ignore_pool_full_threshold': None,
'max_luns_per_storage_group': None,
'destroy_empty_storage_group': None,
'force_delete_lun_in_storagegroup': None,
'io_port_list': None,
'check_max_pool_luns_threshold': None,
'control_network': None,
'data_network': None,
'data_san_ip': None,
}
HPE_DATA_NETWORKS = [
constants.NETWORK_TYPE_MGMT,
]
#
# Cinder HPE3PAR Service Parameters
#
CINDER_HPE3PAR_PARAMETER_MANDATORY = [
constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED,
]
CINDER_HPE3PAR_PARAMETER_PROTECTED = [
'san_login', 'san_password',
]
# If the lists:
#
# * CINDER_HPE3PAR_PARAMETER_PROTECTED
# * CINDER_HPE3PAR_PARAMETER_REQUIRED
# * CINDER_HPE3PAR_PARAMETER_OPTIONAL
#
# are changed, we must update the
# SP_CINDER_HPE3PAR_ALL_SUPPORTED_PARAMS list in
# packstack/plugins/cinder_250.py.
CINDER_HPE3PAR_PARAMETER_REQUIRED = [
'hpe3par_api_url', 'hpe3par_username', 'hpe3par_password',
'hpe3par_cpg', 'hpe3par_cpg_snap', 'hpe3par_snapshot_expiration',
'hpe3par_iscsi_ips'
]
CINDER_HPE3PAR_PARAMETER_OPTIONAL = (
CINDER_HPE3PAR_PARAMETER_REQUIRED +
CINDER_HPE3PAR_PARAMETER_PROTECTED + [
'hpe3par_debug', 'hpe3par_iscsi_chap_enabled',
'san_ip'
]
)
CINDER_HPE3PAR_PARAMETER_VALIDATOR = {
# Mandatory parameters
constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED: _validate_boolean,
# Required parameters
'hpe3par_api_url': _validate_hpe_api_url,
'hpe3par_username': _validate_not_empty,
'hpe3par_password': _validate_not_empty,
'hpe3par_cpg': _validate_not_empty,
'hpe3par_cpg_snap': _validate_not_empty,
'hpe3par_snapshot_expiration': _validate_integer,
'hpe3par_iscsi_ips': _validate_hpe3par_iscsi_ips,
# Optional parameters
'hpe3par_debug': _validate_boolean,
'hpe3par_scsi_chap_enabled': _validate_boolean,
'san_login': _validate_not_empty,
'san_password': _validate_not_empty,
'san_ip': _validate_ip_address,
}
CINDER_HPE3PAR_PARAMETER_RESOURCE = {
# Mandatory parameters
constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED: None,
# Required parameters
'hpe3par_api_url': None,
'hpe3par_username': None,
'hpe3par_password': None,
'hpe3par_cpg': None,
'hpe3par_cpg_snap': None,
'hpe3par_snapshot_expiration': None,
'hpe3par_iscsi_ips': None,
# Optional parameters
'hpe3par_debug': None,
'hpe3par_scsi_chap_enabled': None,
'san_login': None,
'san_password': None,
'san_ip': None,
}
#
# Cinder HPELEFTHAND Service Parameters
#
CINDER_HPELEFTHAND_PARAMETER_MANDATORY = [
constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED,
]
CINDER_HPELEFTHAND_PARAMETER_PROTECTED = []
# If the lists:
#
# * CINDER_HPELEFTHAND_PARAMETER_PROTECTED
# * CINDER_HPELEFTHAND_PARAMETER_REQUIRED
# * CINDER_HPELEFTHAND_PARAMETER_OPTIONAL
#
# are changed, we must update the
# SP_CINDER_HPELEFTHAND_ALL_SUPPORTED_PARAMS list in
# packstack/plugins/cinder_250.py.
CINDER_HPELEFTHAND_PARAMETER_REQUIRED = [
'hpelefthand_api_url', 'hpelefthand_username', 'hpelefthand_password',
'hpelefthand_clustername'
]
CINDER_HPELEFTHAND_PARAMETER_OPTIONAL = (
CINDER_HPELEFTHAND_PARAMETER_REQUIRED +
CINDER_HPELEFTHAND_PARAMETER_PROTECTED + [
'hpelefthand_debug', 'hpelefthand_ssh_port', 'hpelefthand_iscsi_chap_enabled'
]
)
CINDER_HPELEFTHAND_PARAMETER_VALIDATOR = {
# Mandatory parameters
constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED: _validate_boolean,
# Required parameters
'hpelefthand_api_url': _validate_hpe_api_url,
'hpelefthand_username': _validate_not_empty,
'hpelefthand_password': _validate_not_empty,
'hpelefthand_clustername': _validate_not_empty,
# Optional parameters
'hpelefthand_debug': _validate_boolean,
'hpelefthand_ssh_port': _validate_integer,
'hpelefthand_iscsi_chap_enabled': _validate_boolean
}
CINDER_HPELEFTHAND_PARAMETER_RESOURCE = {
# Mandatory parameters
constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED: None,
# Required parameters
'hpelefthand_api_url': None,
'hpelefthand_username': None,
'hpelefthand_password': None,
'hpelefthand_clustername': None,
# Optional parameters
'hpelefthand_debug': None,
'hpelefthand_ssh_port': None,
'hpelefthand_iscsi_chap_enabled': None,
}
# Maintenance Service Parameters
PLATFORM_MTCE_PARAMETER_MANDATORY = [
constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT,
@ -1537,39 +1046,6 @@ SERVICE_PARAM_PROTECTED = 'protected'
SERVICE_VALUE_PROTECTION_MASK = "****"
SERVICE_PARAMETER_SCHEMA = {
constants.SERVICE_TYPE_CINDER: {
constants.SERVICE_PARAM_SECTION_CINDER_DEFAULT: {
SERVICE_PARAM_MANDATORY: CINDER_DEFAULT_PARAMETER_MANDATORY,
SERVICE_PARAM_PROTECTED: CINDER_DEFAULT_PARAMETER_PROTECTED,
SERVICE_PARAM_OPTIONAL: CINDER_DEFAULT_PARAMETER_OPTIONAL,
SERVICE_PARAM_VALIDATOR: CINDER_DEFAULT_PARAMETER_VALIDATOR,
SERVICE_PARAM_RESOURCE: CINDER_DEFAULT_PARAMETER_RESOURCE,
},
constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX: {
SERVICE_PARAM_MANDATORY: CINDER_EMC_VNX_PARAMETER_MANDATORY,
SERVICE_PARAM_PROTECTED: CINDER_EMC_VNX_PARAMETER_PROTECTED,
SERVICE_PARAM_OPTIONAL: CINDER_EMC_VNX_PARAMETER_OPTIONAL,
SERVICE_PARAM_VALIDATOR: CINDER_EMC_VNX_PARAMETER_VALIDATOR,
SERVICE_PARAM_RESOURCE: CINDER_EMC_VNX_PARAMETER_RESOURCE,
},
constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR: {
SERVICE_PARAM_MANDATORY: CINDER_HPE3PAR_PARAMETER_MANDATORY,
SERVICE_PARAM_PROTECTED: CINDER_HPE3PAR_PARAMETER_PROTECTED,
SERVICE_PARAM_OPTIONAL: CINDER_HPE3PAR_PARAMETER_OPTIONAL,
SERVICE_PARAM_VALIDATOR: CINDER_HPE3PAR_PARAMETER_VALIDATOR,
SERVICE_PARAM_RESOURCE: CINDER_HPE3PAR_PARAMETER_RESOURCE,
},
constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND: {
SERVICE_PARAM_MANDATORY: CINDER_HPELEFTHAND_PARAMETER_MANDATORY,
SERVICE_PARAM_PROTECTED: CINDER_HPELEFTHAND_PARAMETER_PROTECTED,
SERVICE_PARAM_OPTIONAL: CINDER_HPELEFTHAND_PARAMETER_OPTIONAL,
SERVICE_PARAM_VALIDATOR: CINDER_HPELEFTHAND_PARAMETER_VALIDATOR,
SERVICE_PARAM_RESOURCE: CINDER_HPELEFTHAND_PARAMETER_RESOURCE,
},
},
constants.SERVICE_TYPE_IDENTITY: {
constants.SERVICE_PARAM_SECTION_IDENTITY_ASSIGNMENT: {
SERVICE_PARAM_MANDATORY: IDENTITY_ASSIGNMENT_PARAMETER_MANDATORY,

View File

@ -433,36 +433,6 @@ class ConductorManager(service.PeriodicService):
'name': constants.SERVICE_PARAM_HORIZON_AUTH_LOCKOUT_RETRIES,
'value': constants.SERVICE_PARAM_HORIZON_AUTH_LOCKOUT_RETRIES_DEFAULT
},
{'service': constants.SERVICE_TYPE_CINDER,
'section': constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX,
'name': constants.SERVICE_PARAM_CINDER_EMC_VNX_ENABLED,
'value': False
},
{'service': constants.SERVICE_TYPE_CINDER,
'section': constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX_STATE,
'name': constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS,
'value': constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_DISABLED
},
{'service': constants.SERVICE_TYPE_CINDER,
'section': constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR,
'name': constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED,
'value': False
},
{'service': constants.SERVICE_TYPE_CINDER,
'section': constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND,
'name': constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED,
'value': False
},
{'service': constants.SERVICE_TYPE_CINDER,
'section': constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR_STATE,
'name': 'status',
'value': 'disabled'
},
{'service': constants.SERVICE_TYPE_CINDER,
'section': constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND_STATE,
'name': 'status',
'value': 'disabled'
},
{'service': constants.SERVICE_TYPE_PLATFORM,
'section': constants.SERVICE_PARAM_SECTION_PLATFORM_MAINTENANCE,
'name': constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT,
@ -523,16 +493,6 @@ class ConductorManager(service.PeriodicService):
},
]
for i in range(2, constants.SERVICE_PARAM_MAX_HPE3PAR + 1):
section = "{0}{1}".format(constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR, i)
DEFAULT_PARAMETERS.extend([
{'service': constants.SERVICE_TYPE_CINDER,
'section': section,
'name': constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED,
'value': False
}]
)
def _create_default_service_parameter(self):
""" Populate the default service parameters"""
for p in ConductorManager.DEFAULT_PARAMETERS:
@ -4810,166 +4770,6 @@ class ConductorManager(service.PeriodicService):
'install_state_info':
host.install_state_info})
def _audit_cinder_state(self):
    """
    Complete disabling the EMC by removing it from the list of cinder
    services.
    """
    emc_state_param = self._get_emc_state()
    current_emc_state = emc_state_param.value
    # Only act while the backend is mid-transition to disabled;
    # every other state means there is nothing to clean up.
    if (current_emc_state !=
            constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_DISABLING):
        return
    LOG.info("Running cinder state audit")
    try:
        hostname = socket.gethostname()
        active_host = \
            self.dbapi.ihost_get_by_hostname(hostname)
    except Exception as e:
        # Best-effort audit: retried on the next periodic pass.
        LOG.error(
            "Failed to get local host object during cinder audit: %s",
            str(e))
        return
    if (active_host and active_host.config_target and
            active_host.config_applied == active_host.config_target):
        # The manifest has been applied on the active controller
        # Now check that the emc service has gone down
        emc_service_removed = False
        emc_service_found = False
        cinder_services = self._openstack.get_cinder_services()
        for cinder_service in cinder_services:
            if '@emc' in cinder_service.host:
                emc_service_found = True
                if cinder_service.state == 'down':
                    # Deregister the stopped service from cinder's
                    # service list via cinder-manage.
                    command_args = [
                        '/usr/bin/cinder-manage',
                        'service',
                        'remove',
                        'cinder-volume',
                        cinder_service.host
                    ]
                    with open(os.devnull, "w") as fnull:
                        LOG.info("Removing emc cinder-volume service")
                        try:
                            subprocess.check_call(
                                command_args, stdout=fnull, stderr=fnull)
                            emc_service_removed = True
                        except subprocess.CalledProcessError as e:
                            LOG.exception(e)
        # Either the service was just removed, or it was never
        # registered: both mean the disable has completed.
        if emc_service_removed or not emc_service_found:
            LOG.info("Setting EMC state to disabled")
            new_state = constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_DISABLED
            self.dbapi.service_parameter_update(
                emc_state_param.uuid,
                {'value': new_state}
            )
def _hpe_audit_cinder_state(self):
    """
    Complete disabling the hpe drivers by removing them from the list
    of cinder services.
    """
    # Only run audit if any one of the backends is enabled
    hpe3par_enabled = False
    try:
        param = self.dbapi.service_parameter_get_one(constants.SERVICE_TYPE_CINDER,
                                                     constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR,
                                                     constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED)
        hpe3par_enabled = param.value.lower() == 'true'
    except exception.NotFound:
        pass
    if not hpe3par_enabled:
        # Also check the secondary hpe3par sections (hpe3par2..N).
        for i in range(2, constants.SERVICE_PARAM_MAX_HPE3PAR + 1):
            section = "{0}{1}".format(constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR, i)
            try:
                param = self.dbapi.service_parameter_get_one(constants.SERVICE_TYPE_CINDER,
                                                             section,
                                                             constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED)
                hpe3par_enabled = param.value.lower() == 'true'
            except exception.NotFound:
                pass
            if hpe3par_enabled:
                break
    try:
        param = self.dbapi.service_parameter_get_one(constants.SERVICE_TYPE_CINDER,
                                                     constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND,
                                                     constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED)
        hpelefthand_enabled = param.value.lower() == 'true'
    except exception.NotFound:
        hpelefthand_enabled = False
    if not (hpe3par_enabled or hpelefthand_enabled):
        return
    # Start audit
    try:
        hostname = socket.gethostname()
        active_host = \
            self.dbapi.ihost_get_by_hostname(hostname)
    except Exception as e:
        # Best-effort audit: retried on the next periodic pass.
        LOG.error(
            "Failed to get local host object during cinder audit: %s",
            str(e))
        return
    if (not (active_host and active_host.config_target and
             active_host.config_applied == active_host.config_target)):
        return
    #
    # The manifest has been applied on the active controller. Now, ensure
    # that the hpe services are down.
    #
    hosts = [constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR,
             constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND]
    services = self._openstack.get_cinder_services()
    for host in hosts:
        status = self._hpe_get_state(host)
        if status.value != "disabling":
            continue
        found = False
        removed = False
        LOG.info("Running hpe cinder state audit for %s", host)
        for cinder_service in services:
            if "@" + host in cinder_service.host:
                found = True
                if cinder_service.state == 'down':
                    # Deregister the stopped service via cinder-manage.
                    command_args = [
                        '/usr/bin/cinder-manage',
                        'service',
                        'remove',
                        'cinder-volume',
                        cinder_service.host
                    ]
                    with open(os.devnull, "w") as fnull:
                        LOG.info("Removing cinder-volume service %s" % host)
                        try:
                            subprocess.check_call(
                                command_args, stdout=fnull, stderr=fnull)
                            removed = True
                        except subprocess.CalledProcessError as e:
                            LOG.exception(e)
                # NOTE(review): assumes at most one cinder-volume service
                # entry per backend host — stop at the first match.
                break
        # Either removed just now, or never registered: disable completes.
        if removed or not found:
            LOG.info("Setting %s state to disabled", host)
            self.dbapi.service_parameter_update(status.uuid,
                                                {"value": "disabled"})
@periodic_task.periodic_task(spacing=CONF.conductor.audit_interval)
def _conductor_audit(self, context):
# periodically, perform audit of inventory
@ -4981,10 +4781,6 @@ class ConductorManager(service.PeriodicService):
# Audit upgrade status
self._audit_upgrade_status()
self._audit_cinder_state()
self._hpe_audit_cinder_state()
hosts = self.dbapi.ihost_get_list()
# Audit install states
@ -5746,8 +5542,7 @@ class ConductorManager(service.PeriodicService):
config_dict = {
"personalities": personalities,
"classes": ['platform::drbd::runtime',
'openstack::cinder::runtime']
"classes": ['platform::drbd::runtime']
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
@ -5761,7 +5556,8 @@ class ConductorManager(service.PeriodicService):
# Update service table
self.update_service_table_for_cinder(endpoint_list, external=True)
classes = ['openstack::cinder::endpoint::runtime']
# TODO (tliu) classes may be removable from the config_dict
classes = []
config_dict = {
"personalities": personalities,
@ -5801,7 +5597,6 @@ class ConductorManager(service.PeriodicService):
'platform::lvm::controller::runtime',
'platform::haproxy::runtime',
'platform::drbd::runtime',
'openstack::cinder::runtime',
'platform::sm::norestart::runtime']
host_ids = [ctrl.uuid for ctrl in valid_ctrls]
@ -5917,8 +5712,9 @@ class ConductorManager(service.PeriodicService):
classes.append('platform::drbd::cephmon::runtime')
classes.append('platform::drbd::runtime')
# TODO (tliu) determine if this SB_SVC_CINDER section can be removed
if constants.SB_SVC_CINDER in services:
classes.append('openstack::cinder::runtime')
LOG.info("No cinder manifests for update_ceph_config")
classes.append('platform::sm::norestart::runtime')
host_ids = [ctrl.uuid for ctrl in valid_ctrls]
config_dict = {"personalities": personalities,
@ -6030,8 +5826,9 @@ class ConductorManager(service.PeriodicService):
'openstack::keystone::endpoint::runtime',
]
# TODO (tliu) determine if this SB_SVC_CINDER section can be removed
if constants.SB_SVC_CINDER in services:
classes.append('openstack::cinder::runtime')
LOG.info("No cinder manifests for update_ceph_external_config")
classes.append('platform::sm::norestart::runtime')
report_config = puppet_common.REPORT_CEPH_EXTERNAL_BACKEND_CONFIG
@ -7279,35 +7076,6 @@ class ConductorManager(service.PeriodicService):
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
elif service == constants.SERVICE_TYPE_CINDER:
self._update_emc_state()
self._hpe_update_state(constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR)
self._hpe_update_state(constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND)
# service params need to be applied to controllers that have cinder provisioned
# TODO(rchurch) make sure that we can't apply without a cinder backend.
ctrls = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
valid_ctrls = [ctrl for ctrl in ctrls if
(utils.is_host_active_controller(ctrl) and
ctrl.administrative == constants.ADMIN_LOCKED and
ctrl.availability == constants.AVAILABILITY_ONLINE) or
(ctrl.administrative == constants.ADMIN_UNLOCKED and
ctrl.operational == constants.OPERATIONAL_ENABLED)]
config_dict = {
"personalities": personalities,
"classes": ['openstack::cinder::service_param::runtime'],
"host_uuids": [ctrl.uuid for ctrl in valid_ctrls],
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
multipath_state_changed = self._multipath_update_state()
if multipath_state_changed:
self._config_update_hosts(context,
[constants.CONTROLLER, constants.WORKER],
reboot=True)
elif service == constants.SERVICE_TYPE_PLATFORM:
config_dict = {
"personalities": personalities,
@ -7385,141 +7153,6 @@ class ConductorManager(service.PeriodicService):
config_uuid = self._config_clear_reboot_required(config_uuid)
self._config_apply_runtime_manifest(context, config_uuid, config_dict, force=True)
def _update_emc_state(self):
    """Drive the EMC SAN state machine from the 'enabled' parameter:
    disabled -> enabled when requested, enabled -> disabling otherwise."""
    state_param = self._get_emc_state()
    current = state_param.value

    enabled_param = self.dbapi.service_parameter_get_one(
        constants.SERVICE_TYPE_CINDER,
        constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX,
        constants.SERVICE_PARAM_CINDER_EMC_VNX_ENABLED
    )
    requested = (enabled_param.value.lower() == 'true')

    new_state = None
    if (requested and current ==
            constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_DISABLED):
        new_state = constants.SERVICE_PARAM_CINDER_EMC_VNX_ENABLED
    elif (not requested and current ==
            constants.SERVICE_PARAM_CINDER_EMC_VNX_ENABLED):
        new_state = constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_DISABLING

    if new_state is not None:
        LOG.info("Updating EMC state to %s" % new_state)
        self.dbapi.service_parameter_update(
            state_param.uuid,
            {'value': new_state}
        )
def _get_emc_state(self):
    """Return the EMC SAN change-status parameter, creating it in the
    'disabled' state when it does not yet exist."""
    try:
        return self.dbapi.service_parameter_get_one(
            constants.SERVICE_TYPE_CINDER,
            constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX_STATE,
            constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS
        )
    except exception.NotFound:
        LOG.info("EMC state not found, setting to disabled")
        return self.dbapi.service_parameter_create({
            'service': constants.SERVICE_TYPE_CINDER,
            'section': constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX_STATE,
            'name': constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS,
            'value': constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_DISABLED
        })
def _hpe_get_state(self, name):
    """Return the SAN change-status parameter for the hpe section *name*.

    Raises SysinvException when the '<name>.state' section is missing.
    """
    section = name + '.state'
    try:
        return self.dbapi.service_parameter_get_one(
            constants.SERVICE_TYPE_CINDER, section,
            constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS
        )
    except exception.NotFound:
        raise exception.SysinvException(_("Hpe section %s not "
                                          "found" % section))
def _hpe_update_state(self, name):
    """Drive the hpe backend state machine for section *name*.

    The hpe3par backend also consults its secondary sections
    (hpe3par2..N) when the primary section is not enabled.
    """
    def _section_enabled(section):
        # True when the section's 'enabled' parameter exists and is 'true'.
        try:
            param = self.dbapi.service_parameter_get_one(
                constants.SERVICE_TYPE_CINDER, section,
                constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED
            )
            return param.value.lower() == 'true'
        except exception.NotFound:
            return False

    status_param = self._hpe_get_state(name)
    status = status_param.value

    enabled = _section_enabled(name)
    if not enabled and name == constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR:
        for i in range(2, constants.SERVICE_PARAM_MAX_HPE3PAR + 1):
            if _section_enabled("{0}{1}".format(name, i)):
                enabled = True
                break

    new_state = None
    if enabled and status == constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_DISABLED:
        new_state = constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED
    elif not enabled and status == constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_ENABLED:
        new_state = constants.SERVICE_PARAM_CINDER_SAN_CHANGE_STATUS_DISABLING

    if new_state is not None:
        LOG.info("Updating %s to %s" % (name, new_state))
        self.dbapi.service_parameter_update(status_param.uuid, {'value': new_state})
def _multipath_get_state(self):
    """Return the multipath state parameter, creating it in the
    disabled state when it does not yet exist."""
    dbapi = self.dbapi
    try:
        return dbapi.service_parameter_get_one(
            constants.SERVICE_TYPE_CINDER,
            constants.SERVICE_PARAM_SECTION_CINDER_DEFAULT,
            constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH_STATE
        )
    except exception.NotFound:
        return dbapi.service_parameter_create({
            'service': constants.SERVICE_TYPE_CINDER,
            'section': constants.SERVICE_PARAM_SECTION_CINDER_DEFAULT,
            'name': constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH_STATE,
            'value': constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH_STATE_DISABLED
        })
def _multipath_update_state(self):
    """Update multipath service parameter state

    :return True if multipath state changed, False otherwise
    """
    state_param = self._multipath_get_state()
    try:
        desired = self.dbapi.service_parameter_get_one(
            constants.SERVICE_TYPE_CINDER,
            constants.SERVICE_PARAM_SECTION_CINDER_DEFAULT,
            constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH
        ).value
    except exception.NotFound:
        # No multipath parameter configured -> treat as disabled.
        desired = constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH_STATE_DISABLED

    if desired == state_param.value:
        return False
    self.dbapi.service_parameter_update(
        state_param.uuid, dict(value=desired))
    return True
def update_sdn_controller_config(self, context):
"""Update the SDN controller configuration"""
LOG.info("update_sdn_controller_config")
@ -9545,22 +9178,6 @@ class ConductorManager(service.PeriodicService):
return pvs_dict
def cinder_has_external_backend(self, context):
    """
    Check if cinder has loosely coupled external backends.
    These are the possible backends: emc_vnx, hpe3par, hpelefthand
    """
    internal = (constants.CINDER_BACKEND_LVM,
                constants.CINDER_BACKEND_CEPH)
    pools = self._openstack.get_cinder_pools()
    for pool in (pools or []):
        backend = getattr(pool, 'volume_backend_name', '')
        if backend and backend not in internal:
            return True
    return False
def get_ceph_object_pool_name(self, context):
"""
Get Rados Gateway object data pool name
@ -9624,52 +9241,6 @@ class ConductorManager(service.PeriodicService):
return cinder_size
def validate_emc_removal(self, context):
    """
    Check that it is safe to remove the EMC SAN
    Ensure there are no volumes using the EMC endpoint
    """
    for volume in self._openstack.get_cinder_volumes():
        host = getattr(volume, 'os-vol-host-attr:host', '')
        if host and '@emc_vnx' in host:
            return False
    return True
def validate_hpe3par_removal(self, context, backend):
    """
    Check that it is safe to remove the HPE3PAR SAN
    Ensure there are no volumes using the HPE3PAR endpoint
    """
    tag = '@' + backend + '#'
    for volume in self._openstack.get_cinder_volumes():
        host = getattr(volume, 'os-vol-host-attr:host', '')
        if host and tag in host:
            return False
    return True
def validate_hpelefthand_removal(self, context):
    """
    Check that it is safe to remove the HPELEFTHAND SAN
    Ensure there are no volumes using the HPELEFTHAND endpoint
    """
    for volume in self._openstack.get_cinder_volumes():
        host = getattr(volume, 'os-vol-host-attr:host', '')
        if host and '@hpelefthand' in host:
            return False
    return True
def region_has_ceph_backend(self, context):
"""
Send a request to the primary region to see if ceph is configured

View File

@ -1425,26 +1425,6 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
return self.call(context,
self.make_msg('get_cinder_partition_size'))
def validate_emc_removal(self, context):
    """
    Check that it is safe to remove the EMC SAN
    """
    msg = self.make_msg('validate_emc_removal')
    return self.call(context, msg)
def validate_hpe3par_removal(self, context, backend):
    """
    Check that it is safe to remove the HPE 3PAR storage array
    """
    msg = self.make_msg('validate_hpe3par_removal', backend=backend)
    return self.call(context, msg)
def validate_hpelefthand_removal(self, context):
    """
    Check that it is safe to remove the HPE Lefthand storage array
    """
    msg = self.make_msg('validate_hpelefthand_removal')
    return self.call(context, msg)
def region_has_ceph_backend(self, context):
"""
Send a request to primary region to see if ceph backend is configured
@ -1530,16 +1510,6 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
return self.call(context,
self.make_msg('cinder_prepare_db_for_volume_restore'))
def cinder_has_external_backend(self, context):
    """Check if cinder has loosely coupled external backends configured.

    Possible external backends: emc_vnx, hpe3par, hpelefthand.

    :param context: request context
    :returns: result of the remote 'cinder_has_external_backend' call
    """
    msg = self.make_msg('cinder_has_external_backend')
    return self.call(context, msg)
def get_ceph_object_pool_name(self, context):
"""
Get Rados Gateway object data pool name

View File

@ -1,800 +0,0 @@
#
# Copyright (c) 2017-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
from sysinv.common import constants
from sysinv.common import utils
from sysinv.openstack.common import log as logging
from sysinv.puppet import openstack
LOG = logging.getLogger(__name__)
# This section is for [DEFAULT] config params that may need to be applied
SP_CINDER_DEFAULT = constants.SERVICE_PARAM_SECTION_CINDER_DEFAULT
SP_CINDER_DEFAULT_PREFIX = 'openstack::cinder::config::default'
SP_CINDER_DEFAULT_ALL_SUPPORTED_PARAMS = [
    constants.SERVICE_PARAM_CINDER_DEFAULT_VOLUME_TYPE,
    constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH,
    # Hardcoded params: params we always want set
]

# This section is for [emc_vnx] config params that may need to be applied
SP_CINDER_EMC_VNX = constants.SERVICE_PARAM_SECTION_CINDER_EMC_VNX
SP_CINDER_EMC_VNX_PREFIX = 'openstack::cinder::emc_vnx'

# The entries in CINDER_EMC_VNX_PARAMETER_REQUIRED_ON_FEATURE_ENABLED,
# CINDER_EMC_VNX_PARAMETER_PROTECTED, and
# CINDER_EMC_VNX_PARAMETER_OPTIONAL in service_parameter.py
# in sysinv package must be in the following list.
SP_CINDER_EMC_VNX_ALL_SUPPORTED_PARAMS = [
    # From CINDER_EMC_VNX_PARAMETER_REQUIRED_ON_FEATURE_ENABLED
    'san_ip',
    # From CINDER_EMC_VNX_PARAMETER_PROTECTED list
    'san_login', 'san_password',
    # From CINDER_EMC_VNX_PARAMETER_OPTIONAL list
    'storage_vnx_pool_names', 'storage_vnx_security_file_dir',
    'san_secondary_ip', 'iscsi_initiators',
    'storage_vnx_authentication_type', 'initiator_auto_deregistration',
    'default_timeout', 'ignore_pool_full_threshold',
    'max_luns_per_storage_group', 'destroy_empty_storage_group',
    'force_delete_lun_in_storagegroup', 'io_port_list',
    'check_max_pool_luns_threshold',
    # Hardcoded params
    'volume_backend_name', 'volume_driver', 'naviseccli_path', 'storage_protocol',
    'initiator_auto_registration'
]

# Params that must never make it into cinder.conf for emc_vnx.
SP_CINDER_EMC_VNX_ALL_BLACKLIST_PARAMS = [
    'control_network', 'data_network', 'data_san_ip',
]

# This section is for [hpe3par] config params that may need to be applied
SP_CINDER_HPE3PAR = constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR
SP_CINDER_HPE3PAR_PREFIX = 'openstack::cinder::hpe3par'
SP_CINDER_HPE3PAR_ALL_SUPPORTED_PARAMS = [
    'hpe3par_api_url', 'hpe3par_username', 'hpe3par_password',
    'hpe3par_cpg', 'hpe3par_cpg_snap', 'hpe3par_snapshot_expiration',
    'hpe3par_debug', 'hpe3par_iscsi_ips', 'hpe3par_iscsi_chap_enabled',
    'san_login', 'san_password', 'san_ip',
    # Hardcoded params
    'volume_backend_name', 'volume_driver'
]

# This section is for [hpelefthand] config params that may need to be applied
SP_CINDER_HPELEFTHAND = constants.SERVICE_PARAM_SECTION_CINDER_HPELEFTHAND
SP_CINDER_HPELEFTHAND_PREFIX = 'openstack::cinder::hpelefthand'
SP_CINDER_HPELEFTHAND_ALL_SUPPORTED_PARAMS = [
    'hpelefthand_api_url', 'hpelefthand_username', 'hpelefthand_password',
    'hpelefthand_clustername', 'hpelefthand_debug', 'hpelefthand_ssh_port',
    'hpelefthand_iscsi_chap_enabled',
    # Hardcoded params
    'volume_backend_name', 'volume_driver'
]

# Keys used inside each SP_CINDER_SECTION_MAPPING entry:
SP_CONF_NAME_KEY = 'conf_name'          # hieradata prefix for the section
SP_PARAM_PROCESS_KEY = 'param_process'  # per-parameter handler function
SP_POST_PROCESS_KEY = 'post_process'    # section finalizer function
SP_PROVIDED_PARAMS_LIST_KEY = 'provided_params_list'  # user-supplied params
SP_ABSENT_PARAMS_LIST_KEY = 'absent_params_list'      # params forced absent
#
# common section processing calls
#
def sp_common_param_process(config, section, section_map, name, value):
    """Record one provided service parameter into the section's param map.

    :param config: hieradata dict being built (unused here)
    :param section: cinder.conf section name (unused here)
    :param section_map: per-section state dict (mutated in place)
    :param name: service parameter name
    :param value: service parameter value
    """
    provided = section_map.setdefault(SP_PROVIDED_PARAMS_LIST_KEY, {})
    provided[name] = value
def sp_common_post_process(config, section, section_map, is_service_enabled,
                           enabled_backends, is_a_feature=True):
    """Finalize a section's service parameters into puppet hieradata.

    Converts the accumulated provided/absent parameter lists for *section*
    into the puppet cinder_config resource format and stores them in
    *config* under the section's '::config_params' key.

    :param config: hieradata dict being built (mutated in place)
    :param section: cinder.conf section name (e.g. 'emc_vnx')
    :param section_map: per-section state from SP_CINDER_SECTION_MAPPING
    :param is_service_enabled: True if the cinder service is enabled
    :param enabled_backends: enabled backend names (may be appended to)
    :param is_a_feature: when True, the 'enabled' service param is mapped
        to a '::feature_enabled' hiera key and may enable the backend
    """
    if section_map:
        provided_params = section_map.get(SP_PROVIDED_PARAMS_LIST_KEY, {})
        absent_params = section_map.get(SP_ABSENT_PARAMS_LIST_KEY, [])
        conf_name = section_map.get(SP_CONF_NAME_KEY) + '::config_params'
        if is_a_feature:
            feature_enabled_conf = section_map.get(SP_CONF_NAME_KEY) + '::feature_enabled'
            # Convert "enabled" service param to 'feature_enabled' param
            config[feature_enabled_conf] = provided_params.get('enabled', 'false').lower()
            if 'enabled' in provided_params:
                del provided_params['enabled']
            # Inform Cinder to support this storage backend as well
            if config[feature_enabled_conf] == 'true':
                enabled_backends.append(section)
        # Reformat the params data structure to match with puppet config
        # resource. This will make puppet code very simple. For example
        # default Hiera file defaults.yaml has the followings for emc_vnx
        #
        #   openstack::cinder::emc_vnx::featured_enabled: 'true'
        #   openstack::cinder::emc_vnx::config_params:
        #       emc_vnx/san_login:
        #           value: sysadmin
        #       emc_vnx/san_ip:
        #           value: 1.2.3.4
        #       emc_vnx/default_timeout:
        #           value: 120
        #       emc_vnx/san_secondary_ip:
        #           ensure: absent
        #
        # With this format, Puppet only needs to do this:
        #    create_resources('cinder_config', hiera_hash(
        #        '', {}))
        provided_params_puppet_format = {}
        for param, value in provided_params.items():
            provided_params_puppet_format[section + '/' + param] = {
                'value': value
            }
        for param in absent_params:
            # 'ensure': 'absent' makes sure this param will be removed
            # out of cinder.conf
            provided_params_puppet_format[section + '/' + param] = {
                'ensure': 'absent'
            }
        config[conf_name] = provided_params_puppet_format
#
# Section specific post processing calls: DEFAULT, emc_vnx, hpe3par, hpelefthand
#
def sp_multipath_post_process(config, provided_params):
    """Translate the DEFAULT/multipath service param into hieradata.

    DEFAULT/multipath does not map 1:1 to an entry in cinder.conf; it
    drives the platform multipath hiera key instead. Both the multipath
    param and its state param are consumed (removed) here.

    :param config: hieradata dict being built (mutated in place)
    :param provided_params: DEFAULT-section params (mutated in place)
    """
    param = constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH
    multipath_key = 'platform::multipath::params::enabled'
    enabled = provided_params.get(param, 'false').lower() == 'true'
    if enabled:
        config[multipath_key] = True
    else:
        config.pop(multipath_key, None)
    provided_params.pop(param, None)
    provided_params.pop(
        constants.SERVICE_PARAM_CINDER_DEFAULT_MULTIPATH_STATE, None)
def sp_default_post_process(config, section, section_map,
                            is_service_enabled, enabled_backends):
    """Finalize the cinder [DEFAULT] section service parameters.

    Clears all provided params when the service is disabled, otherwise
    defaults the volume type to the primary ceph backend when unset,
    computes the absent-param list, folds in multipath handling, then
    runs the common post processing as a non-feature section.

    :param config: hieradata dict being built (mutated in place)
    :param section: cinder.conf section name ('DEFAULT')
    :param section_map: per-section state from SP_CINDER_SECTION_MAPPING
    :param is_service_enabled: True if the cinder service is enabled
    :param enabled_backends: list of enabled backend names
    """
    provided_params = section_map.get(SP_PROVIDED_PARAMS_LIST_KEY, {})
    if not is_service_enabled:
        # If the service is not enabled and there are some provided params then
        # just remove all of these params as they should not be in cinder.conf
        section_map[SP_PROVIDED_PARAMS_LIST_KEY] = {}
        provided_params = section_map[SP_PROVIDED_PARAMS_LIST_KEY]
    else:
        # Special Handling:
        # SERVICE_PARAM_CINDER_DEFAULT_VOLUME_TYPE:
        #   Ceph tiers: Since we may have multiple ceph backends, prioritize the
        #   primary backend to maintain existing behavior if a default volume
        #   type is not set
        param = constants.SERVICE_PARAM_CINDER_DEFAULT_VOLUME_TYPE
        if param not in provided_params:
            if constants.CINDER_BACKEND_CEPH in enabled_backends:
                provided_params[param] = constants.CINDER_BACKEND_CEPH
    # Now make sure the parameters which are not in the provided_params list are
    # removed out of cinder.conf
    absent_params = section_map[SP_ABSENT_PARAMS_LIST_KEY] = []
    for param in SP_CINDER_DEFAULT_ALL_SUPPORTED_PARAMS:
        if param not in provided_params:
            absent_params.append(param)
    sp_multipath_post_process(config, provided_params)
    sp_common_post_process(config, section, section_map, is_service_enabled,
                           enabled_backends, is_a_feature=False)
def sp_emc_vnx_post_process(config, section, section_map,
                            is_service_enabled, enabled_backends):
    """Finalize the cinder [emc_vnx] section service parameters.

    When the feature is enabled, fills in required defaults, resolves the
    security-file vs login/password exclusivity, injects the hardcoded
    driver params and strips blacklisted ones; when disabled, clears all
    provided params. Then computes the absent-param list and runs the
    common post processing.

    :param config: hieradata dict being built (mutated in place)
    :param section: cinder.conf section name ('emc_vnx')
    :param section_map: per-section state from SP_CINDER_SECTION_MAPPING
    :param is_service_enabled: True if the cinder service is enabled
    :param enabled_backends: list of enabled backend names
    """
    provided_params = section_map.get(SP_PROVIDED_PARAMS_LIST_KEY, {})
    if provided_params.get('enabled', 'false').lower() == 'true':
        # Supply some required parameter with default values
        if 'storage_vnx_pool_names' not in provided_params:
            provided_params['storage_vnx_pool_names'] = 'TiS_Pool'
        if 'san_ip' not in provided_params:
            provided_params['san_ip'] = ''
        # if storage_vnx_security_file_dir provided than following params
        # san_login, san_password, storage_vnx_authentication_type will be
        # removed.
        if 'storage_vnx_security_file_dir' not in provided_params:
            if 'san_login' not in provided_params:
                provided_params['san_login'] = 'sysadmin'
            if 'san_password' not in provided_params:
                provided_params['san_password'] = 'sysadmin'
        else:
            if 'san_login' in provided_params:
                del provided_params['san_login']
            if 'san_password' in provided_params:
                del provided_params['san_password']
            if 'storage_vnx_authentication_type' in provided_params:
                del provided_params['storage_vnx_authentication_type']
        if 'force_delete_lun_in_storagegroup' not in provided_params:
            provided_params['force_delete_lun_in_storagegroup'] = 'True'
        # Hardcoded params must exist in cinder.conf.
        provided_params['volume_backend_name'] = SP_CINDER_EMC_VNX
        provided_params['volume_driver'] = (
            'cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver')
        provided_params['storage_protocol'] = 'iscsi'
        provided_params['naviseccli_path'] = '/opt/Navisphere/bin/naviseccli'
        provided_params['initiator_auto_registration'] = 'True'
        for param in SP_CINDER_EMC_VNX_ALL_BLACKLIST_PARAMS:
            if param in provided_params:
                del provided_params[param]
    else:
        # If the feature is not enabled and there are some provided params
        # then just remove all of these params as they should not be in the
        # cinder.conf
        section_map[SP_PROVIDED_PARAMS_LIST_KEY] = {}
        provided_params = section_map[SP_PROVIDED_PARAMS_LIST_KEY]
    # Now make sure the parameters which are not in the provided_params list are
    # removed out of cinder.conf
    absent_params = section_map[SP_ABSENT_PARAMS_LIST_KEY] = []
    for param in SP_CINDER_EMC_VNX_ALL_SUPPORTED_PARAMS:
        if param not in provided_params:
            absent_params.append(param)
    sp_common_post_process(config, section, section_map, is_service_enabled,
                           enabled_backends)
def sp_hpe3par_post_process(config, section, section_map,
                            is_service_enabled, enabled_backends):
    """Finalize a cinder [hpe3par*] section's service parameters.

    Injects the hardcoded backend name and driver when enabled, clears
    provided params when disabled, computes the absent-param list and
    runs the common post processing.

    :param config: hieradata dict being built (mutated in place)
    :param section: cinder.conf section name ('hpe3par', 'hpe3par2', ...)
    :param section_map: per-section state from SP_CINDER_SECTION_MAPPING
    :param is_service_enabled: True if the cinder service is enabled
    :param enabled_backends: list of enabled backend names
    """
    provided_params = section_map.get(SP_PROVIDED_PARAMS_LIST_KEY, {})
    if provided_params.get('enabled', 'false').lower() == 'true':
        # Hardcoded params must exist in cinder.conf.
        provided_params['volume_backend_name'] = section
        provided_params['volume_driver'] = (
            'cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver')
    else:
        # If the feature is not enabled and there are some provided params
        # then just remove all of these params as they should not be in the
        # cinder.conf
        section_map[SP_PROVIDED_PARAMS_LIST_KEY] = {}
        provided_params = section_map[SP_PROVIDED_PARAMS_LIST_KEY]
    # Now make sure the parameters which are not in the provided_params list are
    # removed out of cinder.conf
    absent_params = section_map[SP_ABSENT_PARAMS_LIST_KEY] = []
    for param in SP_CINDER_HPE3PAR_ALL_SUPPORTED_PARAMS:
        if param not in provided_params:
            absent_params.append(param)
    sp_common_post_process(config, section, section_map, is_service_enabled,
                           enabled_backends)
def sp_hpelefthand_post_process(config, section, section_map,
                                is_service_enabled, enabled_backends):
    """Finalize the cinder [hpelefthand] section's service parameters.

    Injects the hardcoded backend name and driver when enabled, clears
    provided params when disabled, computes the absent-param list and
    runs the common post processing.

    :param config: hieradata dict being built (mutated in place)
    :param section: cinder.conf section name ('hpelefthand')
    :param section_map: per-section state from SP_CINDER_SECTION_MAPPING
    :param is_service_enabled: True if the cinder service is enabled
    :param enabled_backends: list of enabled backend names
    """
    provided_params = section_map.get(SP_PROVIDED_PARAMS_LIST_KEY, {})
    if provided_params.get('enabled', 'false').lower() == 'true':
        # Hardcoded params must exist in cinder.conf.
        provided_params['volume_backend_name'] = SP_CINDER_HPELEFTHAND
        provided_params['volume_driver'] = (
            'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver')
    else:
        # If the feature is not enabled and there are some provided params
        # then just remove all of these params as they should not be in the
        # cinder.conf
        section_map[SP_PROVIDED_PARAMS_LIST_KEY] = {}
        provided_params = section_map[SP_PROVIDED_PARAMS_LIST_KEY]
    # Now make sure the parameters which are not in the provided_params list are
    # removed out of cinder.conf
    absent_params = section_map[SP_ABSENT_PARAMS_LIST_KEY] = []
    for param in SP_CINDER_HPELEFTHAND_ALL_SUPPORTED_PARAMS:
        if param not in provided_params:
            absent_params.append(param)
    sp_common_post_process(config, section, section_map, is_service_enabled,
                           enabled_backends)
# For each section provided is:
#   SP_CONF_NAME_KEY    : The hieradata path for this section
#   SP_PARAM_PROCESS_KEY: This function is invoked for every service param
#                         belonging to the section
#   SP_POST_PROCESS_KEY : This function is invoked after each individual service
#                         param in the section is processed
# NOTE: CinderPuppet.__init__ extends this mapping with additional
# 'hpe3par<N>' sections at runtime.
SP_CINDER_SECTION_MAPPING = {
    SP_CINDER_DEFAULT: {
        SP_CONF_NAME_KEY: SP_CINDER_DEFAULT_PREFIX,
        SP_PARAM_PROCESS_KEY: sp_common_param_process,
        SP_POST_PROCESS_KEY: sp_default_post_process,
    },

    SP_CINDER_EMC_VNX: {
        SP_CONF_NAME_KEY: SP_CINDER_EMC_VNX_PREFIX,
        SP_PARAM_PROCESS_KEY: sp_common_param_process,
        SP_POST_PROCESS_KEY: sp_emc_vnx_post_process,
    },

    SP_CINDER_HPE3PAR: {
        SP_CONF_NAME_KEY: SP_CINDER_HPE3PAR_PREFIX,
        SP_PARAM_PROCESS_KEY: sp_common_param_process,
        SP_POST_PROCESS_KEY: sp_hpe3par_post_process,
    },

    SP_CINDER_HPELEFTHAND: {
        SP_CONF_NAME_KEY: SP_CINDER_HPELEFTHAND_PREFIX,
        SP_PARAM_PROCESS_KEY: sp_common_param_process,
        SP_POST_PROCESS_KEY: sp_hpelefthand_post_process,
    },
}
class CinderPuppet(openstack.OpenstackBasePuppet):
    """Class to encapsulate puppet operations for cinder configuration"""

    # Keystone service/user name and service type for cinder.
    SERVICE_NAME = 'cinder'
    SERVICE_TYPE = 'volume'
    # Default cinder API port.
    SERVICE_PORT = 8776
    # Endpoint paths per cinder API version; '%(tenant_id)s' is expanded
    # by keystone when the endpoint is used.
    SERVICE_PATH_V1 = 'v1/%(tenant_id)s'
    SERVICE_PATH_V2 = 'v2/%(tenant_id)s'
    SERVICE_PATH_V3 = 'v3/%(tenant_id)s'
    # Port used by the distributed-cloud cinder API proxy.
    PROXY_SERVICE_PORT = '28776'
    def __init__(self, *args, **kwargs):
        """Initialize the operator and register extra HPE3PAR sections."""
        super(CinderPuppet, self).__init__(*args, **kwargs)
        # Update the section mapping for multiple HPE3PAR backends
        # (hpe3par2 .. hpe3par<MAX>), sharing the common param processor
        # and the hpe3par post processor.
        for i in range(2, constants.SERVICE_PARAM_MAX_HPE3PAR + 1):
            section = "{0}{1}".format(SP_CINDER_HPE3PAR, i)
            prefix = "{0}{1}".format(SP_CINDER_HPE3PAR_PREFIX, i)
            SP_CINDER_SECTION_MAPPING[section] = {
                SP_CONF_NAME_KEY: prefix,
                SP_PARAM_PROCESS_KEY: sp_common_param_process,
                SP_POST_PROCESS_KEY: sp_hpe3par_post_process,
            }
def get_static_config(self):
dbuser = self._get_database_username(self.SERVICE_NAME)
return {
'cinder::db::postgresql::user': dbuser,
}
def get_secure_static_config(self):
dbpass = self._get_database_password(self.SERVICE_NAME)
kspass = self._get_service_password(self.SERVICE_NAME)
return {
'cinder::db::postgresql::password': dbpass,
'cinder::keystone::auth::password': kspass,
'cinder::keystone::authtoken::password': kspass,
}
    def get_system_config(self):
        """Assemble the cinder system-level hieradata.

        Builds keystone endpoint/auth configuration, per-backend (LVM,
        ceph, ceph-external) backend configuration, external SAN service
        parameter configuration, and distributed-cloud proxy settings.

        :returns: dict of hieradata keys/values for cinder
        """
        config_ksuser = True
        ksuser = self._get_service_user_name(self.SERVICE_NAME)
        service_config = None
        # If we are in Region config and Cinder is a shared service
        # then don't configure an account for Cinder
        if self._region_config():
            if self.SERVICE_TYPE in self._get_shared_services():
                service_config = self._get_service_config(self.SERVICE_NAME)
                config_ksuser = False
            else:
                ksuser += self._region_name()

        config = {
            'cinder::api::os_region_name': self._keystone_region_name(),
            'cinder::keystone::auth::configure_user': config_ksuser,
            'cinder::keystone::auth::public_url':
                self.get_public_url('cinder_public_uri_v1', service_config),
            'cinder::keystone::auth::internal_url':
                self.get_internal_url('cinder_internal_uri_v1', service_config),
            'cinder::keystone::auth::admin_url':
                self.get_admin_url('cinder_admin_uri_v1', service_config),
            'cinder::keystone::auth::region':
                self._region_name(),
            'cinder::keystone::auth::auth_name': ksuser,
            'cinder::keystone::auth::tenant':
                self._get_service_tenant_name(),
            'cinder::keystone::auth::public_url_v2':
                self.get_public_url('cinder_public_uri_v2', service_config),
            'cinder::keystone::auth::internal_url_v2':
                self.get_internal_url('cinder_internal_uri_v2', service_config),
            'cinder::keystone::auth::admin_url_v2':
                self.get_admin_url('cinder_admin_uri_v2', service_config),
            'cinder::keystone::auth::public_url_v3':
                self.get_public_url('cinder_public_uri_v3', service_config),
            'cinder::keystone::auth::internal_url_v3':
                self.get_internal_url('cinder_internal_uri_v3', service_config),
            'cinder::keystone::auth::admin_url_v3':
                self.get_admin_url('cinder_admin_uri_v3', service_config),
            # Distributed-cloud: proxy endpoints for the SystemController.
            'cinder::keystone::auth::dc_region':
                constants.SYSTEM_CONTROLLER_REGION,
            'cinder::keystone::auth::proxy_v2_public_url':
                self.get_proxy_public_url('v2'),
            'cinder::keystone::auth::proxy_v3_public_url':
                self.get_proxy_public_url('v3'),
            'cinder::keystone::auth::proxy_v2_admin_url':
                self.get_proxy_admin_url('v2'),
            'cinder::keystone::auth::proxy_v3_admin_url':
                self.get_proxy_admin_url('v3'),
            'cinder::keystone::auth::proxy_v2_internal_url':
                self.get_proxy_internal_url('v2'),
            'cinder::keystone::auth::proxy_v3_internal_url':
                self.get_proxy_internal_url('v3'),
            'cinder::keystone::authtoken::region_name':
                self._keystone_region_name(),
            'cinder::keystone::authtoken::auth_url':
                self._keystone_identity_uri(),
            'cinder::keystone::authtoken::auth_uri':
                self._keystone_auth_uri(),
            'cinder::keystone::authtoken::user_domain_name':
                self._get_service_user_domain_name(),
            'cinder::keystone::authtoken::project_domain_name':
                self._get_service_project_domain_name(),
            'cinder::keystone::authtoken::project_name':
                self._get_service_tenant_name(),
            'cinder::keystone::authtoken::username': ksuser,
            'openstack::cinder::params::region_name':
                self.get_region_name(),
            'openstack::cinder::params::service_type':
                self.get_service_type(),
            'openstack::cinder::params::service_type_v2':
                self.get_service_type_v2(),
            'openstack::cinder::params::service_type_v3':
                self.get_service_type_v3(),
        }

        # no need to configure cinder endpoints as the proxy provides
        # the endpoints in SystemController
        if (self._distributed_cloud_role() ==
                constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER):
            config.update({
                'cinder::keystone::auth::configure_endpoint': False,
                'cinder::keystone::auth::configure_endpoint_v2': False,
                'cinder::keystone::auth::configure_endpoint_v3': False,
                'openstack::cinder::params::configure_endpoint': False,
            })

        enabled_backends = []
        ceph_backend_configs = {}
        ceph_type_configs = {}

        is_service_enabled = False
        is_ceph_external = False
        # Walk all configured storage backends and collect cinder-relevant
        # configuration for each backend type.
        for storage_backend in self.dbapi.storage_backend_get_list():
            if (storage_backend.backend == constants.SB_TYPE_LVM and
                    (storage_backend.services and
                     constants.SB_SVC_CINDER in storage_backend.services)):
                is_service_enabled = True
                enabled_backends.append(storage_backend.backend)

                # Default to thin provisioning unless the cinder-volumes
                # LVG says otherwise.
                lvm_type = constants.CINDER_LVM_TYPE_THIN
                lvgs = self.dbapi.ilvg_get_all()
                for vg in lvgs:
                    if vg.lvm_vg_name == constants.LVG_CINDER_VOLUMES:
                        lvm_type = vg.capabilities.get('lvm_type')
                if lvm_type == constants.CINDER_LVM_TYPE_THICK:
                    # 'default' is the puppet/cinder name for thick LVM.
                    lvm_type = 'default'

                config.update({
                    'openstack::cinder::lvm::lvm_type': lvm_type,
                    'openstack::cinder::params::cinder_address':
                        self._get_cinder_address(),
                    'openstack::cinder::params::iscsi_ip_address':
                        self._format_url_address(self._get_cinder_address()),
                    # TODO (rchurch): Re-visit this logic to make sure that this
                    # information is not stale in the manifest when applied
                    'openstack::cinder::lvm::filesystem::drbd::drbd_handoff':
                        not utils.is_single_controller(self.dbapi),
                })

            elif storage_backend.backend == constants.SB_TYPE_CEPH:
                ceph_obj = self.dbapi.storage_ceph_get(storage_backend.id)
                ceph_backend = {
                    'backend_enabled': False,
                    'backend_name': constants.CINDER_BACKEND_CEPH,
                    'rbd_pool': constants.CEPH_POOL_VOLUMES_NAME
                }
                ceph_backend_type = {
                    'type_enabled': False,
                    'type_name': constants.CINDER_BACKEND_CEPH,
                    'backend_name': constants.CINDER_BACKEND_CEPH
                }

                # Non-default ceph tiers get tier-suffixed backend, pool
                # and volume type names.
                if (ceph_obj.tier_name != constants.SB_TIER_DEFAULT_NAMES[
                        constants.SB_TIER_TYPE_CEPH]):
                    tier_vol_backend = "{0}-{1}".format(
                        ceph_backend['backend_name'],
                        ceph_obj.tier_name)
                    ceph_backend['backend_name'] = tier_vol_backend
                    ceph_backend_type['backend_name'] = tier_vol_backend

                    ceph_backend['rbd_pool'] = "{0}-{1}".format(
                        ceph_backend['rbd_pool'], ceph_obj.tier_name)

                    ceph_backend_type['type_name'] = "{0}-{1}".format(
                        ceph_backend_type['type_name'],
                        ceph_obj.tier_name)

                if (storage_backend.services and
                        constants.SB_SVC_CINDER in storage_backend.services):
                    is_service_enabled = True
                    ceph_backend['backend_enabled'] = True
                    ceph_backend_type['type_enabled'] = True
                    enabled_backends.append(ceph_backend['backend_name'])

                ceph_backend_configs.update({storage_backend.name: ceph_backend})
                ceph_type_configs.update({storage_backend.name: ceph_backend_type})

            elif storage_backend.backend == constants.SB_TYPE_CEPH_EXTERNAL:
                is_ceph_external = True
                ceph_ext_obj = self.dbapi.storage_ceph_external_get(
                    storage_backend.id)
                ceph_external_backend = {
                    'backend_enabled': False,
                    'backend_name': ceph_ext_obj.name,
                    'rbd_pool':
                        storage_backend.capabilities.get('cinder_pool'),
                    'rbd_ceph_conf': constants.CEPH_CONF_PATH + os.path.basename(ceph_ext_obj.ceph_conf),
                }
                ceph_external_backend_type = {
                    'type_enabled': False,
                    'type_name': "{0}-{1}".format(
                        ceph_ext_obj.name,
                        constants.CINDER_BACKEND_CEPH_EXTERNAL),
                    'backend_name': ceph_ext_obj.name
                }

                if (storage_backend.services and
                        constants.SB_SVC_CINDER in storage_backend.services):
                    is_service_enabled = True
                    ceph_external_backend['backend_enabled'] = True
                    ceph_external_backend_type['type_enabled'] = True
                    enabled_backends.append(
                        ceph_external_backend['backend_name'])

                ceph_backend_configs.update(
                    {storage_backend.name: ceph_external_backend})
                ceph_type_configs.update(
                    {storage_backend.name: ceph_external_backend_type})

        # Update the params for the external SANs
        config.update(self._get_service_parameter_config(is_service_enabled,
                                                         enabled_backends))
        # Disable cinder services if kubernetes is enabled
        is_service_enabled = False
        config.update({
            'openstack::cinder::params::service_enabled': is_service_enabled,
            'openstack::cinder::params::enabled_backends': enabled_backends,
            'openstack::cinder::params::is_ceph_external': is_ceph_external,
            'openstack::cinder::backends::ceph::ceph_backend_configs':
                ceph_backend_configs,
            'openstack::cinder::api::backends::ceph_type_configs':
                ceph_type_configs,
        })

        return config
def get_secure_system_config(self):
config = {
'cinder::database_connection':
self._format_database_connection(self.SERVICE_NAME),
}
return config
def get_host_config(self, host):
if (constants.CONTROLLER not in utils.get_personalities(host)):
return {}
cinder_device, cinder_size_gib = utils._get_cinder_device_info(self.dbapi, host.id)
config = {}
if cinder_device:
config.update({
'openstack::cinder::params::cinder_device': cinder_device,
'openstack::cinder::params::cinder_size': cinder_size_gib
})
return config
def get_public_url(self, version, service_config=None):
if service_config is not None:
url = service_config.capabilities.get(version, None)
if url is not None:
return url
if version == 'cinder_public_uri_v1':
return self._format_public_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH_V1)
elif version == 'cinder_public_uri_v2':
return self._format_public_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH_V2)
elif version == 'cinder_public_uri_v3':
return self._format_public_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH_V3)
else:
return None
def get_internal_url(self, version, service_config=None):
if service_config is not None:
url = service_config.capabilities.get(version, None)
if url is not None:
return url
if version == 'cinder_internal_uri_v1':
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH_V1)
elif version == 'cinder_internal_uri_v2':
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH_V2)
elif version == 'cinder_internal_uri_v3':
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH_V3)
else:
return None
def get_admin_url(self, version, service_config=None):
if service_config is not None:
url = service_config.capabilities.get(version, None)
if url is not None:
return url
if version == 'cinder_admin_uri_v1':
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH_V1)
elif version == 'cinder_admin_uri_v2':
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH_V2)
elif version == 'cinder_admin_uri_v3':
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH_V3)
else:
return None
# proxies need public defined but should never use public endpoints
def get_proxy_public_url(self, version):
if version == 'v2':
return self._format_private_endpoint(self.PROXY_SERVICE_PORT,
path=self.SERVICE_PATH_V2)
elif version == 'v3':
return self._format_private_endpoint(self.PROXY_SERVICE_PORT,
path=self.SERVICE_PATH_V3)
else:
return None
def get_proxy_internal_url(self, version):
if version == 'v2':
return self._format_private_endpoint(self.PROXY_SERVICE_PORT,
path=self.SERVICE_PATH_V2)
elif version == 'v3':
return self._format_private_endpoint(self.PROXY_SERVICE_PORT,
path=self.SERVICE_PATH_V3)
else:
return None
def get_proxy_admin_url(self, version):
if version == 'v2':
return self._format_private_endpoint(self.PROXY_SERVICE_PORT,
path=self.SERVICE_PATH_V2)
elif version == 'v3':
return self._format_private_endpoint(self.PROXY_SERVICE_PORT,
path=self.SERVICE_PATH_V3)
else:
return None
def get_region_name(self):
return self._get_service_region_name(self.SERVICE_NAME)
def _get_neutron_url(self):
return self._operator.neutron.get_internal_url()
def _get_cinder_address(self):
# obtain management network NFS address
return self._get_address_by_name(
constants.CONTROLLER_CINDER,
constants.NETWORK_TYPE_MGMT).address
def get_service_name(self):
return self._get_configured_service_name(self.SERVICE_NAME)
def get_service_type(self):
service_type = self._get_configured_service_type(self.SERVICE_NAME)
if service_type is None:
return self.SERVICE_TYPE
else:
return service_type
def get_service_name_v2(self):
return self._get_configured_service_name(self.SERVICE_NAME, 'v2')
def get_service_type_v2(self):
service_type = self._get_configured_service_type(
self.SERVICE_NAME, 'v2')
if service_type is None:
return self.SERVICE_TYPE + 'v2'
else:
return service_type
def get_service_type_v3(self):
service_type = self._get_configured_service_type(
self.SERVICE_NAME, 'v3')
if service_type is None:
return self.SERVICE_TYPE + 'v3'
else:
return service_type
    def _get_service_parameter_config(self, is_service_enabled,
                                      enabled_backends):
        """Build hieradata from cinder service parameters (external SANs).

        Feeds every stored cinder service parameter through its section's
        param processor, then runs each section's post processor to emit
        the final puppet config, and publishes the list of possible
        HPE3PAR sections.

        :param is_service_enabled: True if the cinder service is enabled
        :param enabled_backends: enabled backend names (may be appended to
            by section post processors)
        :returns: dict of hieradata keys/values
        """
        config = {}
        service_parameters = self._get_service_parameter_configs(
            constants.SERVICE_TYPE_CINDER)

        if service_parameters is None:
            return {}

        # DEFAULT section may or may not be present therefore reset param list
        SP_CINDER_SECTION_MAPPING[
            SP_CINDER_DEFAULT][SP_PROVIDED_PARAMS_LIST_KEY] = {}

        # Eval all currently provided parameters
        for s in service_parameters:
            if s.section in SP_CINDER_SECTION_MAPPING:
                SP_CINDER_SECTION_MAPPING[s.section].get(
                    SP_PARAM_PROCESS_KEY, sp_common_param_process)(
                        config, s.section,
                        SP_CINDER_SECTION_MAPPING[s.section],
                        s.name, s.value)

        for section, sp_section_map in SP_CINDER_SECTION_MAPPING.items():
            sp_section_map.get(SP_POST_PROCESS_KEY, sp_common_post_process)(
                config, section, sp_section_map,
                is_service_enabled, enabled_backends)

        # Build the list of possible HPE3PAR backends
        possible_hpe3pars = [s for s in SP_CINDER_SECTION_MAPPING.keys()
                             if constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR in s]
        config.update({'openstack::cinder::backends::hpe3par::sections': possible_hpe3pars})

        return config
def is_service_enabled(self):
for storage_backend in self.dbapi.storage_backend_get_list():
if (storage_backend.backend == constants.SB_TYPE_LVM and
(storage_backend.services and
constants.SB_SVC_CINDER in storage_backend.services)):
return True
elif (storage_backend.backend == constants.SB_TYPE_CEPH and
(storage_backend.services and
constants.SB_SVC_CINDER in storage_backend.services)):
return True
elif (storage_backend.backend == constants.SB_TYPE_CEPH_EXTERNAL and
(storage_backend.services and
constants.SB_SVC_CINDER in storage_backend.services)):
return True
return False

View File

@ -46,7 +46,6 @@ class SystemInventoryPuppet(openstack.OpenstackBasePuppet):
def get_system_config(self):
ksuser = self._get_service_user_name(self.SERVICE_NAME)
neutron_region_name = self._operator.neutron.get_region_name()
cinder_region_name = self._operator.cinder.get_region_name()
nova_region_name = self._operator.nova.get_region_name()
barbican_region_name = self._operator.barbican.get_region_name()
@ -54,7 +53,6 @@ class SystemInventoryPuppet(openstack.OpenstackBasePuppet):
# The region in which the identity server can be found
'sysinv::region_name': self._keystone_region_name(),
'sysinv::neutron_region_name': neutron_region_name,
'sysinv::cinder_region_name': cinder_region_name,
'sysinv::nova_region_name': nova_region_name,
'sysinv::barbican_region_name': barbican_region_name,

View File

@ -202,9 +202,6 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
ksuser = self._get_service_user_name(self.SERVICE_NAME)
config = {
'nova::os_region_name':
self._operator.cinder.get_region_name(),
'nova::keystone::auth::region': self._region_name(),
'nova::keystone::auth::public_url': self.get_public_url(),
'nova::keystone::auth::internal_url': self.get_internal_url(),

View File

@ -8,7 +8,7 @@ fixtures>=0.3.14
mock<1.1.0,>=1.0
mox
passlib>=1.7.0
psycopg2
psycopg2-binary
python-barbicanclient<3.1.0,>=3.0.1
python-subunit>=0.0.18
requests-mock>=0.6.0 # Apache-2.0