Remove support for nova-local lvm backend for compute hosts

This story tracks the removal of the nova-local lvm backend for compute
hosts. The lvm backend is no longer required; nova-local storage
continues to support the "image" and "remote" backends.

This story will remove custom code related to lvm nova-local storage:
- the 'sysinv host-lvg-modify' command is modified (a usage sketch
  follows this list):

  --instance_backing lvm parameter is removed
  ('image' and 'remote' remain)

  --instances_lv_size_gib <size> option is removed

- puppet instances_lv_size configuration is removed

- local_lvm is removed from storage host-aggregates
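
A sketch of the resulting usage ("compute-0" is a placeholder hostname,
and the host-then-volume-group positional order is assumed from the
client's field list), showing the nova-local options that remain:

  sysinv host-lvg-modify -b image -c 2 compute-0 nova-local
  sysinv host-lvg-modify -b remote compute-0 nova-local

With this change, '-b lvm' and '-s/--instances_lv_size_gib' are no
longer accepted by the argument parser.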

DocImpact
Story: 2004427
Task: 28083

Change-Id: I5443a07f8922bcab7fa22e5ff8fc2d0ff3fb109d
Signed-off-by: Jim Gauld <james.gauld@windriver.com>
Jim Gauld 2018-11-26 15:23:27 -05:00
parent 23c5f83235
commit 61b197aa4b
13 changed files with 24 additions and 268 deletions

View File

@ -1551,7 +1551,7 @@ badMediaType (415)
:widths: 20, 20, 20, 60
"volumegroup_id", "URI", "csapi:UUID", "The unique identifier of an existing LVM volume group."
"capabilites (Optional)", "plain", "xsd:string", "A dictionary of key-value pairs prepresenting volume group parameters and values. Valid nova-local parameters are: ``instances_lv_size_mib``, ``instance_backing``, and ``concurrent_disk_operations``. Valid cinder-volumes parameters are: ``lvm_type``"
"capabilities (Optional)", "plain", "xsd:string", "A dictionary of key-value pairs prepresenting volume group parameters and values. Valid nova-local parameters are: ``instance_backing``, and ``concurrent_disk_operations``. Valid cinder-volumes parameters are: ``lvm_type``"
**Response parameters**
@ -1582,7 +1582,7 @@ badMediaType (415)
[
{
"path": "/capabilities",
"value": "{\\"instances_lv_size_mib\\": 10240}",
"value": "{}",
"op": "replace"
}
]
@ -1610,7 +1610,6 @@ badMediaType (415)
"lvm_max_pv": 0,
"updated_at": null,
"capabilities": {
"instances_lv_size_mib": 10240
},
"vg_state": "adding",
"ihost_uuid": "6b55a4c8-4194-4e3b-8d32-ca658473314e",

View File

@ -385,7 +385,6 @@ class openstack::nova::storage (
$lvm_global_filter = '[]',
$lvm_update_filter = '[]',
$instance_backing = 'image',
$instances_lv_size = 0,
$concurrent_disk_operations = 2,
$images_rbd_pool = 'ephemeral',
$images_rbd_ceph_conf = '/etc/ceph/ceph.conf'
@ -402,16 +401,6 @@ class openstack::nova::storage (
$images_volume_group = absent
$round_to_extent = false
$local_monitor_state = 'disabled'
$instances_lv_size_real = 'max'
$images_rbd_pool_real = absent
$images_rbd_ceph_conf_real = absent
}
'lvm': {
$images_type = 'lvm'
$images_volume_group = 'nova-local'
$round_to_extent = true
$local_monitor_state = 'enabled'
$instances_lv_size_real = $instances_lv_size
$images_rbd_pool_real = absent
$images_rbd_ceph_conf_real = absent
}
@ -420,7 +409,6 @@ class openstack::nova::storage (
$images_volume_group = absent
$round_to_extent = false
$local_monitor_state = 'disabled'
$instances_lv_size_real = 'max'
$images_rbd_pool_real = $images_rbd_pool
$images_rbd_ceph_conf_real = $images_rbd_ceph_conf
}
@ -472,7 +460,7 @@ class openstack::nova::storage (
ensure => 'present',
vg => 'nova-local',
pv => $final_pvs,
size => $instances_lv_size_real,
size => 'max',
round_to_extent => $round_to_extent,
allow_reduce => true,
nuke_fs_on_resize_failure => true,

View File

@ -36,12 +36,6 @@ def _print_ilvg_show(ilvg):
attr = getattr(ilvg, 'capabilities', '')
if attr:
lv_size_mib = attr.pop('instances_lv_size_mib', None)
if lv_size_mib:
lv_size_gib = float(lv_size_mib) / 1024
attr.update({'instances_lv_size_gib': lv_size_gib})
# rename capabilities for display purposes and add to display list
data.append(('parameters', attr))
@ -176,21 +170,14 @@ def do_host_lvg_delete(cc, args):
help="Name or UUID of lvg [REQUIRED]")
@utils.arg('-b', '--instance_backing',
metavar='<instance backing>',
choices=['lvm', 'image', 'remote'],
choices=['image', 'remote'],
help=("Type of instance backing. "
"Allowed values: lvm, image, remote. [nova-local]"))
"Allowed values: image, remote. [nova-local]"))
@utils.arg('-c', '--concurrent_disk_operations',
metavar='<concurrent disk operations>',
help=("Set the number of concurrent I/O intensive disk operations "
"such as glance image downloads, image format conversions, "
"etc. [nova-local]"))
@utils.arg('-s', '--instances_lv_size_gib',
metavar='<instances_lv size in GiB>',
help=("Set the desired size (in GiB) of the instances LV that is "
"used for /var/lib/nova/instances. "
"Example: For a 50GB volume, use 50. "
"Required when instance backing is \"lvm\". "
"[nova-local]"))
@utils.arg('-l', '--lvm_type',
metavar='<lvm_type>',
choices=['thick', 'thin'],
@ -201,14 +188,13 @@ def do_host_lvg_modify(cc, args):
# Get all the fields from the command arguments
field_list = ['hostnameorid', 'lvgnameoruuid',
'instance_backing', 'instances_lv_size_gib',
'concurrent_disk_operations', 'lvm_type']
'instance_backing', 'concurrent_disk_operations', 'lvm_type']
fields = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
all_caps_list = ['instance_backing', 'instances_lv_size_gib',
'concurrent_disk_operations', 'lvm_type']
integer_fields = ['instances_lv_size_gib', 'concurrent_disk_operations']
all_caps_list = ['instance_backing', 'concurrent_disk_operations',
'lvm_type']
integer_fields = ['concurrent_disk_operations']
requested_caps_dict = {}
for cap in all_caps_list:
@ -218,12 +204,9 @@ def do_host_lvg_modify(cc, args):
requested_caps_dict[cap] = int(fields[cap])
else:
requested_caps_dict[cap] = fields[cap]
if cap == 'instances_lv_size_gib':
requested_caps_dict['instances_lv_size_mib'] = \
requested_caps_dict.pop('instances_lv_size_gib') * 1024
except ValueError:
raise exc.CommandError('instances_lv size must be an integer '
'greater than 0: %s' % fields[cap])
raise exc.CommandError(
'{0} value {1} is invalid'.format(cap, fields[cap]))
# Get the ihost object
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)

View File

@ -355,9 +355,6 @@ def get_ilvg_config(iprofile):
for k, v in ilvg.capabilities.iteritems():
if capabilities_str != '':
capabilities_str += "; "
if k == "instances_lv_size_mib":
k = "instances_lv_size_gib"
v = v / 1024
capabilities_str += "%s: %s " % (k, v)
str += "%s, %s" % (ilvg.lvm_vg_name, capabilities_str)

View File

@ -321,7 +321,6 @@
</xs:simpleType>
<xs:simpleType name="Instance_backing">
<xs:restriction base="xs:string">
<xs:enumeration value="lvm" />
<xs:enumeration value="image" />
<xs:enumeration value="remote" />
</xs:restriction>
@ -331,7 +330,6 @@
<xs:extension base="xs:string">
<xs:attribute type="Lvm_vg_name" name="lvm_vg_name" use="required" />
<xs:attribute type="Instance_backing" name="instance_backing" use="required" />
<xs:attribute type="xs:positiveInteger" name="instances_lv_size_gib" use="optional" />
<xs:attribute type="xs:positiveInteger" name="concurrent_disk_operations" use="required" />
</xs:extension>
</xs:simpleContent>

View File

@ -151,28 +151,14 @@
<disk path="/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0" size="50" volumeFunc="osd" tier="gold"/>
</storageProfile>
<localstorageProfile name="localstorage-profile_lvm">
<!--The disk tags below define each device,
node: device node
size: minimum size (in GiB).
The lvg tags below define the local volume group
lvm_vg_name: local volume group name
instance_backing: lvm, image, or remote
instances_lv_size_gib: local volume size in GiB
concurrent_disk_operations: number of parallel I/O intensive disk operaions
-->
<disk path="/dev/sdb" size="223" />
<lvg lvm_vg_name="nova-local" instance_backing="lvm" instances_lv_size_gib="2" concurrent_disk_operations="2" />
</localstorageProfile>
<localstorageProfile name="localstorage-profile_localimage">
<!--The disk tags below define each device,
node: device node
size: minimum size (in GiB).
The lvg tags below define the local volume group
lvm_vg_name: local volume group name
instance_backing: lvm, image, or remote
concurrent_disk_operations: number of parallel I/O intensive disk operaions
instance_backing: image, or remote
concurrent_disk_operations: number of parallel I/O intensive disk operations
-->
<disk path="/dev/sdb" size="223" />
<lvg lvm_vg_name="nova-local" instance_backing="image" concurrent_disk_operations="2" />
@ -184,8 +170,8 @@
size: minimum size (in MB).
The lvg tags below define the local volume group
lvm_vg_name: local volume group name
instance_backing: lvm, image, or remote
concurrent_disk_operations: number of parallel I/O intensive disk operaions
instance_backing: image, or remote
concurrent_disk_operations: number of parallel I/O intensive disk operations
-->
<disk path="/dev/sdb" size="223" />
<lvg lvm_vg_name="nova-local" instance_backing="remote" concurrent_disk_operations="2" />

View File

@ -4048,8 +4048,9 @@ class HostController(rest.RestController):
nova_local_storage_lvg = lvg
break
# Prevent unlock if instances logical volume size is not
# provided or size needs to be adjusted
# Prevent unlock if nova-local volume group has: invalid state
# (e.g., removing), invalid instance_backing, no physical
# volumes allocated.
if nova_local_storage_lvg:
if nova_local_storage_lvg.vg_state == constants.LVG_DEL:
raise wsme.exc.ClientSideError(
@ -4081,41 +4082,9 @@ class HostController(rest.RestController):
instance_backing = lvg_capabilities.get(
constants.LVG_NOVA_PARAM_BACKING)
if instance_backing in [
if instance_backing not in [
constants.LVG_NOVA_BACKING_IMAGE,
constants.LVG_NOVA_BACKING_REMOTE]:
return
elif instance_backing == constants.LVG_NOVA_BACKING_LVM:
if constants.LVG_NOVA_PARAM_INST_LV_SZ not in lvg_capabilities:
raise wsme.exc.ClientSideError(
_("A host with compute functionality and a "
"nova-local volume group requires that a valid "
"size be specifed for the instances logical "
"volume."))
elif lvg_capabilities[constants.LVG_NOVA_PARAM_INST_LV_SZ] == 0:
raise wsme.exc.ClientSideError(
_("A host with compute functionality and a "
"nova-local volume group requires that a valid "
"size be specifed for the instances logical "
"volume. The current value is 0."))
else:
# Sanity check the current VG size against the
# current instances logical volume size in case
# PV's have been deleted
size = pv_api._get_vg_size_from_pvs(
nova_local_storage_lvg)
if (lvg_capabilities[constants.LVG_NOVA_PARAM_INST_LV_SZ] >
size):
raise wsme.exc.ClientSideError(
_("A host with compute functionality and a "
"nova-local volume group requires that a "
"valid size be specifed for the instances "
"logical volume. Current value: %d > %d.") %
(lvg_capabilities[
constants.LVG_NOVA_PARAM_INST_LV_SZ],
size))
else:
raise wsme.exc.ClientSideError(
_("A host with compute functionality and a "
"nova-local volume group requires that a valid "

View File

@ -394,7 +394,6 @@ def _cinder_volumes_patch_semantic_checks(caps_dict):
def _nova_local_patch_semantic_checks(caps_dict):
# make sure that only valid capabilities are provided
valid_caps = set([constants.LVG_NOVA_PARAM_BACKING,
constants.LVG_NOVA_PARAM_INST_LV_SZ,
constants.LVG_NOVA_PARAM_DISK_OPS])
invalid_caps = set(caps_dict.keys()) - valid_caps
@ -438,29 +437,6 @@ def _lvg_pre_patch_checks(lvg_obj, patch_obj):
set(patch_caps_dict.keys())):
patch_caps_dict[k] = current_caps_dict[k]
# Make further adjustments to the patch based on the current
# value to account for switching storage modes
if (patch_caps_dict[constants.LVG_NOVA_PARAM_BACKING] ==
constants.LVG_NOVA_BACKING_LVM):
if constants.LVG_NOVA_PARAM_INST_LV_SZ not in patch_caps_dict:
# Switched to LVM mode so set the minimum sized
# instances_lv_size_mib. This will populate it in
# horizon allowing for further configuration
vg_size_mib = pv_api._get_vg_size_from_pvs(lvg_dict)
allowed_min_mib = \
pv_api._instances_lv_min_allowed_mib(vg_size_mib)
patch_caps_dict.update({constants.LVG_NOVA_PARAM_INST_LV_SZ:
allowed_min_mib})
elif (patch_caps_dict[constants.LVG_NOVA_PARAM_BACKING] in [
constants.LVG_NOVA_BACKING_IMAGE,
constants.LVG_NOVA_BACKING_REMOTE]):
if constants.LVG_NOVA_PARAM_INST_LV_SZ in patch_caps_dict:
# Switched to image backed or remote backed modes.
# Remove the instances_lv_size_mib. It is not
# configurable as we will use the entire nova-local
# VG
del patch_caps_dict[constants.LVG_NOVA_PARAM_INST_LV_SZ]
p['value'] = patch_caps_dict
elif lvg_dict['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
for p in patch_obj:
@ -641,34 +617,6 @@ def _check(op, lvg):
_('Internal Error: %s parameter missing for volume '
'group.') % constants.LVG_NOVA_PARAM_BACKING)
else:
# Check instances_lv_size_mib
if ((lvg_caps.get(constants.LVG_NOVA_PARAM_BACKING) ==
constants.LVG_NOVA_BACKING_LVM) and
constants.LVG_NOVA_PARAM_INST_LV_SZ in lvg_caps):
# Get the volume group size
vg_size_mib = pv_api._get_vg_size_from_pvs(lvg)
# Apply a "usability" check on the value provided to make
# sure it operates within an acceptable range
allowed_min_mib = pv_api._instances_lv_min_allowed_mib(
vg_size_mib)
allowed_max_mib = pv_api._instances_lv_max_allowed_mib(
vg_size_mib)
lv_size_mib = lvg_caps[constants.LVG_NOVA_PARAM_INST_LV_SZ]
if ((lv_size_mib < allowed_min_mib) or
(lv_size_mib > allowed_max_mib)):
raise wsme.exc.ClientSideError(
_('Invalid size provided for '
'instances_lv_size_gib: %.2f. The valid range, '
'based on the volume group size is %.2f <= '
'instances_lv_size_gib <= %.2f.' %
(float(lvg_caps[constants.LVG_NOVA_PARAM_INST_LV_SZ]) / 1024,
float(allowed_min_mib) / 1024,
float(allowed_max_mib) / 1024)))
# Instances backed by remote ephemeral storage can only be
# used on systems that have a Ceph (internal or external)
# backend.
@ -785,11 +733,10 @@ def _create(lvg, iprofile=None, applyprofile=None):
if lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and not iprofile:
lvg_caps = lvg['capabilities']
if (constants.LVG_NOVA_PARAM_INST_LV_SZ in lvg_caps) or applyprofile:
if applyprofile:
# defined from create or inherit the capabilities
LOG.info("%s defined from create %s applyprofile=%s" %
(constants.LVG_NOVA_PARAM_INST_LV_SZ, lvg_caps,
applyprofile))
LOG.info("LVG create %s applyprofile=%s" %
(lvg_caps, applyprofile))
else:
lvg_caps_dict = {
constants.LVG_NOVA_PARAM_BACKING:

View File

@ -1813,41 +1813,12 @@ def _create_localstorage_profile(profile_name, profile_node):
ilvg = ilvgs_local[0]
instance_backing = ilvg.get(constants.LVG_NOVA_PARAM_BACKING)
concurrent_disk_operations = ilvg.get(constants.LVG_NOVA_PARAM_DISK_OPS)
if instance_backing == constants.LVG_NOVA_BACKING_LVM:
instances_lv_size_mib = \
int(ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB)) * 1024
if not instances_lv_size_mib:
return ("Error", _('error: importing Local Storage profile %s '
'failed.') %
profile_name, "instances_lv_size_mib required.")
capabilities_dict = {constants.LVG_NOVA_PARAM_BACKING:
constants.LVG_NOVA_BACKING_LVM,
constants.LVG_NOVA_PARAM_INST_LV_SZ:
int(instances_lv_size_mib),
constants.LVG_NOVA_PARAM_DISK_OPS:
int(concurrent_disk_operations)}
elif instance_backing == constants.LVG_NOVA_BACKING_IMAGE:
if ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB):
return ("Error",
_('error: Local Storage profile %s is invalid')
% profile_name,
_('instances_lv_size_gib (%s) must not be set for '
'image backed instance')
% ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB))
if instance_backing == constants.LVG_NOVA_BACKING_IMAGE:
capabilities_dict = {constants.LVG_NOVA_PARAM_BACKING:
constants.LVG_NOVA_BACKING_IMAGE,
constants.LVG_NOVA_PARAM_DISK_OPS:
int(concurrent_disk_operations)}
elif instance_backing == constants.LVG_NOVA_BACKING_REMOTE:
if ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB):
return ("Error",
_('error: Local Storage profile %s is invalid')
% profile_name,
_('instances_lv_size_gib (%s) must not be set for '
'remote backed instance')
% ilvg.get(constants.LVG_NOVA_PARAM_INST_LV_SZ_GIB))
capabilities_dict = {constants.LVG_NOVA_PARAM_BACKING:
constants.LVG_NOVA_BACKING_REMOTE,
constants.LVG_NOVA_PARAM_DISK_OPS:

View File

@ -564,52 +564,6 @@ def _get_vg_size_from_pvs(lvg, filter_pv=None):
return size
def _instances_lv_min_allowed_mib(vg_size_mib):
# 80GB is the cutoff in the kickstart files for a virtualbox disk vs. a
# normal disk. Use a similar cutoff here for the volume group size. If the
# volume group is large enough then bump the min_mib value. The min_mib
# value is set to provide a reasonable minimum amount of space for
# /var/lib/nova/instances
# Note: A range based on this calculation is displayed in horizon to help
# provide guidance to the end user. Any changes here should be reflected
# in dashboards/admin/inventory/storages/lvg_params/views.py as well
if (vg_size_mib < (80 * 1024)):
min_mib = 2 * 1024
else:
min_mib = 5 * 1024
return min_mib
def _instances_lv_max_allowed_mib(vg_size_mib):
return vg_size_mib >> 1
def _check_instances_lv_if_deleted(lvg, ignore_pv):
# get the volume group capabilities
lvg_caps = lvg['capabilities']
# get the new volume group size assuming that the physical volume is
# removed
vg_size_mib = _get_vg_size_from_pvs(lvg, filter_pv=ignore_pv)
# Get the valid range of the instances_lv
allowed_min_mib = _instances_lv_min_allowed_mib(vg_size_mib)
allowed_max_mib = _instances_lv_max_allowed_mib(vg_size_mib)
if (constants.LVG_NOVA_PARAM_INST_LV_SZ in lvg_caps and
((lvg_caps[constants.LVG_NOVA_PARAM_INST_LV_SZ] < allowed_min_mib) or
(lvg_caps[constants.LVG_NOVA_PARAM_INST_LV_SZ] > allowed_max_mib))):
raise wsme.exc.ClientSideError(
_("Cannot delete physical volume: %s from %s. The resulting "
"volume group size would leave an invalid "
"instances_lv_size_mib: %d. The valid range, based on the new "
"volume group size is %d <= instances_lv_size_mib <= %d." %
(ignore_pv['uuid'], lvg.lvm_vg_name,
lvg_caps[constants.LVG_NOVA_PARAM_INST_LV_SZ],
allowed_min_mib, allowed_max_mib)))
def _check_lvg(op, pv):
# semantic check whether idisk is associated
ilvgid = pv.get('forilvgid') or pv.get('ilvg_uuid')
@ -645,23 +599,6 @@ def _check_lvg(op, pv):
raise wsme.exc.ClientSideError(msg)
elif op == "delete":
if (constants.LVG_NOVA_PARAM_BACKING in ilvg.capabilities and
(ilvg.capabilities[constants.LVG_NOVA_PARAM_BACKING] ==
constants.LVG_NOVA_BACKING_LVM)):
# Semantic Check: nova-local: Make sure that VG does not contain
# any instance volumes
if ((ilvg.lvm_vg_name == constants.LVG_NOVA_LOCAL) and
(ilvg.lvm_cur_lv > 1)):
raise wsme.exc.ClientSideError(
_("Can't delete physical volume: %s from %s. Instance "
"logical volumes are present in the volume group. Total "
"= %d. To remove physical volumes you must "
"terminate/migrate all instances associated with this "
"node." %
(pv['uuid'], ilvg.lvm_vg_name, ilvg.lvm_cur_lv - 1)))
_check_instances_lv_if_deleted(ilvg, pv)
# Possible Kubernetes issue, do we want to allow this on compute nodes?
if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
raise wsme.exc.ClientSideError(

View File

@ -549,13 +549,10 @@ PV_NAME_UNKNOWN = 'unknown'
# Storage: Volume Group Parameter Types
LVG_NOVA_PARAM_BACKING = 'instance_backing'
LVG_NOVA_PARAM_INST_LV_SZ = 'instances_lv_size_mib'
LVG_NOVA_PARAM_INST_LV_SZ_GIB = 'instances_lv_size_gib'
LVG_NOVA_PARAM_DISK_OPS = 'concurrent_disk_operations'
LVG_CINDER_PARAM_LVM_TYPE = 'lvm_type'
# Storage: Volume Group Parameter: Nova: Backing types
LVG_NOVA_BACKING_LVM = 'lvm'
LVG_NOVA_BACKING_IMAGE = 'image'
LVG_NOVA_BACKING_REMOTE = 'remote'
@ -563,9 +560,6 @@ LVG_NOVA_BACKING_REMOTE = 'remote'
LVG_CINDER_LVM_TYPE_THIN = 'thin'
LVG_CINDER_LVM_TYPE_THICK = 'thick'
# Storage: Volume Group Parameter: Nova: Instances LV
LVG_NOVA_PARAM_INST_LV_SZ_DEFAULT = 0
# Storage: Volume Group Parameter: Nova: Concurrent Disk Ops
LVG_NOVA_PARAM_DISK_OPS_DEFAULT = 2
@ -582,8 +576,6 @@ CONTROLLER_AUDIT_REQUESTS = [DISK_AUDIT_REQUEST,
# Storage: Host Aggregates Groups
HOST_AGG_NAME_REMOTE = 'remote_storage_hosts'
HOST_AGG_META_REMOTE = 'remote'
HOST_AGG_NAME_LOCAL_LVM = 'local_storage_lvm_hosts'
HOST_AGG_META_LOCAL_LVM = 'local_lvm'
HOST_AGG_NAME_LOCAL_IMAGE = 'local_storage_image_hosts'
HOST_AGG_META_LOCAL_IMAGE = 'local_image'

View File

@ -324,7 +324,6 @@ class OpenStackOperator(object):
nova_aggset_provider.add(aggregate.name)
aggset_storage = set([
constants.HOST_AGG_NAME_LOCAL_LVM,
constants.HOST_AGG_NAME_LOCAL_IMAGE,
constants.HOST_AGG_NAME_REMOTE])
agglist_missing = list(aggset_storage - nova_aggset_provider)
@ -345,9 +344,7 @@ class OpenStackOperator(object):
# Add the metadata
try:
if agg_name == constants.HOST_AGG_NAME_LOCAL_LVM:
metadata = {'storage': constants.HOST_AGG_META_LOCAL_LVM}
elif agg_name == constants.HOST_AGG_NAME_LOCAL_IMAGE:
if agg_name == constants.HOST_AGG_NAME_LOCAL_IMAGE:
metadata = {'storage': constants.HOST_AGG_META_LOCAL_IMAGE}
else:
metadata = {'storage': constants.HOST_AGG_META_REMOTE}
@ -405,8 +402,6 @@ class OpenStackOperator(object):
agg_add_to = {
constants.LVG_NOVA_BACKING_IMAGE:
constants.HOST_AGG_NAME_LOCAL_IMAGE,
constants.LVG_NOVA_BACKING_LVM:
constants.HOST_AGG_NAME_LOCAL_LVM,
constants.LVG_NOVA_BACKING_REMOTE:
constants.HOST_AGG_NAME_REMOTE
}.get(lvg_backing)
@ -656,7 +651,6 @@ class OpenStackOperator(object):
# setup the valid set of storage aggregates for host removal
aggset_storage = set([
constants.HOST_AGG_NAME_LOCAL_LVM,
constants.HOST_AGG_NAME_LOCAL_IMAGE,
constants.HOST_AGG_NAME_REMOTE])

View File

@ -445,7 +445,6 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
pvs = self.dbapi.ipv_get_by_ihost(host.id)
instance_backing = constants.LVG_NOVA_BACKING_IMAGE
instances_lv_size = constants.LVG_NOVA_PARAM_INST_LV_SZ_DEFAULT
concurrent_disk_operations = constants.LVG_NOVA_PARAM_DISK_OPS_DEFAULT
final_pvs = []
@ -480,8 +479,6 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
constants.LVG_NOVA_PARAM_BACKING)
concurrent_disk_operations = lvg.capabilities.get(
constants.LVG_NOVA_PARAM_DISK_OPS)
instances_lv_size = lvg.capabilities.get(
constants.LVG_NOVA_PARAM_INST_LV_SZ)
global_filter, update_filter = self._get_lvm_global_filter(host)
@ -492,8 +489,6 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
'openstack::nova::storage::lvm_global_filter': global_filter,
'openstack::nova::storage::lvm_update_filter': update_filter,
'openstack::nova::storage::instance_backing': instance_backing,
'openstack::nova::storage::instances_lv_size':
"%sm" % instances_lv_size,
'openstack::nova::storage::concurrent_disk_operations':
concurrent_disk_operations, }