Merge "Create docker-lv in the automatically created ctgs-vg volume group"

Zuul 2019-01-10 22:49:20 +00:00 committed by Gerrit Code Review
commit 4e204d7d45
8 changed files with 45 additions and 142 deletions

View File

@@ -223,7 +223,7 @@ class platform::filesystem::storage {
   if $::platform::kubernetes::params::enabled {
     class {'platform::filesystem::docker::params' :
-      lv_size => 10
+      lv_size => 30
     }
     -> class {'platform::filesystem::docker' :
     }
@@ -239,7 +239,7 @@ class platform::filesystem::compute {
   if $::platform::kubernetes::params::enabled {
     class {'platform::filesystem::docker::params' :
-      fs_use_all => true
+      lv_size => 30
     }
     -> class {'platform::filesystem::docker' :
     }

View File

@@ -114,12 +114,8 @@ class platform::lvm::controller::runtime {
 ###############
 class platform::lvm::compute::vgs {
+  include ::platform::lvm::vg::cgts_vg
   include ::platform::lvm::vg::nova_local
-
-  include ::platform::kubernetes::params
-  if $::platform::kubernetes::params::enabled {
-    include ::platform::lvm::vg::cgts_vg
-  }
 }
 
 class platform::lvm::compute

View File

@@ -4138,69 +4138,6 @@ class HostController(rest.RestController):
                       "Please refer to system admin guide for more details.") %
                     (ihost['hostname']))
 
-    @staticmethod
-    def _semantic_check_cgts_storage(ihost_uuid, personality):
-        """
-        Perform semantic checking for cgts storage on worker hosts.
-        CGTS VG on workers used for kubernetes docker lv only at this time.
-        :param ihost_uuid: uuid of host with worker functionality
-        :param personality: personality of host with worker functionality
-        """
-        if personality != constants.WORKER:
-            return
-
-        # query volume groups
-        cgts_local_storage_lvg = None
-        ihost_ilvgs = pecan.request.dbapi.ilvg_get_by_ihost(ihost_uuid)
-        for lvg in ihost_ilvgs:
-            if lvg.lvm_vg_name == constants.LVG_CGTS_VG:
-                cgts_local_storage_lvg = lvg
-                break
-
-        # Prevent unlock if no CGTS vg or pv volume allocated
-        if cgts_local_storage_lvg:
-            if cgts_local_storage_lvg.vg_state == constants.LVG_DEL:
-                raise wsme.exc.ClientSideError(
-                    _("With kubernetes configured, "
-                      "a worker host requires a "
-                      "cgts volume group prior to being enabled. It is "
-                      "currently set to be removed on unlock. Please update "
-                      "the storage settings for the host."))
-            else:
-                # Make sure that we have physical volumes allocated to the
-                # volume group
-                ihost_ipvs = pecan.request.dbapi.ipv_get_by_ihost(ihost_uuid)
-                lvg_has_pvs = False
-                for pv in ihost_ipvs:
-                    if ((pv.lvm_vg_name == cgts_local_storage_lvg.lvm_vg_name) and
-                            (pv.pv_state != constants.PV_DEL)):
-                        lvg_has_pvs = True
-                        break
-
-                if not lvg_has_pvs:
-                    raise wsme.exc.ClientSideError(
-                        _("With kubernetes configured, "
-                          "a worker host requires a "
-                          "cgts volume group prior to being enabled."
-                          "The cgts volume group does not contain any "
-                          "physical volumes in the adding or provisioned "
-                          "state."))
-        else:
-            # This method is only called with hosts that have a worker
-            # subfunction and is locked or if subfunction_config action is
-            # being called. Without a cgts volume group, prevent
-            # unlocking.
-            msg = _('With kubernetes configured, '
-                    'a worker host requires a cgts volume group prior to being '
-                    'enabled. Please update the storage settings for the '
-                    'host.')
-            raise wsme.exc.ClientSideError('%s' % msg)
-
     @staticmethod
     def _handle_ttys_dcd_change(ihost, ttys_dcd):
         """
@@ -4321,11 +4258,6 @@ class HostController(rest.RestController):
             self._semantic_check_nova_local_storage(
                 hostupdate.ihost_patch['uuid'],
                 hostupdate.ihost_patch['personality'])
-            if utils.is_kubernetes_config():
-                # CGTS Storage checks
-                self._semantic_check_cgts_storage(
-                    hostupdate.ihost_patch['uuid'],
-                    hostupdate.ihost_patch['personality'])
         else:
             raise wsme.exc.ClientSideError(_(
                 "action_check unrecognized action: %s" % action))
@@ -5275,11 +5207,6 @@ class HostController(rest.RestController):
         self._semantic_check_nova_local_storage(ihost['uuid'],
                                                 ihost['personality'])
-        # CGTS Storage checks
-        if utils.is_kubernetes_config():
-            self._semantic_check_cgts_storage(ihost['uuid'],
-                                              ihost['personality'])
-
     @staticmethod
     def check_unlock_storage(hostupdate):
         """Storage unlock semantic checks"""

View File

@@ -519,12 +519,6 @@ def _check_host(lvg):
                                            "has a %s subfunction.") %
                                          (constants.LVG_NOVA_LOCAL,
                                           constants.WORKER))
-    elif (ihost.personality == constants.WORKER and
-          lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and
-          not utils.is_kubernetes_config()):
-        raise wsme.exc.ClientSideError(_("%s can not be provisioned for %s "
-                                         "hosts.") % (constants.LVG_CGTS_VG,
-                                                      constants.WORKER))
     elif (ihost.personality in [constants.WORKER, constants.STORAGE] and
           lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES):
         raise wsme.exc.ClientSideError(_("%s can only be provisioned for %s "
@@ -537,13 +531,6 @@ def _check_host(lvg):
              ihost['ihost_action'] == constants.UNLOCK_ACTION)):
         raise wsme.exc.ClientSideError(_("Host must be locked"))
-
-    if utils.is_kubernetes_config():
-        if (ihost.personality == constants.WORKER and
-                lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and
-                (ihost['administrative'] != constants.ADMIN_LOCKED or
-                 ihost['ihost_action'] == constants.UNLOCK_ACTION)):
-            raise wsme.exc.ClientSideError(_("Host must be locked"))
 
 def _get_mate_ctrl_lvg(lvg):
     """ Return the lvg object with same VG name of mate controller """
@@ -595,8 +582,7 @@ def _check(op, lvg):
                               " both controllers." % {'lvm_type': constants.LVG_CINDER_PARAM_LVM_TYPE,
                                                       'vg_name': lvg['lvm_vg_name'],
                                                       'type': mate_type}))
-        if (lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and
-                not utils.is_kubernetes_config()):
+        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
             raise wsme.exc.ClientSideError(_("%s volume group already exists") %
                                            constants.LVG_CGTS_VG)
     elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:

View File

@@ -474,22 +474,15 @@ def _check_host(pv, ihost, op):
         ilvg = pecan.request.dbapi.ilvg_get(ilvgid)
 
-        if utils.is_kubernetes_config():
-            if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
-                if (ihost['personality'] != constants.CONTROLLER and
-                        ihost['personality'] != constants.WORKER):
-                    raise wsme.exc.ClientSideError(
-                        _("Physical volume operations for %s are only "
-                          "supported on %s and %s hosts" %
-                          (constants.LVG_CGTS_VG,
-                           constants.WORKER,
-                           constants.CONTROLLER)))
-        elif (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
-            if ihost['personality'] != constants.CONTROLLER:
+        if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
+            if (ihost['personality'] != constants.CONTROLLER and
+                    ihost['personality'] != constants.WORKER):
                 raise wsme.exc.ClientSideError(
-                    _("Physical volume operations for %s are only supported "
-                      "on %s hosts") % (constants.LVG_CGTS_VG,
-                                        constants.CONTROLLER))
+                    _("Physical volume operations for %s are only "
+                      "supported on %s and %s hosts" %
+                      (constants.LVG_CGTS_VG,
+                       constants.WORKER,
+                       constants.CONTROLLER)))
 
     # semantic check: host must be locked for a nova-local change on
     # a host with a worker subfunction (worker or AIO)
@@ -501,12 +494,11 @@ def _check_host(pv, ihost, op):
     # semantic check: host must be locked for a CGTS change on
     # a worker host.
-    if utils.is_kubernetes_config():
-        if (ihost['personality'] == constants.WORKER and
-                ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
-                (ihost['administrative'] != constants.ADMIN_LOCKED or
-                 ihost['ihost_action'] == constants.UNLOCK_ACTION)):
-            raise wsme.exc.ClientSideError(_("Host must be locked"))
+    if (ihost['personality'] == constants.WORKER and
+            ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
+            (ihost['administrative'] != constants.ADMIN_LOCKED or
+             ihost['ihost_action'] == constants.UNLOCK_ACTION)):
+        raise wsme.exc.ClientSideError(_("Host must be locked"))
 
 def _get_vg_size_from_pvs(lvg, filter_pv=None):
@@ -599,7 +591,6 @@ def _check_lvg(op, pv):
             raise wsme.exc.ClientSideError(msg)
     elif op == "delete":
-        # Possible Kubernetes issue, do we want to allow this on worker nodes?
         if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
             raise wsme.exc.ClientSideError(
                 _("Physical volumes cannot be removed from the cgts-vg volume "

View File

@@ -282,19 +282,20 @@ DEFAULT_SMALL_DISK_SIZE = 240
 #  ANCHOR_LV                                        1
 #  DEFAULT_EXTENSION_STOR_SIZE                      1
 #  DEFAULT_GNOCCHI_STOR_SIZE                        5
-#  KUBERNETES_DOCKER_STOR_SIZE (--kubernetes)      16
+#  KUBERNETES_DOCKER_STOR_SIZE (--kubernetes)      30
 #  DOCKER_DISTRIBUTION_STOR_SIZE (--kubernetes)    16
 #  ETCD_STOR_SIZE (--kubernetes)                    5
+#  CEPH_MON_SIZE (--kubernetes)                    20
 #  buffer inside VG for LV creation                 1
 #  root partition (created in kickstarts)          20
 #  boot partition (created in kickstarts)           1
 #  buffer for partition creation                    1
 #  -------------------------------------------------------
-#                                                 167
-MINIMUM_DISK_SIZE = 167
+#                                                 201
+MINIMUM_DISK_SIZE = 201
 
 # Docker lv size when Kubernetes is configured
-KUBERNETES_DOCKER_STOR_SIZE = 16
+KUBERNETES_DOCKER_STOR_SIZE = 30
 DOCKER_DISTRIBUTION_STOR_SIZE = 16
 ETCD_STOR_SIZE = 5
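
Reviewer note, not part of the commit: the jump from 167 to 201 is exactly the docker LV increase plus the ceph-mon LV newly listed in the budget above. A quick Python check using only values visible in this hunk:

    # Values taken from the hunk above (all in GiB).
    old_minimum = 167
    docker_increase = 30 - 16   # KUBERNETES_DOCKER_STOR_SIZE: 16 -> 30
    ceph_mon = 20               # CEPH_MON_SIZE, newly counted
    assert old_minimum + docker_increase + ceph_mon == 201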

View File

@@ -6543,9 +6543,9 @@ class ConductorManager(service.PeriodicService):
         # Defaults: 500G root disk
         #
         # Min size of the cgts-vg PV is:
-        #   184.0 G - PV for cgts-vg (specified in the kickstart)
+        #   218.0 G - PV for cgts-vg (specified in the kickstart)
         # or
-        #   192.0 G - (for DCSC non-AIO)
+        #   226.0 G - (for DCSC non-AIO)
         #    8 G - /var/log (reserved in kickstart)
         #    8 G - /scratch (reserved in kickstart)
         #    2 G - cgcs_lv (DRBD bootstrap manifest)
@@ -6569,23 +6569,24 @@ class ConductorManager(service.PeriodicService):
         #   50 G - /opt/backup
         #    5 G - /opt/gnocchi
         #    1 G - anchor_lv
-        #   16 G - /var/lib/docker (--kubernetes)
+        #   30 G - /var/lib/docker (--kubernetes)
         #   16 G - /var/lib/docker-distribution (--kubernetes)
         #    5 G - /opt/etcd (--kubernetes)
+        #   20 G - /var/lib/ceph/mon (--kubernetes)
         #    8 G - /opt/patch-vault (DRBD ctlr manifest for
         #          Distributed Cloud System Controller non-AIO only)
         # -----
-        #  192 G (for DCSC non-AIO) or 184 G
+        #  226 G (for DCSC non-AIO) or 218 G
         #
         # The absolute minimum disk size for these default settings:
         #    0.5 G - /boot
         #   20.0 G - /
-        #  184.0 G - cgts-vg PV
-        #  or 192.0 G - (DCSC non-AIO)
+        #  218.0 G - cgts-vg PV
+        #  or 226.0 G - (DCSC non-AIO)
         # -------
-        #  204.5 G => ~205G min size disk
+        #  238.5 G => ~239G min size disk
         # or
-        #  212.5 G => ~213G min size disk
+        #  246.5 G => ~247G min size disk
         #
         # If required disk is size 500G:
         #  1) Standard controller - will use all free space for the PV
@@ -6596,8 +6597,8 @@ class ConductorManager(service.PeriodicService):
         #  2) AIO - will leave unused space for further partitioning
         #       0.5 G - /boot
         #      20.0 G - /
-        #     184.0 G - cgts-vg PV
-        #     295.5 G - unpartitioned free space
+        #     218.0 G - cgts-vg PV
+        #     261.5 G - unpartitioned free space
         #
         database_storage = constants.DEFAULT_DATABASE_STOR_SIZE
         if glance_local:
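
Reviewer note, not part of the commit: the updated 500G-disk totals in the comment block above are self-consistent:

    # Values from the comments above (all in GiB).
    boot, root = 0.5, 20.0
    assert boot + root + 218.0 == 238.5             # ~239G min size disk
    assert boot + root + 226.0 == 246.5             # ~247G min disk (DCSC non-AIO)
    assert 500.0 - (boot + root + 218.0) == 261.5   # AIO unpartitioned free space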
@@ -6620,9 +6621,9 @@ class ConductorManager(service.PeriodicService):
         # Small disk: under 240G root disk
         #
         # Min size of the cgts-vg PV is:
-        #   144.0 G - PV for cgts-vg (specified in the kickstart)
+        #   178.0 G - PV for cgts-vg (specified in the kickstart)
         # or
-        #   152.0 G - (for DCSC non-AIO)
+        #   186.0 G - (for DCSC non-AIO)
         #    8 G - /var/log (reserved in kickstart)
         #    8 G - /scratch (reserved in kickstart)
         #    2 G - cgcs_lv (DRBD bootstrap manifest)
@@ -6646,23 +6647,24 @@ class ConductorManager(service.PeriodicService):
         #   40 G - /opt/backup
         #    5 G - /opt/gnocchi
         #    1 G - anchor_lv
-        #   16 G - /var/lib/docker (--kubernetes)
+        #   30 G - /var/lib/docker (--kubernetes)
         #   16 G - /var/lib/docker-distribution (--kubernetes)
+        #   20 G - /var/lib/ceph/mon (--kubernetes)
         #    5 G - /opt/etcd (--kubernetes)
        #    8 G - /opt/patch-vault (DRBD ctlr manifest for DCSC non-AIO only)
         # -----
-        #  152 G (for DCSC non-AIO) or 144 G
+        #  186 G (for DCSC non-AIO) or 178 G
         #
         # The absolute minimum disk size for these default settings:
         #    0.5 G - /boot
         #   20.0 G - /
-        #  144.0 G - cgts-vg PV
+        #  178.0 G - cgts-vg PV
         # or
-        #  152.0 G - (for DCSC non-AIO)
+        #  186.0 G - (for DCSC non-AIO)
         # -------
-        #  164.5 G => ~165G min size disk
+        #  198.5 G => ~199G min size disk
         # or
-        #  172.5 G => ~173G min size disk
+        #  206.5 G => ~207G min size disk
         #
         # If required disk is size 240G:
         #  1) Standard controller - will use all free space for the PV
@@ -6672,8 +6674,8 @@ class ConductorManager(service.PeriodicService):
         #  2) AIO - will leave unused space for further partitioning
         #       0.5 G - /boot
         #      20.0 G - /
-        #     144.0 G - cgts-vg PV
-        #      75.5 G - unpartitioned free space
+        #     178.0 G - cgts-vg PV
+        #      41.5 G - unpartitioned free space
         #
         database_storage = \
             constants.DEFAULT_SMALL_DATABASE_STOR_SIZE
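
Reviewer note, not part of the commit: the same check for the small-disk (240G) case:

    # Values from the comments above (all in GiB).
    boot, root = 0.5, 20.0
    assert boot + root + 178.0 == 198.5             # ~199G min size disk
    assert boot + root + 186.0 == 206.5             # ~207G min disk (DCSC non-AIO)
    assert 240.0 - (boot + root + 178.0) == 41.5    # AIO unpartitioned free space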

View File

@@ -203,7 +203,7 @@ class StoragePuppet(base.BasePuppet):
         ceph_mon_devices = []
 
         # LVM Global Filter is driven by:
-        # - cgts-vg PVs       : controllers and all storage
+        # - cgts-vg PVs       : all nodes
         # - cinder-volumes PVs: controllers
         # - nova-local PVs    : controllers and all workers
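
Reviewer note, for illustration only: the comment describes which PVs drive the LVM global filter. A hypothetical helper (not the StoragePuppet implementation) sketching the filter shape LVM expects, where 'a|regex|' accepts a device and a trailing 'r|.*|' rejects everything else:

    def build_global_filter(pv_paths):
        # Accept each managed PV device; reject all other block devices.
        return ['a|%s|' % path for path in pv_paths] + ['r|.*|']

    # e.g. build_global_filter(['/dev/sda5', '/dev/sdb1'])
    #   -> ['a|/dev/sda5|', 'a|/dev/sdb1|', 'r|.*|']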