Create docker-lv in the automatically created cgts-vg volume group
This commit creates docker-lv in the automatically created cgts-vg volume group on the worker nodes if kubernetes is configured. It also supports cgts-vg expansion. This commit also increases docker-lv size to 30G for all the hosts. Story: 2004520 Task: 28663 Depends-On: https://review.openstack.org/628053 Change-Id: I9b53421318883d0e951d64d0be91e146d14e5d7b Signed-off-by: Wei Zhou <wei.zhou@windriver.com>
This commit is contained in:
parent
17f9972698
commit
6da46d861b
|
@ -223,7 +223,7 @@ class platform::filesystem::storage {
|
||||||
|
|
||||||
if $::platform::kubernetes::params::enabled {
|
if $::platform::kubernetes::params::enabled {
|
||||||
class {'platform::filesystem::docker::params' :
|
class {'platform::filesystem::docker::params' :
|
||||||
lv_size => 10
|
lv_size => 30
|
||||||
}
|
}
|
||||||
-> class {'platform::filesystem::docker' :
|
-> class {'platform::filesystem::docker' :
|
||||||
}
|
}
|
||||||
|
@ -239,7 +239,7 @@ class platform::filesystem::compute {
|
||||||
|
|
||||||
if $::platform::kubernetes::params::enabled {
|
if $::platform::kubernetes::params::enabled {
|
||||||
class {'platform::filesystem::docker::params' :
|
class {'platform::filesystem::docker::params' :
|
||||||
fs_use_all => true
|
lv_size => 30
|
||||||
}
|
}
|
||||||
-> class {'platform::filesystem::docker' :
|
-> class {'platform::filesystem::docker' :
|
||||||
}
|
}
|
||||||
|
|
|
@ -114,12 +114,8 @@ class platform::lvm::controller::runtime {
|
||||||
###############
|
###############
|
||||||
|
|
||||||
class platform::lvm::compute::vgs {
|
class platform::lvm::compute::vgs {
|
||||||
|
include ::platform::lvm::vg::cgts_vg
|
||||||
include ::platform::lvm::vg::nova_local
|
include ::platform::lvm::vg::nova_local
|
||||||
include ::platform::kubernetes::params
|
|
||||||
|
|
||||||
if $::platform::kubernetes::params::enabled {
|
|
||||||
include ::platform::lvm::vg::cgts_vg
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
class platform::lvm::compute
|
class platform::lvm::compute
|
||||||
|
|
|
@ -4138,69 +4138,6 @@ class HostController(rest.RestController):
|
||||||
"Please refer to system admin guide for more details.") %
|
"Please refer to system admin guide for more details.") %
|
||||||
(ihost['hostname']))
|
(ihost['hostname']))
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _semantic_check_cgts_storage(ihost_uuid, personality):
|
|
||||||
"""
|
|
||||||
Perform semantic checking for cgts storage on worker hosts.
|
|
||||||
CGTS VG on workers used for kubernetes docker lv only at this time.
|
|
||||||
:param ihost_uuid: uuid of host with worker functionality
|
|
||||||
:param personality: personality of host with worker functionality
|
|
||||||
"""
|
|
||||||
|
|
||||||
if personality != constants.WORKER:
|
|
||||||
return
|
|
||||||
|
|
||||||
# query volume groups
|
|
||||||
cgts_local_storage_lvg = None
|
|
||||||
ihost_ilvgs = pecan.request.dbapi.ilvg_get_by_ihost(ihost_uuid)
|
|
||||||
for lvg in ihost_ilvgs:
|
|
||||||
if lvg.lvm_vg_name == constants.LVG_CGTS_VG:
|
|
||||||
cgts_local_storage_lvg = lvg
|
|
||||||
break
|
|
||||||
|
|
||||||
# Prevent unlock if no CGTS vg or pv volume allocated
|
|
||||||
if cgts_local_storage_lvg:
|
|
||||||
if cgts_local_storage_lvg.vg_state == constants.LVG_DEL:
|
|
||||||
raise wsme.exc.ClientSideError(
|
|
||||||
_("With kubernetes configured, "
|
|
||||||
"a worker host requires a "
|
|
||||||
"cgts volume group prior to being enabled. It is "
|
|
||||||
"currently set to be removed on unlock. Please update "
|
|
||||||
"the storage settings for the host."))
|
|
||||||
|
|
||||||
else:
|
|
||||||
# Make sure that we have physical volumes allocated to the
|
|
||||||
# volume group
|
|
||||||
ihost_ipvs = pecan.request.dbapi.ipv_get_by_ihost(ihost_uuid)
|
|
||||||
lvg_has_pvs = False
|
|
||||||
for pv in ihost_ipvs:
|
|
||||||
if ((pv.lvm_vg_name == cgts_local_storage_lvg.lvm_vg_name) and
|
|
||||||
(pv.pv_state != constants.PV_DEL)):
|
|
||||||
|
|
||||||
lvg_has_pvs = True
|
|
||||||
break
|
|
||||||
|
|
||||||
if not lvg_has_pvs:
|
|
||||||
raise wsme.exc.ClientSideError(
|
|
||||||
_("With kubernetes configured, "
|
|
||||||
"a worker host requires a "
|
|
||||||
"cgts volume group prior to being enabled."
|
|
||||||
"The cgts volume group does not contain any "
|
|
||||||
"physical volumes in the adding or provisioned "
|
|
||||||
"state."))
|
|
||||||
else:
|
|
||||||
# This method is only called with hosts that have a worker
|
|
||||||
# subfunction and is locked or if subfunction_config action is
|
|
||||||
# being called. Without a cgts volume group, prevent
|
|
||||||
# unlocking.
|
|
||||||
|
|
||||||
msg = _('With kubernetes configured, '
|
|
||||||
'a worker host requires a cgts volume group prior to being '
|
|
||||||
'enabled. Please update the storage settings for the '
|
|
||||||
'host.')
|
|
||||||
|
|
||||||
raise wsme.exc.ClientSideError('%s' % msg)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _handle_ttys_dcd_change(ihost, ttys_dcd):
|
def _handle_ttys_dcd_change(ihost, ttys_dcd):
|
||||||
"""
|
"""
|
||||||
|
@ -4321,11 +4258,6 @@ class HostController(rest.RestController):
|
||||||
self._semantic_check_nova_local_storage(
|
self._semantic_check_nova_local_storage(
|
||||||
hostupdate.ihost_patch['uuid'],
|
hostupdate.ihost_patch['uuid'],
|
||||||
hostupdate.ihost_patch['personality'])
|
hostupdate.ihost_patch['personality'])
|
||||||
if utils.is_kubernetes_config():
|
|
||||||
# CGTS Storage checks
|
|
||||||
self._semantic_check_cgts_storage(
|
|
||||||
hostupdate.ihost_patch['uuid'],
|
|
||||||
hostupdate.ihost_patch['personality'])
|
|
||||||
else:
|
else:
|
||||||
raise wsme.exc.ClientSideError(_(
|
raise wsme.exc.ClientSideError(_(
|
||||||
"action_check unrecognized action: %s" % action))
|
"action_check unrecognized action: %s" % action))
|
||||||
|
@ -5275,11 +5207,6 @@ class HostController(rest.RestController):
|
||||||
self._semantic_check_nova_local_storage(ihost['uuid'],
|
self._semantic_check_nova_local_storage(ihost['uuid'],
|
||||||
ihost['personality'])
|
ihost['personality'])
|
||||||
|
|
||||||
# CGTS Storage checks
|
|
||||||
if utils.is_kubernetes_config():
|
|
||||||
self._semantic_check_cgts_storage(ihost['uuid'],
|
|
||||||
ihost['personality'])
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def check_unlock_storage(hostupdate):
|
def check_unlock_storage(hostupdate):
|
||||||
"""Storage unlock semantic checks"""
|
"""Storage unlock semantic checks"""
|
||||||
|
|
|
@ -519,12 +519,6 @@ def _check_host(lvg):
|
||||||
"has a %s subfunction.") %
|
"has a %s subfunction.") %
|
||||||
(constants.LVG_NOVA_LOCAL,
|
(constants.LVG_NOVA_LOCAL,
|
||||||
constants.WORKER))
|
constants.WORKER))
|
||||||
elif (ihost.personality == constants.WORKER and
|
|
||||||
lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and
|
|
||||||
not utils.is_kubernetes_config()):
|
|
||||||
raise wsme.exc.ClientSideError(_("%s can not be provisioned for %s "
|
|
||||||
"hosts.") % (constants.LVG_CGTS_VG,
|
|
||||||
constants.WORKER))
|
|
||||||
elif (ihost.personality in [constants.WORKER, constants.STORAGE] and
|
elif (ihost.personality in [constants.WORKER, constants.STORAGE] and
|
||||||
lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES):
|
lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES):
|
||||||
raise wsme.exc.ClientSideError(_("%s can only be provisioned for %s "
|
raise wsme.exc.ClientSideError(_("%s can only be provisioned for %s "
|
||||||
|
@ -537,13 +531,6 @@ def _check_host(lvg):
|
||||||
ihost['ihost_action'] == constants.UNLOCK_ACTION)):
|
ihost['ihost_action'] == constants.UNLOCK_ACTION)):
|
||||||
raise wsme.exc.ClientSideError(_("Host must be locked"))
|
raise wsme.exc.ClientSideError(_("Host must be locked"))
|
||||||
|
|
||||||
if utils.is_kubernetes_config():
|
|
||||||
if (ihost.personality == constants.WORKER and
|
|
||||||
lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and
|
|
||||||
(ihost['administrative'] != constants.ADMIN_LOCKED or
|
|
||||||
ihost['ihost_action'] == constants.UNLOCK_ACTION)):
|
|
||||||
raise wsme.exc.ClientSideError(_("Host must be locked"))
|
|
||||||
|
|
||||||
|
|
||||||
def _get_mate_ctrl_lvg(lvg):
|
def _get_mate_ctrl_lvg(lvg):
|
||||||
""" Return the lvg object with same VG name of mate controller """
|
""" Return the lvg object with same VG name of mate controller """
|
||||||
|
@ -595,8 +582,7 @@ def _check(op, lvg):
|
||||||
" both controllers." % {'lvm_type': constants.LVG_CINDER_PARAM_LVM_TYPE,
|
" both controllers." % {'lvm_type': constants.LVG_CINDER_PARAM_LVM_TYPE,
|
||||||
'vg_name': lvg['lvm_vg_name'],
|
'vg_name': lvg['lvm_vg_name'],
|
||||||
'type': mate_type}))
|
'type': mate_type}))
|
||||||
if (lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and
|
if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
|
||||||
not utils.is_kubernetes_config()):
|
|
||||||
raise wsme.exc.ClientSideError(_("%s volume group already exists") %
|
raise wsme.exc.ClientSideError(_("%s volume group already exists") %
|
||||||
constants.LVG_CGTS_VG)
|
constants.LVG_CGTS_VG)
|
||||||
elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
|
elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
|
||||||
|
|
|
@ -474,22 +474,15 @@ def _check_host(pv, ihost, op):
|
||||||
|
|
||||||
ilvg = pecan.request.dbapi.ilvg_get(ilvgid)
|
ilvg = pecan.request.dbapi.ilvg_get(ilvgid)
|
||||||
|
|
||||||
if utils.is_kubernetes_config():
|
if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
|
||||||
if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
|
if (ihost['personality'] != constants.CONTROLLER and
|
||||||
if (ihost['personality'] != constants.CONTROLLER and
|
ihost['personality'] != constants.WORKER):
|
||||||
ihost['personality'] != constants.WORKER):
|
|
||||||
raise wsme.exc.ClientSideError(
|
|
||||||
_("Physical volume operations for %s are only "
|
|
||||||
"supported on %s and %s hosts" %
|
|
||||||
(constants.LVG_CGTS_VG,
|
|
||||||
constants.WORKER,
|
|
||||||
constants.CONTROLLER)))
|
|
||||||
elif (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
|
|
||||||
if ihost['personality'] != constants.CONTROLLER:
|
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Physical volume operations for %s are only supported "
|
_("Physical volume operations for %s are only "
|
||||||
"on %s hosts") % (constants.LVG_CGTS_VG,
|
"supported on %s and %s hosts" %
|
||||||
constants.CONTROLLER))
|
(constants.LVG_CGTS_VG,
|
||||||
|
constants.WORKER,
|
||||||
|
constants.CONTROLLER)))
|
||||||
|
|
||||||
# semantic check: host must be locked for a nova-local change on
|
# semantic check: host must be locked for a nova-local change on
|
||||||
# a host with a worker subfunction (worker or AIO)
|
# a host with a worker subfunction (worker or AIO)
|
||||||
|
@ -501,12 +494,11 @@ def _check_host(pv, ihost, op):
|
||||||
|
|
||||||
# semantic check: host must be locked for a CGTS change on
|
# semantic check: host must be locked for a CGTS change on
|
||||||
# a worker host.
|
# a worker host.
|
||||||
if utils.is_kubernetes_config():
|
if (ihost['personality'] == constants.WORKER and
|
||||||
if (ihost['personality'] == constants.WORKER and
|
ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
|
||||||
ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
|
(ihost['administrative'] != constants.ADMIN_LOCKED or
|
||||||
(ihost['administrative'] != constants.ADMIN_LOCKED or
|
ihost['ihost_action'] == constants.UNLOCK_ACTION)):
|
||||||
ihost['ihost_action'] == constants.UNLOCK_ACTION)):
|
raise wsme.exc.ClientSideError(_("Host must be locked"))
|
||||||
raise wsme.exc.ClientSideError(_("Host must be locked"))
|
|
||||||
|
|
||||||
|
|
||||||
def _get_vg_size_from_pvs(lvg, filter_pv=None):
|
def _get_vg_size_from_pvs(lvg, filter_pv=None):
|
||||||
|
@ -599,7 +591,6 @@ def _check_lvg(op, pv):
|
||||||
raise wsme.exc.ClientSideError(msg)
|
raise wsme.exc.ClientSideError(msg)
|
||||||
|
|
||||||
elif op == "delete":
|
elif op == "delete":
|
||||||
# Possible Kubernetes issue, do we want to allow this on worker nodes?
|
|
||||||
if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
|
if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Physical volumes cannot be removed from the cgts-vg volume "
|
_("Physical volumes cannot be removed from the cgts-vg volume "
|
||||||
|
|
|
@ -282,19 +282,20 @@ DEFAULT_SMALL_DISK_SIZE = 240
|
||||||
# ANCHOR_LV 1
|
# ANCHOR_LV 1
|
||||||
# DEFAULT_EXTENSION_STOR_SIZE 1
|
# DEFAULT_EXTENSION_STOR_SIZE 1
|
||||||
# DEFAULT_GNOCCHI_STOR_SIZE 5
|
# DEFAULT_GNOCCHI_STOR_SIZE 5
|
||||||
# KUBERNETES_DOCKER_STOR_SIZE (--kubernetes) 16
|
# KUBERNETES_DOCKER_STOR_SIZE (--kubernetes) 30
|
||||||
# DOCKER_DISTRIBUTION_STOR_SIZE (--kubernetes) 16
|
# DOCKER_DISTRIBUTION_STOR_SIZE (--kubernetes) 16
|
||||||
# ETCD_STOR_SIZE (--kubernetes) 5
|
# ETCD_STOR_SIZE (--kubernetes) 5
|
||||||
|
# CEPH_MON_SIZE (--kubernetes) 20
|
||||||
# buffer inside VG for LV creation 1
|
# buffer inside VG for LV creation 1
|
||||||
# root partition (created in kickstarts) 20
|
# root partition (created in kickstarts) 20
|
||||||
# boot partition (created in kickstarts) 1
|
# boot partition (created in kickstarts) 1
|
||||||
# buffer for partition creation 1
|
# buffer for partition creation 1
|
||||||
# -------------------------------------------------------
|
# -------------------------------------------------------
|
||||||
# 167
|
# 201
|
||||||
MINIMUM_DISK_SIZE = 167
|
MINIMUM_DISK_SIZE = 201
|
||||||
|
|
||||||
# Docker lv size when Kubernetes is configured
|
# Docker lv size when Kubernetes is configured
|
||||||
KUBERNETES_DOCKER_STOR_SIZE = 16
|
KUBERNETES_DOCKER_STOR_SIZE = 30
|
||||||
DOCKER_DISTRIBUTION_STOR_SIZE = 16
|
DOCKER_DISTRIBUTION_STOR_SIZE = 16
|
||||||
ETCD_STOR_SIZE = 5
|
ETCD_STOR_SIZE = 5
|
||||||
|
|
||||||
|
|
|
@ -6543,9 +6543,9 @@ class ConductorManager(service.PeriodicService):
|
||||||
# Defaults: 500G root disk
|
# Defaults: 500G root disk
|
||||||
#
|
#
|
||||||
# Min size of the cgts-vg PV is:
|
# Min size of the cgts-vg PV is:
|
||||||
# 184.0 G - PV for cgts-vg (specified in the kickstart)
|
# 218.0 G - PV for cgts-vg (specified in the kickstart)
|
||||||
# or
|
# or
|
||||||
# 192.0 G - (for DCSC non-AIO)
|
# 226.0 G - (for DCSC non-AIO)
|
||||||
# 8 G - /var/log (reserved in kickstart)
|
# 8 G - /var/log (reserved in kickstart)
|
||||||
# 8 G - /scratch (reserved in kickstart)
|
# 8 G - /scratch (reserved in kickstart)
|
||||||
# 2 G - cgcs_lv (DRBD bootstrap manifest)
|
# 2 G - cgcs_lv (DRBD bootstrap manifest)
|
||||||
|
@ -6569,23 +6569,24 @@ class ConductorManager(service.PeriodicService):
|
||||||
# 50 G - /opt/backup
|
# 50 G - /opt/backup
|
||||||
# 5 G - /opt/gnocchi
|
# 5 G - /opt/gnocchi
|
||||||
# 1 G - anchor_lv
|
# 1 G - anchor_lv
|
||||||
# 16 G - /var/lib/docker (--kubernetes)
|
# 30 G - /var/lib/docker (--kubernetes)
|
||||||
# 16 G - /var/lib/docker-distribution (--kubernetes)
|
# 16 G - /var/lib/docker-distribution (--kubernetes)
|
||||||
# 5 G - /opt/etcd (--kubernetes)
|
# 5 G - /opt/etcd (--kubernetes)
|
||||||
|
# 20 G - /var/lib/ceph/mon (--kubernetes)
|
||||||
# 8 G - /opt/patch-vault (DRBD ctlr manifest for
|
# 8 G - /opt/patch-vault (DRBD ctlr manifest for
|
||||||
# Distributed Cloud System Controller non-AIO only)
|
# Distributed Cloud System Controller non-AIO only)
|
||||||
# -----
|
# -----
|
||||||
# 192 G (for DCSC non-AIO) or 184 G
|
# 226 G (for DCSC non-AIO) or 218 G
|
||||||
#
|
#
|
||||||
# The absolute minimum disk size for these default settings:
|
# The absolute minimum disk size for these default settings:
|
||||||
# 0.5 G - /boot
|
# 0.5 G - /boot
|
||||||
# 20.0 G - /
|
# 20.0 G - /
|
||||||
# 184.0 G - cgts-vg PV
|
# 218.0 G - cgts-vg PV
|
||||||
# or 192.0 G - (DCSC non-AIO)
|
# or 226.0 G - (DCSC non-AIO)
|
||||||
# -------
|
# -------
|
||||||
# 204.5 G => ~205G min size disk
|
# 238.5 G => ~239G min size disk
|
||||||
# or
|
# or
|
||||||
# 212.5 G => ~213G min size disk
|
# 246.5 G => ~247G min size disk
|
||||||
#
|
#
|
||||||
# If required disk is size 500G:
|
# If required disk is size 500G:
|
||||||
# 1) Standard controller - will use all free space for the PV
|
# 1) Standard controller - will use all free space for the PV
|
||||||
|
@ -6596,8 +6597,8 @@ class ConductorManager(service.PeriodicService):
|
||||||
# 2) AIO - will leave unused space for further partitioning
|
# 2) AIO - will leave unused space for further partitioning
|
||||||
# 0.5 G - /boot
|
# 0.5 G - /boot
|
||||||
# 20.0 G - /
|
# 20.0 G - /
|
||||||
# 184.0 G - cgts-vg PV
|
# 218.0 G - cgts-vg PV
|
||||||
# 295.5 G - unpartitioned free space
|
# 261.5 G - unpartitioned free space
|
||||||
#
|
#
|
||||||
database_storage = constants.DEFAULT_DATABASE_STOR_SIZE
|
database_storage = constants.DEFAULT_DATABASE_STOR_SIZE
|
||||||
if glance_local:
|
if glance_local:
|
||||||
|
@ -6620,9 +6621,9 @@ class ConductorManager(service.PeriodicService):
|
||||||
# Small disk: under 240G root disk
|
# Small disk: under 240G root disk
|
||||||
#
|
#
|
||||||
# Min size of the cgts-vg PV is:
|
# Min size of the cgts-vg PV is:
|
||||||
# 144.0 G - PV for cgts-vg (specified in the kickstart)
|
# 178.0 G - PV for cgts-vg (specified in the kickstart)
|
||||||
# or
|
# or
|
||||||
# 152.0 G - (for DCSC non-AIO)
|
# 186.0 G - (for DCSC non-AIO)
|
||||||
# 8 G - /var/log (reserved in kickstart)
|
# 8 G - /var/log (reserved in kickstart)
|
||||||
# 8 G - /scratch (reserved in kickstart)
|
# 8 G - /scratch (reserved in kickstart)
|
||||||
# 2 G - cgcs_lv (DRBD bootstrap manifest)
|
# 2 G - cgcs_lv (DRBD bootstrap manifest)
|
||||||
|
@ -6646,23 +6647,24 @@ class ConductorManager(service.PeriodicService):
|
||||||
# 40 G - /opt/backup
|
# 40 G - /opt/backup
|
||||||
# 5 G - /opt/gnocchi
|
# 5 G - /opt/gnocchi
|
||||||
# 1 G - anchor_lv
|
# 1 G - anchor_lv
|
||||||
# 16 G - /var/lib/docker (--kubernetes)
|
# 30 G - /var/lib/docker (--kubernetes)
|
||||||
# 16 G - /var/lib/docker-distribution (--kubernetes)
|
# 16 G - /var/lib/docker-distribution (--kubernetes)
|
||||||
|
# 20 G - /var/lib/ceph/mon (--kubernetes)
|
||||||
# 5 G - /opt/etcd (--kubernetes)
|
# 5 G - /opt/etcd (--kubernetes)
|
||||||
# 8 G - /opt/patch-vault (DRBD ctlr manifest for DCSC non-AIO only)
|
# 8 G - /opt/patch-vault (DRBD ctlr manifest for DCSC non-AIO only)
|
||||||
# -----
|
# -----
|
||||||
# 152 G (for DCSC non-AIO) or 144 G
|
# 186 G (for DCSC non-AIO) or 178 G
|
||||||
#
|
#
|
||||||
# The absolute minimum disk size for these default settings:
|
# The absolute minimum disk size for these default settings:
|
||||||
# 0.5 G - /boot
|
# 0.5 G - /boot
|
||||||
# 20.0 G - /
|
# 20.0 G - /
|
||||||
# 144.0 G - cgts-vg PV
|
# 178.0 G - cgts-vg PV
|
||||||
# or
|
# or
|
||||||
# 152.0 G - (for DCSC non-AIO)
|
# 186.0 G - (for DCSC non-AIO)
|
||||||
# -------
|
# -------
|
||||||
# 164.5 G => ~165G min size disk
|
# 198.5 G => ~199G min size disk
|
||||||
# or
|
# or
|
||||||
# 172.5 G => ~173G min size disk
|
# 206.5 G => ~207G min size disk
|
||||||
#
|
#
|
||||||
# If required disk is size 240G:
|
# If required disk is size 240G:
|
||||||
# 1) Standard controller - will use all free space for the PV
|
# 1) Standard controller - will use all free space for the PV
|
||||||
|
@ -6672,8 +6674,8 @@ class ConductorManager(service.PeriodicService):
|
||||||
# 2) AIO - will leave unused space for further partitioning
|
# 2) AIO - will leave unused space for further partitioning
|
||||||
# 0.5 G - /boot
|
# 0.5 G - /boot
|
||||||
# 20.0 G - /
|
# 20.0 G - /
|
||||||
# 144.0 G - cgts-vg PV
|
# 178.0 G - cgts-vg PV
|
||||||
# 75.5 G - unpartitioned free space
|
# 41.5 G - unpartitioned free space
|
||||||
#
|
#
|
||||||
database_storage = \
|
database_storage = \
|
||||||
constants.DEFAULT_SMALL_DATABASE_STOR_SIZE
|
constants.DEFAULT_SMALL_DATABASE_STOR_SIZE
|
||||||
|
|
|
@ -203,7 +203,7 @@ class StoragePuppet(base.BasePuppet):
|
||||||
ceph_mon_devices = []
|
ceph_mon_devices = []
|
||||||
|
|
||||||
# LVM Global Filter is driven by:
|
# LVM Global Filter is driven by:
|
||||||
# - cgts-vg PVs : controllers and all storage
|
# - cgts-vg PVs : all nodes
|
||||||
# - cinder-volumes PVs: controllers
|
# - cinder-volumes PVs: controllers
|
||||||
# - nova-local PVs : controllers and all workers
|
# - nova-local PVs : controllers and all workers
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue