Create new host_fs CLI and API

Create the new host_fs CLI commands and the APIs.
  system host-fs-list
  system host-fs-modify
  system host-fs-show

Remove the backup, scratch and the docker filesystems from the
controllerfs CLI as these are being managed by host-fs.

Changed the host’s boot_device and rootfs_device default values
to the full path name “/dev/sda”. Having “sda” only was causing
get_disk_capacity_mib() to fail to locate the disk.

The documentation for the API changes will be in another submit.

These changes were tested in Standard VBOX, Hardware AIO-DX,
Hardware with worker and also storage nodes. Installs and
configuration of the labs were done.

Partial-Bug: 1830142

Change-Id: I2ca6adf9c5e9debaf0f4a23e67fadf47f2eaf670
Signed-off-by: Kristine Bujold <kristine.bujold@windriver.com>
This commit is contained in:
Kristine Bujold 2019-07-08 13:38:01 -04:00
parent 639ef13667
commit aa8b5637f3
25 changed files with 908 additions and 323 deletions

View File

@ -1,2 +1,2 @@
SRC_DIR="cgts-client"
TIS_PATCH_VER=67
TIS_PATCH_VER=68

View File

@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
@ -31,6 +31,7 @@ from cgtsclient.v1 import ethernetport
from cgtsclient.v1 import fernet
from cgtsclient.v1 import health
from cgtsclient.v1 import helm
from cgtsclient.v1 import host_fs
from cgtsclient.v1 import icommunity
from cgtsclient.v1 import icpu
from cgtsclient.v1 import idisk
@ -157,3 +158,4 @@ class Client(http.HTTPClient):
self.label = label.KubernetesLabelManager(self)
self.fernet = fernet.FernetManager(self)
self.app = app.AppManager(self)
self.host_fs = host_fs.HostFsManager(self)

View File

@ -0,0 +1,63 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# -*- encoding: utf-8 -*-
#
from cgtsclient.common import base
from cgtsclient import exc
class HostFs(base.Resource):
    """A single host filesystem resource returned by the sysinv API."""

    def __repr__(self):
        """Render a short debug representation of this resource."""
        return "<host_fs %s>" % self._info
class HostFsManager(base.Manager):
    """Client-side manager for host filesystem resources."""

    resource_class = HostFs

    def list(self, ihost_id):
        """Return every filesystem belonging to the given host."""
        return self._list('/v1/ihosts/%s/host_fs' % ihost_id, "host_fs")

    def get(self, host_fs_id):
        """Fetch a single filesystem by id, or None when absent."""
        results = self._list('/v1/host_fs/%s' % host_fs_id)
        return results[0] if results else None

    def update(self, host_fs_id, patch):
        """Apply a JSON patch to a single filesystem."""
        return self._update('/v1/host_fs/%s' % host_fs_id, patch)

    def update_many(self, ihost_id, patch):
        """Apply a list of JSON patches to a host's filesystems at once."""
        resp, body = self.api.json_request(
            'PUT', '/v1/ihosts/%s/host_fs/update_many' % ihost_id, body=patch)
        if body:
            return self.resource_class(self, body)
def _find_fs(cc, ihost, host_fs):
if host_fs.isdigit():
try:
fs = cc.host_fs.get(host_fs)
except exc.HTTPNotFound:
raise exc.CommandError('Filesystem not found by id: %s'
% host_fs)
else:
return fs
else:
fs_list = cc.host_fs.list(ihost.uuid)
for fs in fs_list:
if fs.name == host_fs:
return fs
if fs.uuid == host_fs:
return fs
else:
raise exc.CommandError('Filesystem not found by name or '
'uuid: %s' % host_fs)

View File

@ -0,0 +1,98 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# -*- encoding: utf-8 -*-
#
from cgtsclient.common import utils
from cgtsclient import exc
from cgtsclient.v1 import host_fs as fs_utils
from cgtsclient.v1 import ihost as ihost_utils
def _print_fs_show(fs):
    """Print the detail view of a single host filesystem.

    :param fs: a host_fs resource with the listed attributes
    """
    # The attribute names double as the display labels, so a single
    # list serves both purposes (the original duplicated it verbatim).
    fields = ['uuid', 'name', 'size', 'logical_volume',
              'created_at', 'updated_at']
    data = [(f, getattr(fs, f)) for f in fields]
    utils.print_tuple_list(data, fields)
def _print_fs_list(cc, ihost_uuid):
    """Print the tabular list of filesystems for one host."""
    labels = ['UUID', 'FS Name', 'Size in GiB', 'Logical Volume']
    attrs = ['uuid', 'name', 'size', 'logical_volume']
    filesystems = cc.host_fs.list(ihost_uuid)
    utils.print_list(filesystems, attrs, labels, sortby=1)
def _find_fs(cc, ihost, uuid):
filesystems = cc.host_fs.list(ihost.uuid)
for fs in filesystems:
if fs.uuid == uuid:
break
else:
raise exc.CommandError('Filesystem not found: host %s filesystem %s' %
(ihost.hostname, uuid))
return fs
@utils.arg('hostnameorid',
           metavar='<hostname or id>',
           help="Name or ID of host [REQUIRED]")
@utils.arg('fsnameoruuid',
           metavar='<fs name or uuid>',
           help="Name or UUID of filesystem [REQUIRED]")
def do_host_fs_show(cc, args):
    """Show details of a host filesystem"""
    # Resolve the host first, then the filesystem within that host.
    host = ihost_utils._find_ihost(cc, args.hostnameorid)
    filesystem = fs_utils._find_fs(cc, host, args.fsnameoruuid)
    _print_fs_show(filesystem)
@utils.arg('hostnameorid',
           metavar='<hostname or id>',
           help="Name or ID of host [REQUIRED]")
def do_host_fs_list(cc, args):
    """Show list of host filesystems"""
    host = ihost_utils._find_ihost(cc, args.hostnameorid)
    _print_fs_list(cc, host.uuid)
@utils.arg('hostnameorid',
           metavar='<hostname or id>',
           help="Name or ID of the host [REQUIRED]")
@utils.arg('attributes',
           metavar='<fs_name=size>',
           nargs='+',
           action='append',
           default=[],
           help="Modify host filesystem sizes")
def do_host_fs_modify(cc, args):
    """Modify the size of a Filesystem."""
    patch_list = []
    for attr in args.attributes[0]:
        # Only the split can raise ValueError, so keep the try tight.
        try:
            fs_name, fs_size = attr.split("=", 1)
        except ValueError:
            raise exc.CommandError('Attributes must be a list of '
                                   'FS_NAME=SIZE not "%s"' % attr)
        patch_list.append([
            {'op': 'replace', 'path': '/name', 'value': fs_name},
            {'op': 'replace', 'path': '/size', 'value': fs_size},
        ])

    ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
    try:
        cc.host_fs.update_many(ihost.uuid, patch_list)
    except exc.HTTPNotFound:
        raise exc.CommandError('Failed to modify filesystems')

    _print_fs_list(cc, ihost.uuid)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -18,6 +18,7 @@ from cgtsclient.v1 import drbdconfig_shell
from cgtsclient.v1 import ethernetport_shell
from cgtsclient.v1 import health_shell
from cgtsclient.v1 import helm_shell
from cgtsclient.v1 import host_fs_shell
from cgtsclient.v1 import icommunity_shell
from cgtsclient.v1 import icpu_shell
@ -117,6 +118,7 @@ COMMAND_MODULES = [
helm_shell,
label_shell,
app_shell,
host_fs_shell,
]

View File

@ -1,2 +1,2 @@
SRC_DIR="sysinv"
TIS_PATCH_VER=326
TIS_PATCH_VER=327

View File

@ -1255,54 +1255,64 @@ class AgentManager(service.PeriodicService):
self._prev_lvg = None
pass
# Update the filesystems
# Get the supported filesystems for this host
# Create the filesystems
filesystems = []
# check if the scratch fs is supported for current host
if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_SCRATCH, self._ihost_personality):
scratch_lv_size = utils.get_controller_fs_scratch_size()
data = {
'name': constants.FILESYSTEM_NAME_SCRATCH,
'size': scratch_lv_size,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_SCRATCH]
}
filesystems.append(data)
# check if the backup fs is supported for current host
if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_BACKUP, self._ihost_personality):
backup_lv_size = utils.get_controller_fs_backup_size(self._ihost_rootfs_device)
data = {
'name': constants.FILESYSTEM_NAME_BACKUP,
'size': backup_lv_size,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_BACKUP]
}
filesystems.append(data)
# check if the docker fs is supported for current host
if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_DOCKER, self._ihost_personality):
data = {
'name': constants.FILESYSTEM_NAME_DOCKER,
'size': constants.KUBERNETES_DOCKER_STOR_SIZE,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_DOCKER]
}
filesystems.append(data)
if filesystems and ((self._prev_fs is None) or (self._prev_fs != filesystems)):
if self._prev_fs is None:
try:
rpcapi.create_host_filesystems(icontext,
self._ihost_uuid,
filesystems)
self._prev_fs = filesystems
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating fs"
"conductor.")
# Get the supported filesystems for this host with default
# sizes
# check if the scratch fs is supported for current host
if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_SCRATCH,
self._ihost_personality):
scratch_lv_size = utils.get_current_fs_size("scratch")
data = {
'name': constants.FILESYSTEM_NAME_SCRATCH,
'size': scratch_lv_size,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_SCRATCH]
}
filesystems.append(data)
# check if the backup fs is supported for current host
if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_BACKUP,
self._ihost_personality):
backup_lv_size = utils.get_default_controller_fs_backup_size(self._ihost_rootfs_device)
data = {
'name': constants.FILESYSTEM_NAME_BACKUP,
'size': backup_lv_size,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_BACKUP]
}
filesystems.append(data)
# check if the docker fs is supported for current host
if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_DOCKER,
self._ihost_personality):
data = {
'name': constants.FILESYSTEM_NAME_DOCKER,
'size': constants.KUBERNETES_DOCKER_STOR_SIZE,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_DOCKER]
}
filesystems.append(data)
if filesystems:
# Create the filesystems if they do not already exist.
# This audit does not check if the fs size has changed.
# Doing so would interfere with the resizes done via
# the HostFs API
rpcapi.create_host_filesystems(icontext,
self._ihost_uuid,
filesystems)
self._prev_fs = filesystems
except Exception as e:
LOG.exception(
"Sysinv Agent exception creating the host filesystems."
" %s" % e)
self._prev_fs = None
pass
self._report_config_applied(icontext)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -82,6 +82,7 @@ from sysinv.api.controllers.v1 import system
from sysinv.api.controllers.v1 import trapdest
from sysinv.api.controllers.v1 import upgrade
from sysinv.api.controllers.v1 import user
from sysinv.api.controllers.v1 import host_fs
class MediaType(base.APIBase):
@ -245,6 +246,9 @@ class V1(base.APIBase):
apps = [link.Link]
"Links to the application resource "
host_fs = [link.Link]
"Links to the host_fs resource"
@classmethod
def convert(self):
v1 = V1()
@ -764,6 +768,13 @@ class V1(base.APIBase):
'interface_datanetworks', '',
bookmark=True)]
v1.host_fs = [link.Link.make_link('self', pecan.request.host_url,
'host_fs', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'host_fs', '',
bookmark=True)]
return v1
@ -831,6 +842,7 @@ class Controller(rest.RestController):
apps = kube_app.KubeAppController()
datanetworks = datanetwork.DataNetworkController()
interface_datanetworks = interface_datanetwork.InterfaceDataNetworkController()
host_fs = host_fs.HostFsController()
@wsme_pecan.wsexpose(V1)
def get(self):

View File

@ -36,12 +36,10 @@ from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import health
from sysinv.common import utils as cutils
from sysinv import objects
from sysinv.openstack.common import log
from sysinv.openstack.common.gettextutils import _
from fm_api import constants as fm_constants
from sysinv.common.storage_backend_conf import StorageBackendConfig
@ -65,10 +63,6 @@ class ControllerFs(base.APIBase):
/var/lib/postgresql (pgsql-lv)
The image GiB of controller_fs - maps to
/opt/cgcs (cgcs-lv)
The backup GiB of controller_fs - maps to
/opt/backups (backup-lv)
The scratch GiB of controller_fs - maps to
/scratch (scratch-lv)
The extension GiB of controller_fs - maps to
/opt/extension (extension-lv)
"""
@ -206,33 +200,45 @@ def _check_relative_controller_multi_fs(controller_fs_new_list):
:return: None. Raise Client exception on failure.
"""
if cutils.is_virtual():
return
backup_gib_min = constants.BACKUP_OVERHEAD
for fs in controller_fs_new_list:
if fs.name == constants.FILESYSTEM_NAME_DATABASE:
database_gib = fs.size
backup_gib_min += fs.size
elif fs.name == constants.FILESYSTEM_NAME_CGCS:
cgcs_gib = fs.size
backup_gib_min += fs.size
elif fs.name == constants.FILESYSTEM_NAME_BACKUP:
backup_gib = fs.size
if backup_gib < backup_gib_min:
raise wsme.exc.ClientSideError(_("backup size of %d is "
"insufficient. "
"Minimum backup size of %d is "
"required based upon glance size %d "
"and database size %d. "
"Rejecting modification "
"request." %
(backup_gib,
backup_gib_min,
cgcs_gib,
database_gib
)))
chosts = pecan.request.dbapi.ihost_get_by_personality(constants.CONTROLLER)
for chost in chosts:
# Get the current backup size for the controller host
backup_gib = 0
hostfs_list = pecan.request.dbapi.host_fs_get_by_ihost(chost.uuid)
for host_fs in hostfs_list:
if host_fs['name'] == constants.FILESYSTEM_NAME_BACKUP:
backup_gib = host_fs['size']
break
for fs in controller_fs_new_list:
if fs.name == constants.FILESYSTEM_NAME_DATABASE:
database_gib = fs.size
backup_gib_min += fs.size
elif fs.name == constants.FILESYSTEM_NAME_CGCS:
cgcs_gib = fs.size
backup_gib_min += fs.size
LOG.info(
"_check_relative_controller_multi_fs min backup size %s" % backup_gib_min)
if backup_gib < backup_gib_min:
raise wsme.exc.ClientSideError(_("backup size of %d is "
"insufficient for host %s. "
"Minimum backup size of %d is "
"required based upon glance size %d "
"and database size %d. "
"Rejecting modification "
"request." %
(backup_gib,
chost.hostname,
backup_gib_min,
cgcs_gib,
database_gib
)))
def _check_controller_multi_fs(controller_fs_new_list,
@ -291,47 +297,54 @@ def _check_relative_controller_fs(controller_fs_new, controller_fs_list):
:return: None. Raise Client exception on failure.
"""
if cutils.is_virtual():
return
backup_gib = 0
database_gib = 0
cgcs_gib = 0
for fs in controller_fs_list:
if controller_fs_new and fs['name'] == controller_fs_new['name']:
fs['size'] = controller_fs_new['size']
chosts = pecan.request.dbapi.ihost_get_by_personality(
constants.CONTROLLER)
if fs['name'] == "backup":
backup_gib = fs['size']
elif fs['name'] == constants.DRBD_CGCS:
cgcs_gib = fs['size']
elif fs['name'] == "database":
database_gib = fs['size']
for chost in chosts:
# Get the current backup size for the controller host
backup_gib = 0
hostfs_list = pecan.request.dbapi.host_fs_get_by_ihost(chost.uuid)
for fs in hostfs_list:
if fs['name'] == constants.FILESYSTEM_NAME_BACKUP:
backup_gib = fs['size']
break
if backup_gib == 0:
LOG.info(
"_check_relative_controller_fs backup filesystem not yet setup")
return
for fs in controller_fs_list:
if controller_fs_new and fs['name'] == controller_fs_new['name']:
fs['size'] = controller_fs_new['size']
# Required mininum backup filesystem size
backup_gib_min = cgcs_gib + database_gib + constants.BACKUP_OVERHEAD
if fs['name'] == constants.DRBD_CGCS:
cgcs_gib = fs['size']
elif fs['name'] == constants.FILESYSTEM_NAME_DATABASE:
database_gib = fs['size']
if backup_gib < backup_gib_min:
raise wsme.exc.ClientSideError(_("backup size of %d is "
"insufficient. "
"Minimum backup size of %d is "
"required based on upon "
"glance=%d and database=%d and "
"backup overhead of %d. "
"Rejecting modification "
"request." %
(backup_gib,
backup_gib_min,
cgcs_gib,
database_gib,
constants.BACKUP_OVERHEAD
)))
if backup_gib == 0:
LOG.info(
"_check_relative_controller_fs backup filesystem not yet setup")
return
# Required mininum backup filesystem size
backup_gib_min = cgcs_gib + database_gib + constants.BACKUP_OVERHEAD
if backup_gib < backup_gib_min:
raise wsme.exc.ClientSideError(_("backup size of %d is "
"insufficient for host %s. "
"Minimum backup size of %d is "
"required based on upon "
"glance=%d and database=%d and "
"backup overhead of %d. "
"Rejecting modification "
"request." %
(backup_gib,
chost.hostname,
backup_gib_min,
cgcs_gib,
database_gib,
constants.BACKUP_OVERHEAD
)))
def _check_controller_state():
@ -343,35 +356,7 @@ def _check_controller_state():
constants.CONTROLLER)
for chost in chosts:
if (chost.administrative != constants.ADMIN_UNLOCKED or
chost.availability != constants.AVAILABILITY_AVAILABLE or
chost.operational != constants.OPERATIONAL_ENABLED):
# A node can become degraded due to not free space available in a FS
# and thus block the resize operation. If the only alarm that degrades
# a controller node is a filesystem alarm, we shouldn't block the resize
# as the resize itself will clear the degrade.
health_helper = health.Health(pecan.request.dbapi)
degrade_alarms = health_helper.get_alarms_degrade(
pecan.request.context,
alarm_ignore_list=[fm_constants.FM_ALARM_ID_FS_USAGE],
entity_instance_id_filter="controller-")
allowed_resize = False
if (not degrade_alarms and
chost.availability == constants.AVAILABILITY_DEGRADED):
allowed_resize = True
if not allowed_resize:
alarm_explanation = ""
if degrade_alarms:
alarm_explanation = "Check alarms with the following IDs: %s" % str(degrade_alarms)
raise wsme.exc.ClientSideError(
_("This operation requires controllers to be %s, %s, %s. "
"Current status is %s, %s, %s. %s." %
(constants.ADMIN_UNLOCKED, constants.OPERATIONAL_ENABLED,
constants.AVAILABILITY_AVAILABLE,
chost.administrative, chost.operational,
chost.availability, alarm_explanation)))
utils.is_host_state_valid_for_fs_resize(chost)
return True
@ -507,9 +492,7 @@ def _check_controller_multi_fs_data(context, controller_fs_list_new):
cgtsvg_growth_gib = 0
lvdisplay_keys = [constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_DATABASE],
constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_CGCS],
constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_BACKUP],
constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_SCRATCH]]
constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_CGCS]]
lvdisplay_dict = pecan.request.rpcapi.get_controllerfs_lv_sizes(context)

View File

@ -62,6 +62,7 @@ from sysinv.api.controllers.v1 import partition
from sysinv.api.controllers.v1 import ceph_mon
from sysinv.api.controllers.v1 import interface as interface_api
from sysinv.api.controllers.v1 import lvg as lvg_api
from sysinv.api.controllers.v1 import host_fs as host_fs_api
from sysinv.api.controllers.v1 import memory
from sysinv.api.controllers.v1 import node as node_api
from sysinv.api.controllers.v1 import profile
@ -483,6 +484,9 @@ class Host(base.APIBase):
ilvgs = [link.Link]
"Links to the collection of ilvgs on this ihost"
host_fs = [link.Link]
"Links to the collection of host_fs on this ihost"
isensors = [link.Link]
"Links to the collection of isensors on this ihost"
@ -709,6 +713,18 @@ class Host(base.APIBase):
bookmark=True)
]
uhost.host_fs = [link.Link.make_link('self',
pecan.request.host_url,
'ihosts',
uhost.uuid + "/host_fs"),
link.Link.make_link(
'bookmark',
pecan.request.host_url,
'ihosts',
uhost.uuid + "/host_fs",
bookmark=True)
]
uhost.isensors = [link.Link.make_link('self',
pecan.request.host_url,
'ihosts',
@ -1041,6 +1057,9 @@ class HostController(rest.RestController):
ilvgs = lvg_api.LVGController(from_ihosts=True)
"Expose ilvgs as a sub-element of ihosts"
host_fs = host_fs_api.HostFsController(from_ihosts=True)
"Expose host_fs as a sub-element of ihosts"
addresses = address_api.AddressController(parent="ihosts")
"Expose addresses as a sub-element of ihosts"
@ -4639,7 +4658,7 @@ class HostController(rest.RestController):
except Exception as e:
raise wsme.exc.ClientSideError(
_("Restore Ceph config failed: %s" % e))
elif utils.is_aio_system(pecan.request.dbapi):
elif cutils.is_aio_system(pecan.request.dbapi):
# TODO(wz): Need more work to restore ceph for AIO
LOG.info("For an AIO system, Restore crushmap...")
try:
@ -4832,7 +4851,7 @@ class HostController(rest.RestController):
if not personality:
return
if personality == constants.WORKER and utils.is_aio_duplex_system():
if personality == constants.WORKER and cutils.is_aio_duplex_system(pecan.request.dbapi):
if utils.get_worker_count() >= constants.AIO_DUPLEX_MAX_WORKERS:
msg = _("All-in-one Duplex is restricted to "
"%s workers.") % constants.AIO_DUPLEX_MAX_WORKERS
@ -5337,7 +5356,7 @@ class HostController(rest.RestController):
elif StorageBackendConfig.has_backend_configured(
pecan.request.dbapi,
constants.SB_TYPE_CEPH):
if utils.is_aio_simplex_system(pecan.request.dbapi):
if cutils.is_aio_simplex_system(pecan.request.dbapi):
# Check if host has enough OSDs configured for each tier
tiers = pecan.request.dbapi.storage_tier_get_all()
ceph_tiers = [t for t in tiers if t.type == constants.SB_TIER_TYPE_CEPH]
@ -5357,7 +5376,7 @@ class HostController(rest.RestController):
% {'replication': str(replication), 'word': word, 'tier': tier['name']})
raise wsme.exc.ClientSideError(msg)
else:
if utils.is_aio_duplex_system(pecan.request.dbapi):
if cutils.is_aio_duplex_system(pecan.request.dbapi):
host_stors = pecan.request.dbapi.istor_get_by_ihost(ihost['id'])
if not host_stors:
raise wsme.exc.ClientSideError(

View File

@ -0,0 +1,338 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import jsonpatch
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv import objects
from sysinv.openstack.common import log
from sysinv.openstack.common.gettextutils import _
LOG = log.getLogger(__name__)
class HostFsPatchType(types.JsonPatchType):
    """JSON patch type accepted by the host_fs update endpoints."""

    @staticmethod
    def mandatory_attrs():
        # No attribute is required to appear in every patch document.
        return []
class HostFs(base.APIBase):
    """API representation of a host filesystem.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of
    a host's filesystems.
    """
    # NOTE: the original docstring said "a ilvg" — a copy-paste leftover
    # from the lvg controller this file was modelled on.

    uuid = types.uuid
    "Unique UUID for this host_fs"

    name = wsme.wsattr(wtypes.text, mandatory=True)

    size = int

    logical_volume = wsme.wsattr(wtypes.text)

    forihostid = int
    "The ihostid that this host_fs belongs to"

    ihost_uuid = types.uuid
    "The UUID of the host this host_fs belongs to"

    links = [link.Link]
    "A list containing a self link and associated host_fs links"

    def __init__(self, **kwargs):
        self.fields = list(objects.host_fs.fields.keys())
        for k in self.fields:
            setattr(self, k, kwargs.get(k))

        # 'action' exists only in the API layer, not in the DB object.
        self.fields.append('action')
        setattr(self, 'action', kwargs.get('action', None))

    @classmethod
    def convert_with_links(cls, rpc_host_fs, expand=True):
        host_fs = HostFs(**rpc_host_fs.as_dict())
        if not expand:
            host_fs.unset_fields_except(['uuid', 'name', 'size',
                                         'logical_volume',
                                         'created_at', 'updated_at',
                                         'ihost_uuid', 'forihostid'])

        # never expose the ihost_id attribute, allow exposure for now
        host_fs.forihostid = wtypes.Unset
        host_fs.links = [link.Link.make_link('self', pecan.request.host_url,
                                             'host_fs', host_fs.uuid),
                         link.Link.make_link('bookmark',
                                             pecan.request.host_url,
                                             'host_fs', host_fs.uuid,
                                             bookmark=True)]
        return host_fs
class HostFsCollection(collection.Collection):
    """API representation of a collection of host_fs."""

    host_fs = [HostFs]
    "A list containing host_fs objects"

    def __init__(self, **kwargs):
        self._type = 'host_fs'

    @classmethod
    def convert_with_links(cls, rpc_host_fs, limit, url=None,
                           expand=False, **kwargs):
        """Wrap a list of RPC objects into a paginated API collection."""
        coll = HostFsCollection()
        coll.host_fs = [HostFs.convert_with_links(item, expand)
                        for item in rpc_host_fs]
        coll.next = coll.get_next(limit, url=url, **kwargs)
        return coll
def _calculate_requested_growth(host_fs_list, host_fs_list_new):
""" Check host filesystem data and return growth
returns: cgtsvg_growth_gib
"""
cgtsvg_growth_gib = 0
for fs in host_fs_list_new:
for fs_current in host_fs_list:
if fs_current.name == fs.name:
orig = int(float(fs_current.size))
new = int(fs.size)
LOG.info(
"_calculate_requested_growth orig=%s: %s" % (orig, new))
if orig > new:
raise wsme.exc.ClientSideError(_("'%s' must be at least: "
"%s" % (fs.name, orig)))
cgtsvg_growth_gib += (new - orig)
return cgtsvg_growth_gib
LOCK_NAME = 'HostFsController'


class HostFsController(rest.RestController):
    """REST controller for host_fs."""

    _custom_actions = {
        'detail': ['GET'],
        'update_many': ['PUT'],
    }

    def __init__(self, from_ihosts=False):
        # True when mounted under /ihosts/<uuid>/host_fs
        self._from_ihosts = from_ihosts

    def _get_host_fs_collection(self, ihost_uuid, marker, limit, sort_key,
                                sort_dir, expand=False, resource_url=None):
        """Build a (possibly paginated) collection of host filesystems."""
        if self._from_ihosts and not ihost_uuid:
            raise exception.InvalidParameterValue(_(
                "Host id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            # Fix: the pagination marker is a host_fs uuid; the original
            # looked it up as an lvg object (copy-paste from the lvg
            # controller).
            marker_obj = objects.host_fs.get_by_uuid(
                pecan.request.context,
                marker)

        if ihost_uuid:
            host_fs = pecan.request.dbapi.host_fs_get_by_ihost(
                ihost_uuid, limit,
                marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
        else:
            host_fs = pecan.request.dbapi.host_fs_get_list(limit, marker_obj,
                                                           sort_key=sort_key,
                                                           sort_dir=sort_dir)

        return HostFsCollection.convert_with_links(host_fs, limit,
                                                   url=resource_url,
                                                   expand=expand,
                                                   sort_key=sort_key,
                                                   sort_dir=sort_dir)

    @wsme_pecan.wsexpose(HostFsCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def get_all(self, ihost_uuid=None, marker=None, limit=None,
                sort_key='id', sort_dir='asc'):
        """Retrieve a list of filesystems for the given host."""
        return self._get_host_fs_collection(ihost_uuid, marker, limit,
                                            sort_key, sort_dir)

    @wsme_pecan.wsexpose(HostFsCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, ihost_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of filesystems for the given host with detail."""
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "host_fs":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['host_fs', 'detail'])
        return self._get_host_fs_collection(ihost_uuid,
                                            marker, limit,
                                            sort_key, sort_dir,
                                            expand, resource_url)

    @wsme_pecan.wsexpose(HostFs, types.uuid)
    def get_one(self, host_fs_uuid):
        """Retrieve the filesystem information about the given host."""
        LOG.info("get one: %s" % host_fs_uuid)
        if self._from_ihosts:
            raise exception.OperationNotPermitted

        # Fix: fetch a host_fs object; the original fetched an lvg object
        # by this uuid, which would fail or return the wrong resource.
        rpc_host_fs = objects.host_fs.get_by_uuid(pecan.request.context,
                                                  host_fs_uuid)
        return HostFs.convert_with_links(rpc_host_fs)

    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [HostFsPatchType])
    @wsme_pecan.wsexpose(HostFs, types.uuid,
                         body=[HostFsPatchType])
    def patch(self, host_fs_uuid, patch):
        """Per-filesystem patch is not supported; use update_many."""
        raise exception.OperationNotPermitted

    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [HostFsPatchType])
    @wsme_pecan.wsexpose(None, types.uuid, body=[[HostFsPatchType]])
    def update_many(self, ihost_uuid, patch):
        """Update existing filesystems for a host."""
        LOG.info("patch_data: %s" % patch)

        # Validate input filesystem names
        current_host_fs_list = pecan.request.dbapi.host_fs_get_by_ihost(ihost_uuid)
        host = pecan.request.dbapi.ihost_get(ihost_uuid)

        modified_fs = []

        # Drop any '/action' patch lists; they carry no filesystem data.
        # Fix: iterate over a copy — the original removed entries from
        # 'patch' while iterating it, which skips elements.
        for p_list in patch[:]:
            p_obj_list = jsonpatch.JsonPatch(p_list)
            for p_obj in p_obj_list:
                if p_obj['path'] == '/action':
                    patch.remove(p_list)
                    break

        current_names = [fs['name'] for fs in current_host_fs_list]

        for p_list in patch:
            p_obj_list = jsonpatch.JsonPatch(p_list)
            # Fix: initialize so a patch list without a '/name' or '/size'
            # entry fails validation instead of raising NameError.
            fs_name = fs_display_name = size = None
            for p_obj in p_obj_list:
                if p_obj['path'] == '/name':
                    fs_display_name = p_obj['value']
                    fs_name = fs_display_name
                elif p_obj['path'] == '/size':
                    size = p_obj['value']

            if fs_name not in current_names:
                msg = _("HostFs update failed: invalid filesystem "
                        "'%s' " % fs_display_name)
                raise wsme.exc.ClientSideError(msg)
            elif not cutils.is_int_like(size):
                msg = _("HostFs update failed: filesystem '%s' "
                        "size must be an integer " % fs_display_name)
                raise wsme.exc.ClientSideError(msg)

            current_size = [fs['size'] for
                            fs in current_host_fs_list
                            if fs['name'] == fs_name][0]

            if int(size) <= int(current_size):
                msg = _("HostFs update failed: size for filesystem '%s' "
                        "should be bigger than %s " % (
                            fs_display_name, current_size))
                raise wsme.exc.ClientSideError(msg)

            modified_fs += [fs_name]

        # Build the prospective filesystem list with the patches applied.
        host_fs_list_new = []
        for fs in current_host_fs_list:
            replaced = False
            for p_list in patch:
                p_obj_list = jsonpatch.JsonPatch(p_list)
                for p_obj in p_obj_list:
                    if p_obj['value'] == fs['name']:
                        try:
                            host_fs_list_new += [HostFs(
                                **jsonpatch.apply_patch(fs.as_dict(), p_obj_list))]
                            replaced = True
                            break
                        except utils.JSONPATCH_EXCEPTIONS as e:
                            raise exception.PatchError(patch=p_list, reason=e)
                if replaced:
                    break
            if not replaced:
                host_fs_list_new += [fs]

        requested_growth_gib = \
            _calculate_requested_growth(current_host_fs_list, host_fs_list_new)

        LOG.info("Requested growth in GiB: %s" % requested_growth_gib)

        if utils.is_host_state_valid_for_fs_resize(host):
            cgtsvg_free_space_gib = utils.get_node_cgtsvg_limit(host)

            if requested_growth_gib > cgtsvg_free_space_gib:
                msg = _("HostFs update failed: Not enough free space on %s. "
                        "Current free space %s GiB, "
                        "requested total increase %s GiB" %
                        (constants.LVG_CGTS_VG, cgtsvg_free_space_gib, requested_growth_gib))
                LOG.warning(msg)
                raise wsme.exc.ClientSideError(msg)

        # Persist the new sizes, then ask the conductor to apply them.
        for fs in host_fs_list_new:
            if fs.name in modified_fs:
                value = {'size': fs.size}
                pecan.request.dbapi.host_fs_update(fs.uuid, value)

        try:
            # perform rpc to conductor to perform config apply
            pecan.request.rpcapi.update_host_filesystem_config(
                pecan.request.context,
                host=host,
                filesystem_list=modified_fs,)
        except Exception as e:
            msg = _("Failed to update filesystem size for %s" % host.name)
            LOG.error("%s with patch %s with exception %s" % (msg, patch, e))
            raise wsme.exc.ClientSideError(msg)

    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, host_fs_uuid):
        """Delete a host_fs."""
        raise exception.OperationNotPermitted

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(HostFs, body=HostFs)
    def post(self, host_fs):
        """Create a new host_fs."""
        raise exception.OperationNotPermitted

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
# Copyright (c) 2018-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -19,7 +19,6 @@ from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import patch_api
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils
@ -345,7 +344,7 @@ class KubeAppController(rest.RestController):
pecan.request.context, name, version)
target_app.status = constants.APP_UPDATE_IN_PROGRESS
target_app.save()
if utils.is_aio_simplex_system(pecan.request.dbapi):
if cutils.is_aio_simplex_system(pecan.request.dbapi):
operation = constants.APP_APPLY_OP
else:
operation = constants.APP_ROLLBACK_OP

View File

@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
@ -573,7 +573,7 @@ def _check_host(stor):
"System must have a %s backend" % constants.SB_TYPE_CEPH))
# semantic check: whether at least 2 unlocked hosts are monitors
if not utils.is_aio_system(pecan.request.dbapi):
if not cutils.is_aio_system(pecan.request.dbapi):
ceph_helper = ceph.CephApiOperator()
num_monitors, required_monitors, __ = \
ceph_helper.get_monitors_status(pecan.request.dbapi)

View File

@ -386,7 +386,7 @@ def _discover_and_validate_backend_config_data(caps_dict, confirmed):
raise wsme.exc.ClientSideError("Missing required backend "
"parameter: %s" % k)
if utils.is_aio_simplex_system(pecan.request.dbapi):
if cutils.is_aio_simplex_system(pecan.request.dbapi):
supported_replication = constants.AIO_SX_CEPH_REPLICATION_FACTOR_SUPPORTED
else:
supported_replication = constants.CEPH_REPLICATION_FACTOR_SUPPORTED
@ -585,7 +585,7 @@ def _check_backend_ceph(req, storage_ceph, confirmed=False):
if not confirmed and api_helper.is_primary_ceph_tier(tier.name):
_options_str = _get_options_string(storage_ceph)
replication = capabilities[constants.CEPH_BACKEND_REPLICATION_CAP]
if utils.is_aio_simplex_system(pecan.request.dbapi):
if cutils.is_aio_simplex_system(pecan.request.dbapi):
what = 'osds'
else:
what = 'storage nodes'
@ -666,7 +666,7 @@ def _check_and_update_rbd_provisioner(new_storceph, remove=False):
validate_k8s_namespaces(K8RbdProvisioner.getListFromNamespaces(new_storceph))
# Check if cluster is configured
if not utils.is_aio_system(pecan.request.dbapi):
if not cutils.is_aio_system(pecan.request.dbapi):
# On multinode is enough if storage hosts are available
storage_hosts = pecan.request.dbapi.ihost_get_by_personality(
constants.STORAGE
@ -710,7 +710,7 @@ def _apply_backend_changes(op, sb_obj):
def _set_defaults(storage_ceph):
if utils.is_aio_simplex_system(pecan.request.dbapi):
if cutils.is_aio_simplex_system(pecan.request.dbapi):
def_replication = str(constants.AIO_SX_CEPH_REPLICATION_FACTOR_DEFAULT)
else:
def_replication = str(constants.CEPH_REPLICATION_FACTOR_DEFAULT)
@ -770,7 +770,7 @@ def _set_defaults(storage_ceph):
# set state and task accordingly.
if sc['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
sc['state'] = constants.SB_STATE_CONFIGURED
if utils.is_aio_simplex_system(pecan.request.dbapi):
if cutils.is_aio_simplex_system(pecan.request.dbapi):
sc['task'] = None
else:
sc['task'] = constants.SB_TASK_PROVISION_STORAGE
@ -921,7 +921,7 @@ def _pre_patch_checks(storage_ceph_obj, patch_obj):
def _check_replication_number(new_cap, orig_cap):
ceph_task = StorageBackendConfig.get_ceph_backend_task(pecan.request.dbapi)
ceph_state = StorageBackendConfig.get_ceph_backend_state(pecan.request.dbapi)
if utils.is_aio_simplex_system(pecan.request.dbapi):
if cutils.is_aio_simplex_system(pecan.request.dbapi):
# On single node install we allow both increasing and decreasing
# replication on the fly.
if ceph_state != constants.SB_STATE_CONFIGURED:
@ -932,7 +932,7 @@ def _check_replication_number(new_cap, orig_cap):
(ceph_state, constants.SB_STATE_CONFIGURED)))
else:
if utils.is_aio_duplex_system(pecan.request.dbapi):
if cutils.is_aio_duplex_system(pecan.request.dbapi):
# Replication change is not allowed on two node configuration
raise wsme.exc.ClientSideError(
_("Can not modify ceph replication factor on "
@ -1168,7 +1168,7 @@ def _update_pool_quotas(storceph):
def _check_object_gateway_install(dbapi):
# Ensure we have the required number of monitors
if utils.is_aio_system(dbapi):
if cutils.is_aio_system(dbapi):
api_helper.check_minimal_number_of_controllers(1)
else:
api_helper.check_minimal_number_of_controllers(2)

View File

@ -417,7 +417,7 @@ def _check(self, op, tier):
"initial configuration to be complete and controller node unlocked.")
raise wsme.exc.ClientSideError(msg)
if utils.is_aio_system(pecan.request.dbapi):
if cutils.is_aio_system(pecan.request.dbapi):
# Deny adding secondary tiers if primary tier backend is not configured
# for cluster.
clusterId = tier.get('forclusterid') or tier.get('cluster_uuid')

View File

@ -32,10 +32,13 @@ import tsconfig.tsconfig as tsc
from oslo_config import cfg
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import health
from sysinv.helm import common as helm_common
from sysinv.openstack.common.gettextutils import _
from sysinv.openstack.common import log
from fm_api import constants as fm_constants
LOG = log.getLogger(__name__)
CONF = cfg.CONF
@ -250,6 +253,46 @@ def is_aio_simplex_host_unlocked(host):
host['invprovision'] != constants.PROVISIONING)
def is_host_state_valid_for_fs_resize(host):
    """Check that a host is in a state that permits a filesystem resize.

    Returns True when the host is unlocked/enabled/available, or when the
    only thing degrading it is a filesystem-usage alarm (which the resize
    itself is expected to clear). Otherwise raises ClientSideError.
    """
    host_is_healthy = (host.administrative == constants.ADMIN_UNLOCKED and
                       host.availability == constants.AVAILABILITY_AVAILABLE and
                       host.operational == constants.OPERATIONAL_ENABLED)
    if host_is_healthy:
        return True

    # A node can become degraded because a filesystem ran out of free
    # space, which would otherwise block the resize operation. If the
    # only alarms degrading the node are filesystem alarms, the resize
    # should be allowed since it will clear the degrade itself.
    health_helper = health.Health(pecan.request.dbapi)
    degrade_alarms = health_helper.get_alarms_degrade(
        pecan.request.context,
        alarm_ignore_list=[fm_constants.FM_ALARM_ID_FS_USAGE],
        entity_instance_id_filter=(host.personality + "-"))

    # Resize is still permitted when the node is merely degraded and no
    # non-filesystem alarm contributes to that degrade.
    resize_permitted = (not degrade_alarms and
                        host.availability == constants.AVAILABILITY_DEGRADED)
    if not resize_permitted:
        alarm_explanation = ""
        if degrade_alarms:
            alarm_explanation = "Check alarms with the following IDs: %s" % str(degrade_alarms)
        raise wsme.exc.ClientSideError(
            _("This operation requires controllers to be %s, %s, %s. "
              "Current status is %s, %s, %s. %s." %
              (constants.ADMIN_UNLOCKED, constants.OPERATIONAL_ENABLED,
               constants.AVAILABILITY_AVAILABLE,
               host.administrative, host.operational,
               host.availability, alarm_explanation)))
    return True
def get_vswitch_type():
system = pecan.request.dbapi.isystem_get_one()
return system.capabilities.get('vswitch_type')
@ -409,36 +452,43 @@ def get_distributed_cloud_role(dbapi=None):
return system.distributed_cloud_role
def is_aio_system(dbapi=None):
    """Return True when the system is an All-in-one (AIO) build.

    Falls back to the pecan request's dbapi when none is supplied.
    """
    db = dbapi or pecan.request.dbapi
    return db.isystem_get_one().system_type == constants.TIS_AIO_BUILD
def is_aio_simplex_system(dbapi=None):
    """Return True for an AIO build running in simplex mode.

    Falls back to the pecan request's dbapi when none is supplied.
    """
    db = dbapi or pecan.request.dbapi
    system = db.isystem_get_one()
    if system.system_type != constants.TIS_AIO_BUILD:
        return False
    return system.system_mode == constants.SYSTEM_MODE_SIMPLEX
def is_aio_duplex_system(dbapi=None):
    """Return True for an AIO build in duplex or duplex-direct mode.

    Falls back to the pecan request's dbapi when none is supplied.
    """
    db = dbapi or pecan.request.dbapi
    system = db.isystem_get_one()
    duplex_modes = (constants.SYSTEM_MODE_DUPLEX,
                    constants.SYSTEM_MODE_DUPLEX_DIRECT)
    return (system.system_type == constants.TIS_AIO_BUILD and
            system.system_mode in duplex_modes)
def get_worker_count(dbapi=None):
    """Return the number of hosts with the worker personality.

    Falls back to the pecan request's dbapi when none is supplied.
    """
    db = dbapi or pecan.request.dbapi
    workers = db.ihost_get_by_personality(constants.WORKER)
    return len(workers)
def get_node_cgtsvg_limit(host):
    """Calculate free space for host filesystem

    returns: cgtsvg_max_free_gib -- free space in the cgts-vg volume group
    in GiB (0 when no matching, sized volume group is found)
    """
    cgtsvg_free_mib = 0

    # Refuse to compute a limit while any cgts-vg physical volume is still
    # unprovisioned: the free-space figure would not yet be meaningful.
    ipvs = pecan.request.dbapi.ipv_get_by_ihost(host.uuid)
    for ipv in ipvs:
        if (ipv.lvm_vg_name == constants.LVG_CGTS_VG and
                ipv.pv_state != constants.PROVISIONED):
            msg = _(
                "There are still unprovisioned physical volumes on '%s'. "
                "Cannot perform operation." % host.hostname)
            raise wsme.exc.ClientSideError(msg)

    # Free space = vg_size * (free_pe / total_pe); the final division
    # presumably converts bytes to MiB (lvm_vg_size appears to be in
    # bytes -- TODO confirm against the inventory agent).
    ilvgs = pecan.request.dbapi.ilvg_get_by_ihost(host.uuid)
    for ilvg in ilvgs:
        if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
                ilvg.lvm_vg_size and ilvg.lvm_vg_total_pe):
            cgtsvg_free_mib = (int(ilvg.lvm_vg_size) * int(
                ilvg.lvm_vg_free_pe)
                / int(ilvg.lvm_vg_total_pe)) / (1024 * 1024)
            break

    # MiB -> GiB (integer arithmetic under Python 2).
    cgtsvg_max_free_gib = cgtsvg_free_mib / 1024

    LOG.info(
        "get_node_cgtsvg_limit cgtsvg_max_free_gib=%s" % cgtsvg_max_free_gib)
    return cgtsvg_max_free_gib
class SBApiHelper(object):
""" API Helper Class for manipulating Storage Backends.

View File

@ -1,7 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016, 2018 Wind River Systems, Inc.
# Copyright (c) 2016, 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -19,14 +19,11 @@ import requests
from cephclient import wrapper as ceph
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv.openstack.common import log as logging
from sysinv.api.controllers.v1.utils import is_aio_system
LOG = logging.getLogger(__name__)
@ -371,7 +368,7 @@ class CephApiOperator(object):
self._crushmap_root_mirror(self._default_tier, tier.name)
except exception.CephCrushTierAlreadyExists:
pass
if utils.is_aio_simplex_system(pecan.request.dbapi):
if cutils.is_aio_simplex_system(pecan.request.dbapi):
# Since we have a single host replication is done on OSDs
# to ensure disk based redundancy.
replicate_by = 'osd'
@ -630,7 +627,7 @@ class CephApiOperator(object):
def get_monitors_status(self, db_api):
num_inv_monitors = 0
if utils.is_aio_system(pecan.request.dbapi):
if cutils.is_aio_system(pecan.request.dbapi):
required_monitors = constants.MIN_STOR_MONITORS_AIO
else:
required_monitors = constants.MIN_STOR_MONITORS_MULTINODE
@ -675,7 +672,7 @@ class CephApiOperator(object):
num_active_monitors = len(active_monitors)
if (num_inv_monitors and num_active_monitors == 0 and
cutils.is_initial_config_complete() and
not utils.is_aio_system(pecan.request.dbapi)):
not cutils.is_aio_system(pecan.request.dbapi)):
# The active controller always has a monitor.
# We are on standard or storage, initial configuration
# was completed and Ceph is down so we can't check if
@ -713,7 +710,7 @@ def fix_crushmap(dbapi=None):
if not os.path.isfile(crushmap_flag_file):
_operator = CephApiOperator()
if not is_aio_system(dbapi):
if not cutils.is_aio_system(dbapi):
# At least two monitors have to be running on a standard deployment,
# otherwise don't even try to load the crushmap.
active_mons, required_mons, __ = _operator.get_monitors_status(dbapi)
@ -762,10 +759,10 @@ def get_ceph_storage_model(dbapi=None):
if not dbapi:
dbapi = pecan.request.dbapi
if utils.is_aio_simplex_system(dbapi):
if cutils.is_aio_simplex_system(dbapi):
return constants.CEPH_AIO_SX_MODEL
if utils.is_aio_duplex_system(dbapi):
if cutils.is_aio_duplex_system(dbapi):
return constants.CEPH_CONTROLLER_MODEL
is_storage_model = False

View File

@ -542,19 +542,6 @@ FILESYSTEM_HOSTS_SUPPORTED_LIST_DICT = {
SUPPORTED_LOGICAL_VOLUME_LIST = FILESYSTEM_LV_DICT.values()
SUPPORTED_FILEYSTEM_LIST = [
FILESYSTEM_NAME_BACKUP,
FILESYSTEM_NAME_CGCS,
FILESYSTEM_NAME_CINDER,
FILESYSTEM_NAME_DATABASE,
FILESYSTEM_NAME_EXTENSION,
FILESYSTEM_NAME_SCRATCH,
FILESYSTEM_NAME_DOCKER,
FILESYSTEM_NAME_DOCKER_DISTRIBUTION,
FILESYSTEM_NAME_PATCH_VAULT,
FILESYSTEM_NAME_ETCD,
]
SUPPORTED_REPLICATED_FILEYSTEM_LIST = [
FILESYSTEM_NAME_CGCS,
FILESYSTEM_NAME_DATABASE,

View File

@ -1615,10 +1615,12 @@ def is_filesystem_supported(fs, personality):
return False
def get_controller_fs_scratch_size():
""" Get the filesystem scratch size setup by kickstart.
def get_current_fs_size(fs_name):
""" Get the filesystem size from the lvdisplay command.
"""
volume_name = fs_name + "-lv"
args = ["lvdisplay",
"--columns",
"--options",
@ -1627,26 +1629,25 @@ def get_controller_fs_scratch_size():
"g",
"--noheading",
"--nosuffix",
"/dev/cgts-vg/scratch-lv"]
"/dev/cgts-vg/" + volume_name]
scratch_gib = 8
size_gib = 0
with open(os.devnull, "w") as fnull:
try:
lvdisplay_output = subprocess.check_output(args, stderr=fnull)
except subprocess.CalledProcessError:
raise Exception("Failed to get controller filesystem scratch size")
raise Exception("Failed to get filesystem %s size" % fs_name)
lvdisplay_dict = output_to_dict(lvdisplay_output)
scratch_gib = int(math.ceil(float(lvdisplay_dict.get('scratch-lv'))))
if not scratch_gib:
# ConfigFail
raise Exception("Unexpected scratch_gib=%s" % scratch_gib)
size_gib = int(math.ceil(float(lvdisplay_dict.get(volume_name))))
if not size_gib:
raise Exception("Unexpected size_gib=%s" % size_gib)
return scratch_gib
return size_gib
def get_controller_fs_backup_size(rootfs_device):
def get_default_controller_fs_backup_size(rootfs_device):
""" Get the filesystem backup size.
"""
@ -1654,7 +1655,8 @@ def get_controller_fs_backup_size(rootfs_device):
disk_size = int(disk_size / 1024)
if disk_size > constants.DEFAULT_SMALL_DISK_SIZE:
LOG.debug("Disk size : %s ... large disk defaults" % disk_size)
LOG.info("Disk size for %s: %s ... large disk defaults" %
(rootfs_device, disk_size))
database_storage = constants.DEFAULT_DATABASE_STOR_SIZE
@ -1664,7 +1666,8 @@ def get_controller_fs_backup_size(rootfs_device):
elif disk_size >= constants.MINIMUM_DISK_SIZE:
LOG.debug("Disk size : %s ... small disk defaults" % disk_size)
LOG.info("Disk size for %s : %s ... small disk defaults" %
(rootfs_device, disk_size))
# Due to the small size of the disk we can't provide the
# proper amount of backup space which is (database + cgcs_lv
@ -1672,7 +1675,8 @@ def get_controller_fs_backup_size(rootfs_device):
backup_lv_size = constants.DEFAULT_SMALL_BACKUP_STOR_SIZE
else:
LOG.info("Disk size : %s ... disk too small" % disk_size)
LOG.info("Disk size for %s : %s ... disk too small" %
(rootfs_device, disk_size))
raise exception.SysinvException("Disk size requirements not met.")
return backup_lv_size
@ -1726,6 +1730,12 @@ def read_filtered_directory_content(dirpath, *filters):
def get_disk_capacity_mib(device_node):
# Check if the device_node is a full path, if not assume
# /dev/<device_node>
if device_node[0] != "/":
device_node = os.path.join('/dev', device_node)
# Run command
fdisk_command = 'fdisk -l %s | grep "^Disk %s:"' % (
device_node, device_node)
@ -2037,3 +2047,21 @@ def is_inventory_config_complete(dbapi, forihostid):
return len(pvs) > 0
except Exception:
return False
def is_aio_system(dbapi):
    """Return True when the system type is All-in-one (AIO)."""
    return dbapi.isystem_get_one().system_type == constants.TIS_AIO_BUILD
def is_aio_simplex_system(dbapi):
    """Return True for an AIO system running in simplex mode."""
    system = dbapi.isystem_get_one()
    if system.system_type != constants.TIS_AIO_BUILD:
        return False
    return system.system_mode == constants.SYSTEM_MODE_SIMPLEX
def is_aio_duplex_system(dbapi):
    """Return True for an AIO system in duplex or duplex-direct mode."""
    system = dbapi.isystem_get_one()
    duplex_modes = (constants.SYSTEM_MODE_DUPLEX,
                    constants.SYSTEM_MODE_DUPLEX_DIRECT)
    return (system.system_type == constants.TIS_AIO_BUILD and
            system.system_mode in duplex_modes)

View File

@ -1,7 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016-2018 Wind River Systems, Inc.
# Copyright (c) 2016-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -28,8 +28,6 @@ from sysinv.openstack.common import log as logging
from sysinv.openstack.common import uuidutils
from sysinv.common.storage_backend_conf import StorageBackendConfig
from sysinv.api.controllers.v1 import utils
from sysinv.openstack.common.gettextutils import _
from sysinv.openstack.common import rpc
from sysinv.openstack.common.rpc.common import CommonRpcContext
@ -986,7 +984,7 @@ class CephOperator(object):
# Avoid calling the ceph rest_api until we have a minimum configuration
check_access = False
if utils.is_aio_system(self._db_api) and available_mons > 0:
if cutils.is_aio_system(self._db_api) and available_mons > 0:
# one monitor: need it available
check_access = True
elif available_mons > 1:

View File

@ -900,10 +900,10 @@ class ConductorManager(service.PeriodicService):
# Defaults for configurable install parameters
install_opts = []
boot_device = host.get('boot_device') or "sda"
boot_device = host.get('boot_device') or "/dev/sda"
install_opts += ['-b', boot_device]
rootfs_device = host.get('rootfs_device') or "sda"
rootfs_device = host.get('rootfs_device') or "/dev/sda"
install_opts += ['-r', rootfs_device]
install_output = host.get('install_output') or "text"
@ -1306,7 +1306,7 @@ class ConductorManager(service.PeriodicService):
host.availability == constants.AVAILABILITY_ONLINE):
# This must be the initial controller host unlock request.
personalities = [constants.CONTROLLER]
if not utils.is_aio_system(self.dbapi):
if not cutils.is_aio_system(self.dbapi):
# Standard system, touch the unlock ready flag
cutils.touch(constants.UNLOCK_READY_FLAG)
else:
@ -3292,15 +3292,7 @@ class ConductorManager(service.PeriodicService):
for host_fs in host_fs_list:
if host_fs.name == fs['name']:
found = True
LOG.debug("Host FS '%s' already exists" % fs['name'])
if host_fs.size != fs['size']:
LOG.info("Host FS uuid: %s changed size from %s to %s",
host_fs.uuid, host_fs.size, fs['size'])
# Update the database
try:
self.dbapi.host_fs_update(host_fs.id, fs_dict)
except Exception:
LOG.exception("Host FS Update failed")
LOG.info("Host FS '%s' already exists" % fs['name'])
break
if not found:
try:
@ -4518,8 +4510,6 @@ class ConductorManager(service.PeriodicService):
(standby_host.config_applied == standby_host.config_target or
standby_host.config_applied == standby_config_target_flipped)):
LOG.info("_controller_config_active_apply about to resize the filesystem")
if self._config_resize_filesystems(context, standby_host):
cutils.touch(CONFIG_CONTROLLER_FINI_FLAG)
@ -4551,8 +4541,6 @@ class ConductorManager(service.PeriodicService):
(standby_host.config_applied == standby_host.config_target or
standby_host.config_applied == standby_config_target_flipped)):
LOG.info(
"_controller_config_active_apply about to resize the filesystem")
if self._config_resize_filesystems(context, standby_host):
cutils.touch(CONFIG_CONTROLLER_FINI_FLAG)
@ -5485,12 +5473,6 @@ class ConductorManager(service.PeriodicService):
# map the updated file system to the runtime puppet class
classmap = {
constants.FILESYSTEM_NAME_BACKUP:
'platform::filesystem::backup::runtime',
constants.FILESYSTEM_NAME_SCRATCH:
'platform::filesystem::scratch::runtime',
constants.FILESYSTEM_NAME_DOCKER:
'platform::filesystem::docker::runtime',
constants.FILESYSTEM_NAME_DOCKER_DISTRIBUTION:
'platform::drbd::dockerdistribution::runtime',
constants.FILESYSTEM_NAME_DATABASE:
@ -5519,6 +5501,46 @@ class ConductorManager(service.PeriodicService):
config_uuid,
config_dict)
def update_host_filesystem_config(self, context,
host=None,
filesystem_list=None):
"""Update the filesystem configuration for a host"""
config_uuid = self._config_update_hosts(context,
personalities=host.personality,
host_uuids=[host.uuid])
LOG.info("update_host_filesystem_config config_uuid=%s" % config_uuid)
if filesystem_list:
# apply the manifest at runtime, otherwise a reboot is required
if os.path.isfile(CONFIG_CONTROLLER_FINI_FLAG):
os.remove(CONFIG_CONTROLLER_FINI_FLAG)
# map the updated file system to the runtime puppet class
classmap = {
constants.FILESYSTEM_NAME_BACKUP:
'platform::filesystem::backup::runtime',
constants.FILESYSTEM_NAME_SCRATCH:
'platform::filesystem::scratch::runtime',
constants.FILESYSTEM_NAME_DOCKER:
'platform::filesystem::docker::runtime',
}
puppet_class = None
if filesystem_list:
puppet_class = [classmap.get(fs) for fs in filesystem_list]
config_dict = {
"personalities": host.personality,
"classes": puppet_class,
"host_uuids": [host.uuid]
}
self._config_apply_runtime_manifest(context,
config_uuid,
config_dict)
def update_lvm_config(self, context):
personalities = [constants.CONTROLLER]
@ -5707,7 +5729,7 @@ class ConductorManager(service.PeriodicService):
'platform::ceph::runtime_base',
]
if utils.is_aio_duplex_system(self.dbapi):
if cutils.is_aio_duplex_system(self.dbapi):
# On 2 node systems we have a floating Ceph monitor.
classes.append('platform::drbd::cephmon::runtime')
classes.append('platform::drbd::runtime')
@ -6470,7 +6492,7 @@ class ConductorManager(service.PeriodicService):
active_controller = utils.HostHelper.get_active_controller(self.dbapi)
if utils.is_host_simplex_controller(active_controller):
state = constants.SB_STATE_CONFIGURED
if utils.is_aio_system(self.dbapi):
if cutils.is_aio_system(self.dbapi):
task = None
cceph.fix_crushmap(self.dbapi)
else:
@ -6736,16 +6758,14 @@ class ConductorManager(service.PeriodicService):
"""
database_storage = 0
cgcs_lv_size = 0
backup_lv_size = 0
# Add the extension storage
extension_lv_size = constants.DEFAULT_EXTENSION_STOR_SIZE
scratch_lv_size = cutils.get_controller_fs_scratch_size()
system = self.dbapi.isystem_get_one()
system_dc_role = system.get('distributed_cloud_role', None)
LOG.info("Local Region Name: %s" % system.region_name)
LOG.info("Local Region Name: %s" % system.region_name)
disk_size = cutils.get_disk_capacity_mib(rootfs_device)
disk_size = int(disk_size / 1024)
@ -6814,8 +6834,6 @@ class ConductorManager(service.PeriodicService):
database_storage = constants.DEFAULT_DATABASE_STOR_SIZE
cgcs_lv_size = constants.DEFAULT_CGCS_STOR_SIZE
backup_lv_size = database_storage + \
cgcs_lv_size + constants.BACKUP_OVERHEAD
elif disk_size >= constants.MINIMUM_DISK_SIZE:
@ -6881,25 +6899,11 @@ class ConductorManager(service.PeriodicService):
constants.DEFAULT_SMALL_DATABASE_STOR_SIZE
cgcs_lv_size = constants.DEFAULT_SMALL_CGCS_STOR_SIZE
# Due to the small size of the disk we can't provide the
# proper amount of backup space which is (database + cgcs_lv
# + BACKUP_OVERHEAD) so we are using a smaller default.
backup_lv_size = constants.DEFAULT_SMALL_BACKUP_STOR_SIZE
else:
LOG.info("Disk size : %s ... disk too small" % disk_size)
raise exception.SysinvException("Disk size requirements not met.")
data = {
'name': constants.FILESYSTEM_NAME_BACKUP,
'size': backup_lv_size,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_BACKUP],
'replicated': False,
}
LOG.info("Creating FS:%s:%s %d" % (
data['name'], data['logical_volume'], data['size']))
self.dbapi.controller_fs_create(data)
data = {
'name': constants.FILESYSTEM_NAME_CGCS,
'size': cgcs_lv_size,
@ -6922,17 +6926,6 @@ class ConductorManager(service.PeriodicService):
data['name'], data['logical_volume'], data['size']))
self.dbapi.controller_fs_create(data)
data = {
'name': constants.FILESYSTEM_NAME_SCRATCH,
'size': scratch_lv_size,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_SCRATCH],
'replicated': False,
}
LOG.info("Creating FS:%s:%s %d" % (
data['name'], data['logical_volume'], data['size']))
self.dbapi.controller_fs_create(data)
data = {
'name': constants.FILESYSTEM_NAME_EXTENSION,
'size': extension_lv_size,
@ -6944,19 +6937,6 @@ class ConductorManager(service.PeriodicService):
data['name'], data['logical_volume'], data['size']))
self.dbapi.controller_fs_create(data)
docker_lv_size = constants.KUBERNETES_DOCKER_STOR_SIZE
data = {
'name': constants.FILESYSTEM_NAME_DOCKER,
'size': docker_lv_size,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_DOCKER],
'replicated': False,
}
LOG.info("Creating FS:%s:%s %d" % (
data['name'], data['logical_volume'], data['size']))
self.dbapi.controller_fs_create(data)
# ETCD fs added to cgts-lv
etcd_lv_size = constants.ETCD_STOR_SIZE
@ -7413,8 +7393,6 @@ class ConductorManager(service.PeriodicService):
"""Resize the filesystems upon completion of storage config.
Retry in case of errors or racing issues when resizing fails."""
LOG.warn("resizing filesystems")
progress = ""
retry_attempts = 3
rc = False
@ -7422,10 +7400,7 @@ class ConductorManager(service.PeriodicService):
try:
if standby_host:
if not self._drbd_connected():
LOG.info("resizing filesystems WAIT for drbd connected")
return rc
else:
LOG.info("resizing filesystems drbd connected")
if not os.path.isfile(CFS_DRBDADM_RECONFIGURED):
progress = "drbdadm resize all"

View File

@ -804,6 +804,23 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
)
)
def update_host_filesystem_config(self, context,
host=None,
filesystem_list=None):
"""Synchronously, have the conductor update the host's filesystem.
:param context: request context.
:param host: the host to update the filesystems on.
:param filesystem_list: list of host filesystems.
"""
return self.call(
context, self.make_msg(
'update_host_filesystem_config',
host=host,
filesystem_list=filesystem_list
)
)
def update_lvm_config(self, context):
"""Synchronously, have the conductor update the LVM configuration.

View File

@ -222,8 +222,8 @@ class ihost(Base):
config_applied = Column(String(255))
config_target = Column(String(255))
boot_device = Column(String(255), default="sda")
rootfs_device = Column(String(255), default="sda")
boot_device = Column(String(255), default="/dev/sda")
rootfs_device = Column(String(255), default="/dev/sda")
install_output = Column(String(255), default="text")
console = Column(String(255), default="ttyS0,115200")
tboot = Column(String(64), default="")

View File

@ -1,12 +1,12 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
# Copyright (c) 2018-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils
from sysinv.openstack.common import log as logging
from sysinv.helm import common
from sysinv.helm import base

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2017 Wind River Systems, Inc.
# Copyright (c) 2017-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -23,24 +23,16 @@ class StoragePuppet(base.BasePuppet):
config = {}
config.update(self._get_partition_config(host))
config.update(self._get_lvm_config(host))
config.update(self._get_host_fs_config(host))
return config
def _get_filesystem_config(self):
config = {}
controller_fs_list = self.dbapi.controller_fs_get_list()
for controller_fs in controller_fs_list:
if controller_fs.name == constants.FILESYSTEM_NAME_BACKUP:
config.update({
'platform::filesystem::backup::params::lv_size':
controller_fs.size
})
elif controller_fs.name == constants.FILESYSTEM_NAME_SCRATCH:
config.update({
'platform::filesystem::scratch::params::lv_size':
controller_fs.size
})
elif controller_fs.name == constants.FILESYSTEM_NAME_DATABASE:
if controller_fs.name == constants.FILESYSTEM_NAME_DATABASE:
pgsql_gib = int(controller_fs.size) * 2
config.update({
'platform::drbd::pgsql::params::lv_size': pgsql_gib
@ -61,11 +53,6 @@ class StoragePuppet(base.BasePuppet):
'platform::drbd::patch_vault::params::lv_size':
controller_fs.size,
})
elif controller_fs.name == constants.FILESYSTEM_NAME_DOCKER:
config.update({
'platform::filesystem::docker::params::lv_size':
controller_fs.size
})
elif controller_fs.name == constants.FILESYSTEM_NAME_ETCD:
config.update({
'platform::drbd::etcd::params::lv_size':
@ -243,3 +230,23 @@ class StoragePuppet(base.BasePuppet):
def format_lvm_filter(self, devices):
filters = ['"a|%s|"' % f for f in devices] + ['"r|.*|"']
return '[ %s ]' % ', '.join(filters)
def _get_host_fs_config(self, host):
config = {}
filesystems = self.dbapi.host_fs_get_by_ihost(host.id)
for fs in filesystems:
if fs.name == constants.FILESYSTEM_NAME_BACKUP:
config.update({
'platform::filesystem::backup::params::lv_size': fs.size
})
elif fs.name == constants.FILESYSTEM_NAME_SCRATCH:
config.update({
'platform::filesystem::scratch::params::lv_size': fs.size
})
elif fs.name == constants.FILESYSTEM_NAME_DOCKER:
config.update({
'platform::filesystem::docker::params::lv_size': fs.size
})
return config