CEPH support for 2 node configuration
In order to enable Openstack's helm charts on StarlingX we need a distributed persistent storage for Kubernetes that leverages our existing configurations. Changes made: - treat single and two node configurations the same as storage configurations if Kubernetes is enabled - add support for the kube-rbd ceph pool Change-Id: I57a9368782b2cd646f618d2433537233d32cbc7b Depends-On: Ic97b9fafa752a40befe395be2cafd3096010cc5b Co-Authored-By: Stefan Dinescu <stefan.dinescu@windriver.com> Story: 2002844 Task: 26878 Signed-off-by: Stefan Dinescu <stefan.dinescu@windriver.com>
This commit is contained in:
parent
f7b0ea1a5e
commit
d5847a8f65
|
@ -1411,9 +1411,9 @@ class StorageTier(base.APIResourceWrapper):
|
|||
class StorageCeph(base.APIResourceWrapper):
|
||||
"""..."""
|
||||
|
||||
_attrs = ['cinder_pool_gib', 'glance_pool_gib', 'ephemeral_pool_gib',
|
||||
'object_pool_gib', 'object_gateway', 'uuid', 'tier_name', 'link',
|
||||
'ceph_total_space_gib']
|
||||
_attrs = ['cinder_pool_gib', 'kube_pool_gib', 'glance_pool_gib',
|
||||
'ephemeral_pool_gib', 'object_pool_gib', 'object_gateway',
|
||||
'uuid', 'tier_name', 'link', 'ceph_total_space_gib']
|
||||
|
||||
def __init__(self, apiresource):
|
||||
super(StorageCeph, self).__init__(apiresource)
|
||||
|
@ -1421,6 +1421,7 @@ class StorageCeph(base.APIResourceWrapper):
|
|||
if hasattr(self, 'uuid'):
|
||||
self._tier_name = self.tier_name
|
||||
self._cinder_pool_gib = self.cinder_pool_gib
|
||||
self._kube_pool_gib = self.kube_pool_gib
|
||||
self._glance_pool_gib = self.glance_pool_gib
|
||||
self._ephemeral_pool_gib = self.ephemeral_pool_gib
|
||||
self._object_pool_gib = self.object_pool_gib
|
||||
|
@ -1429,6 +1430,7 @@ class StorageCeph(base.APIResourceWrapper):
|
|||
else:
|
||||
self._tier_name = None
|
||||
self._cinder_pool_gib = None
|
||||
self._kube_pool_gib = None
|
||||
self._glance_pool_gib = None
|
||||
self._ephemeral_pool_gib = None
|
||||
self._object_pool_gib = None
|
||||
|
@ -1443,6 +1445,10 @@ class StorageCeph(base.APIResourceWrapper):
|
|||
def cinder_pool_gib(self):
|
||||
return self._cinder_pool_gib
|
||||
|
||||
@property
|
||||
def kube_pool_gib(self):
|
||||
return self._kube_pool_gib
|
||||
|
||||
@property
|
||||
def glance_pool_gib(self):
|
||||
return self._glance_pool_gib
|
||||
|
@ -1738,7 +1744,7 @@ def controllerfs_list(request):
|
|||
controllerfs = cgtsclient(request).controller_fs.list()
|
||||
ceph_mon_list = cgtsclient(request).ceph_mon.list()
|
||||
|
||||
if ceph_mon_list and not is_system_mode_simplex(request):
|
||||
if ceph_mon_list and not is_system_k8s_aio(request):
|
||||
controllerfs.append(ceph_mon_list[0])
|
||||
|
||||
return [ControllerFS(n) for n in controllerfs]
|
||||
|
@ -2546,3 +2552,25 @@ def get_system_type(request):
|
|||
systems = system_list(request)
|
||||
system_type = systems[0].to_dict().get('system_type')
|
||||
return system_type
|
||||
|
||||
|
||||
def is_kubernetes_config(request):
|
||||
systems = system_list(request)
|
||||
system_capabilities = systems[0].to_dict().get('capabilities')
|
||||
if system_capabilities.get('kubernetes_enabled', False):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def is_system_k8s_aio(request):
|
||||
system_type = get_system_type(request)
|
||||
|
||||
if (system_type == SYSTEM_TYPE_AIO and
|
||||
is_kubernetes_config(request)):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def is_host_with_storage(request, host_id):
|
||||
host = host_get(request, host_id)
|
||||
return 'storage' in host.subfunctions or is_system_k8s_aio(request)
|
||||
|
|
|
@ -33,17 +33,18 @@ class CreateStorageVolume(tables.LinkAction):
|
|||
return reverse(self.url, args=(host_id,))
|
||||
|
||||
def allowed(self, request, datum):
|
||||
is_system_k8s_aio = sysinv.is_system_k8s_aio(request)
|
||||
host = self.table.kwargs['host']
|
||||
self.verbose_name = _("Assign Storage Function")
|
||||
|
||||
classes = [c for c in self.classes if c != "disabled"]
|
||||
self.classes = classes
|
||||
|
||||
if host._personality != 'storage':
|
||||
if host._personality != 'storage' and not is_system_k8s_aio:
|
||||
return False
|
||||
|
||||
if host._administrative == 'unlocked':
|
||||
if 'storage' in host._subfunctions:
|
||||
if 'storage' in host._subfunctions or is_system_k8s_aio:
|
||||
if "disabled" not in self.classes:
|
||||
self.classes = [c for c in self.classes] + ['disabled']
|
||||
self.verbose_name = string_concat(self.verbose_name, ' ',
|
||||
|
@ -63,7 +64,7 @@ class CreateDiskProfile(tables.LinkAction):
|
|||
return reverse(self.url, args=(host_id,))
|
||||
|
||||
def allowed(self, request, datum):
|
||||
return True
|
||||
return not sysinv.is_system_mode_simplex(request)
|
||||
|
||||
|
||||
class CreatePartition(tables.LinkAction):
|
||||
|
|
|
@ -201,6 +201,8 @@ class AddDiskProfileView(forms.ModalFormView):
|
|||
context = super(AddDiskProfileView, self).get_context_data(**kwargs)
|
||||
context['host_id'] = self.kwargs['host_id']
|
||||
context['host'] = self.get_myhost_data()
|
||||
context['is_host_with_storage'] = sysinv.is_host_with_storage(
|
||||
self.request, self.kwargs['host_id'])
|
||||
return context
|
||||
|
||||
def get_initial(self):
|
||||
|
|
|
@ -498,6 +498,8 @@ class StorageTab(tabs.TableTab):
|
|||
redirect=redirect)
|
||||
|
||||
context['cinder_backend'] = stx_api.sysinv.get_cinder_backend(request)
|
||||
context['is_system_k8s_aio'] = \
|
||||
stx_api.sysinv.is_system_k8s_aio(request)
|
||||
|
||||
return context
|
||||
|
||||
|
|
|
@ -10,11 +10,12 @@
|
|||
{{ partitions_table.render }}
|
||||
</div>
|
||||
|
||||
{% if host.personality == "Storage" %}
|
||||
{% if host.personality == "Storage" or is_system_k8s_aio %}
|
||||
<div id="storagevolumes">
|
||||
{{ storagevolumes_table.render }}
|
||||
</div>
|
||||
{% else %}
|
||||
{% endif %}
|
||||
{% if host.personality != "Storage" %}
|
||||
<div id="localvolumegroups">
|
||||
{{ localvolumegroups_table.render }}
|
||||
</div>
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
|
||||
{{ "<br>With the following configuration:" }}
|
||||
<table class="{% block table_css_classes %}table table-bordered table-striped datatable {{ table.css_classes }}{% endblock %}">
|
||||
{% if host.stors and 'storage' in host.subfunctions %}
|
||||
{% if host.stors and is_host_with_storage %}
|
||||
<tr>
|
||||
<th>{% trans "Disk" %}</th>
|
||||
<th>{% trans "Storage" %}</th>
|
||||
|
|
|
@ -818,6 +818,13 @@ class UpdateiStoragePools(forms.SelfHandlingForm):
|
|||
min_value=0,
|
||||
widget=forms.NumberInput(attrs=js_attrs))
|
||||
|
||||
self.fields['kube_pool_gib'] = forms.IntegerField(
|
||||
label=_("Kubernetes Pool (GiB)"),
|
||||
required=True,
|
||||
help_text=_("Storage space allocated to Kubernetes in gibibytes."),
|
||||
min_value=0,
|
||||
widget=forms.NumberInput(attrs=js_attrs))
|
||||
|
||||
if self._tier_name == 'storage':
|
||||
self.fields['glance_pool_gib'] = forms.IntegerField(
|
||||
label=_("Glance Image Pool (GiB)"),
|
||||
|
@ -869,6 +876,9 @@ class UpdateiStoragePools(forms.SelfHandlingForm):
|
|||
if hasattr(storage_config, 'cinder_pool_gib'):
|
||||
STORAGE_VALUES['cinder_pool_gib'] = \
|
||||
str(storage_config._cinder_pool_gib)
|
||||
if hasattr(storage_config, 'kube_pool_gib'):
|
||||
STORAGE_VALUES['kube_pool_gib'] = \
|
||||
str(storage_config._kube_pool_gib)
|
||||
if hasattr(storage_config, 'glance_pool_gib'):
|
||||
STORAGE_VALUES['glance_pool_gib'] = \
|
||||
str(storage_config._glance_pool_gib)
|
||||
|
@ -895,6 +905,7 @@ class UpdateiStoragePools(forms.SelfHandlingForm):
|
|||
else:
|
||||
storage_config_uuid = ' '
|
||||
data = {'cinder_pool_gib': '',
|
||||
'kube_pool_gib': '',
|
||||
'glance_pool_gib': '',
|
||||
'ephemeral_pool_gib': '',
|
||||
'object_pool_gib': ''}
|
||||
|
|
|
@ -348,6 +348,10 @@ class iStoragePoolsTable(tables.DataTable):
|
|||
'cinder_pool_gib',
|
||||
verbose_name=_('Cinder Volume Storage (GiB)'))
|
||||
|
||||
kube_pool_gib = tables.Column(
|
||||
'kube_pool_gib',
|
||||
verbose_name=_('Kubernetes Storage (GiB)'))
|
||||
|
||||
glance_pool_gib = tables.Column(
|
||||
'glance_pool_gib',
|
||||
verbose_name=_('Glance Image Storage (GiB)'))
|
||||
|
|
|
@ -13,15 +13,26 @@
|
|||
var glance = document.getElementById('id_glance_pool_gib'),
|
||||
ephemeral = document.getElementById('id_ephemeral_pool_gib'),
|
||||
cinder = document.getElementById('id_cinder_pool_gib'),
|
||||
kube = document.getElementById('id_kube_pool_gib'),
|
||||
obj = document.getElementById('id_object_pool_gib');
|
||||
|
||||
var dynamic_quota = document.getElementById('total_quota');
|
||||
|
||||
current_quota_total = 0
|
||||
if (glance != null)
|
||||
current_quota_total += (parseInt(glance.value) || 0)
|
||||
|
||||
current_quota_total = (parseInt(glance.value) || 0)
|
||||
+ (parseInt(cinder.value) || 0)
|
||||
+ (parseInt(ephemeral.value) || 0)
|
||||
+ (parseInt(obj.value) || 0);
|
||||
if (ephemeral != null)
|
||||
current_quota_total += (parseInt(ephemeral.value) || 0)
|
||||
|
||||
if (cinder != null)
|
||||
current_quota_total += (parseInt(cinder.value) || 0)
|
||||
|
||||
if (kube != null)
|
||||
current_quota_total += (parseInt(kube.value) || 0)
|
||||
|
||||
if (obj != null)
|
||||
current_quota_total += (parseInt(obj.value) || 0);
|
||||
|
||||
total_tier_size = parseInt(document.getElementById('total_tier_size').innerHTML) || 0;
|
||||
dynamic_quota.innerHTML = current_quota_total;
|
||||
|
@ -45,13 +56,19 @@
|
|||
<p>{% trans "From here you can update the quota allocated to the pools associated with this Ceph storage tier." %}</p>
|
||||
<p>{% trans "A quota value of 0 will allow the storage associated with that pool to consume all available space in the tier." %}</p>
|
||||
{% if tier_name == 'storage' %}
|
||||
<p>{% trans "A quota value of 0 is not allowed for the cinder pool or glance pool." %}</p>
|
||||
<p>{% trans "A quota value of 0 is not allowed for the cinder, glance or kube pool." %}</p>
|
||||
<p>{% trans "The sum of the desired quotas must equal 100% of the cluster size." %}</p>
|
||||
{% else %}
|
||||
<p>{% trans "A quota value of 0 is allowed for the cinder pool." %}</p>
|
||||
<p>{% trans "A quota value of 0 is allowed for the cinder or kube pool." %}</p>
|
||||
{% endif %}
|
||||
|
||||
<p> <strong><span id="quota_text" style="color:red"><span id="total_quota">{{ configured_quota }}</span> GiB out of <span id="total_tier_size">{{ tier_total }}</span> GiB configured</span></strong></p>
|
||||
<p>
|
||||
<strong><span id="quota_text" style="color:red">
|
||||
<span id="total_quota">{{ configured_quota }}</span> GiB out of
|
||||
<span id="total_tier_size">{{ tier_total }}</span>
|
||||
GiB configured</span>
|
||||
</strong>
|
||||
</p>
|
||||
</div>
|
||||
{% endblock %}
|
||||
|
||||
|
|
|
@ -324,6 +324,8 @@ class UpdateiStoragePoolsView(forms.ModalFormView):
|
|||
# check before adding each value in case it is None
|
||||
if s.cinder_pool_gib:
|
||||
ctxt['configured_quota'] += s.cinder_pool_gib
|
||||
if s.kube_pool_gib:
|
||||
ctxt['configured_quota'] += s.kube_pool_gib
|
||||
if s.glance_pool_gib:
|
||||
ctxt['configured_quota'] += s.glance_pool_gib
|
||||
if s.ephemeral_pool_gib:
|
||||
|
@ -338,6 +340,7 @@ class UpdateiStoragePoolsView(forms.ModalFormView):
|
|||
form_data = {'uuid': ' ',
|
||||
'tier_name': None,
|
||||
'cinder_pool_gib': None,
|
||||
'kube_pool_gib': None,
|
||||
'glance_pool_gib': None,
|
||||
'ephemeral_pool_gib': None,
|
||||
'object_pool_gib': None}
|
||||
|
@ -359,6 +362,9 @@ class UpdateiStoragePoolsView(forms.ModalFormView):
|
|||
if 'cinder_pool_gib' in storage_attrs:
|
||||
form_data['cinder_pool_gib'] = s.cinder_pool_gib
|
||||
|
||||
if 'kube_pool_gib' in storage_attrs:
|
||||
form_data['kube_pool_gib'] = s.kube_pool_gib
|
||||
|
||||
if 'glance_pool_gib' in storage_attrs:
|
||||
form_data['glance_pool_gib'] = s.glance_pool_gib
|
||||
|
||||
|
|
Loading…
Reference in New Issue