Ceph for standard: Puppet functional changes
In order to enable OpenStack's Helm charts on StarlingX we need a distributed persistent storage for Kubernetes that leverages our existing storage configurations. For this stage we will enable Ceph's RBD to work with the Kubernetes RBD provisioner through a new Helm chart. Since RBD will be the persistent storage solution, Ceph support has to be extended to the one-node, two-node and Standard configurations. This commit enables Ceph on a Standard configuration without dedicated storage by allowing users to add the third Ceph monitor to a worker node. It implements the Puppet functional part that enables Ceph to work on a Standard deployment. Details: * Update ceph.conf with the new monitor by removing unused ones * Reserve space on any worker for ceph-mon-lv; this will make sure that the user is able to configure a ceph-mon at any time without the need to extend platform-vg. * In System Inventory, make sure the correct IP addresses are passed to the Puppet YAML files. Change-Id: I7b8a01a7f2cf18f20e3509c2416f038580b4a071 Implements: containerization-2002844-CEPH-persistent-storage-backend-for-Kubernetes Story: 2002844 Task: 28723 Depends-On: https://review.openstack.org/629166/ Signed-off-by: Ovidiu Poncea <Ovidiu.Poncea@windriver.com>
This commit is contained in:
parent
0831a616b3
commit
1b776e7aeb
|
@ -104,3 +104,6 @@ platform::collectd::params::plugins: ['fm_notifier', 'mtce_notifier']
|
|||
platform::collectd::params::mtce_notifier_port: 2101
|
||||
platform::collectd::params::log_traces: true
|
||||
platform::collectd::params::encoding: "utf-8"
|
||||
|
||||
# ceph
|
||||
platform::ceph::params::mon_lv_size_reserved: 20
|
||||
|
|
|
@ -26,7 +26,6 @@ include ::platform::patching
|
|||
include ::platform::remotelogging
|
||||
include ::platform::mtce
|
||||
include ::platform::sysinv
|
||||
include ::platform::ceph
|
||||
include ::platform::devices
|
||||
include ::platform::grub
|
||||
include ::platform::collectd
|
||||
|
@ -37,6 +36,9 @@ include ::platform::kubernetes::worker
|
|||
include ::platform::multipath
|
||||
include ::platform::client
|
||||
|
||||
include ::platform::ceph
|
||||
include ::platform::ceph::monitor
|
||||
|
||||
include ::openstack::client
|
||||
include ::openstack::neutron
|
||||
include ::openstack::neutron::agents
|
||||
|
|
|
@ -0,0 +1,8 @@
|
|||
# Custom fact: the list of Ceph monitor names currently declared in
# /etc/ceph/ceph.conf. Each "[mon.<name>]" section header contributes one
# entry (the <name> part). Puppet uses this to detect stale monitor
# sections that must be removed when the monitor set changes.
Facter.add("configured_ceph_monitors") do
  setcode do
    # Collect only the monitor section headers from the config file.
    monitor_sections = IO.readlines("/etc/ceph/ceph.conf").select { |line| line =~ /\[mon\..*\]/ }
    # Extract the monitor name from each "[mon.<name>]" header.
    monitor_sections.map { |section| section.scan(/\[mon\.(.*)\]/).last.first }
  end
end
|
|
@ -5,6 +5,7 @@ class platform::ceph::params(
|
|||
$authentication_type = 'none',
|
||||
$mon_lv_name = 'ceph-mon-lv',
|
||||
$mon_lv_size = 0,
|
||||
$mon_lv_size_reserved = 20,
|
||||
$mon_fs_type = 'ext4',
|
||||
$mon_fs_options = ' ',
|
||||
$mon_mountpoint = '/var/lib/ceph/mon',
|
||||
|
@ -37,6 +38,7 @@ class platform::ceph::params(
|
|||
$rgw_gc_processor_period = '300',
|
||||
$restapi_public_addr = undef,
|
||||
$configure_ceph_mon_info = false,
|
||||
$ceph_config_file = '/etc/ceph/ceph.conf',
|
||||
$ceph_config_ready_path = '/var/run/.ceph_started',
|
||||
$node_ceph_configured_flag = '/etc/platform/.node_ceph_configured',
|
||||
) { }
|
||||
|
@ -58,7 +60,7 @@ class platform::ceph
|
|||
$mon_initial_members = $floating_mon_host
|
||||
}
|
||||
} else {
|
||||
# Multinode, any 2 monitors form a cluster
|
||||
# Multinode & standard, any 2 monitors form a cluster
|
||||
$mon_initial_members = undef
|
||||
}
|
||||
|
||||
|
@ -89,15 +91,48 @@ class platform::ceph
|
|||
}
|
||||
}
|
||||
} else {
|
||||
# Multinode has 3 monitors.
|
||||
# Multinode & standard have 3 monitors
|
||||
Class['::ceph']
|
||||
-> ceph_config {
|
||||
"mon.${mon_0_host}/host": value => $mon_0_host;
|
||||
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
|
||||
"mon.${mon_1_host}/host": value => $mon_1_host;
|
||||
"mon.${mon_1_host}/mon_addr": value => $mon_1_addr;
|
||||
"mon.${mon_2_host}/host": value => $mon_2_host;
|
||||
"mon.${mon_2_host}/mon_addr": value => $mon_2_addr;
|
||||
}
|
||||
if $mon_2_host {
|
||||
Class['::ceph']
|
||||
-> ceph_config {
|
||||
"mon.${mon_2_host}/host": value => $mon_2_host;
|
||||
"mon.${mon_2_host}/mon_addr": value => $mon_2_addr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Remove old, no longer in use, monitor hosts from Ceph's config file
|
||||
$valid_monitors = [ $mon_0_host, $mon_1_host, $mon_2_host ]
|
||||
|
||||
$::configured_ceph_monitors.each |Integer $index, String $monitor| {
|
||||
if ! ($monitor in $valid_monitors) {
|
||||
notice("Removing ${monitor} from ${ceph_config_file}")
|
||||
|
||||
# Remove all monitor settings of a section
|
||||
$mon_settings = {
|
||||
"mon.${monitor}" => {
|
||||
'public_addr' => { 'ensure' => 'absent' },
|
||||
'host' => { 'ensure' => 'absent' },
|
||||
'mon_addr' => { 'ensure' => 'absent' },
|
||||
}
|
||||
}
|
||||
$defaults = { 'path' => $ceph_config_file }
|
||||
create_ini_settings($mon_settings, $defaults)
|
||||
|
||||
# Remove section header
|
||||
Ini_setting<| |>
|
||||
-> file_line { "[mon.${monitor}]":
|
||||
ensure => absent,
|
||||
path => $ceph_config_file,
|
||||
line => "[mon.${monitor}]"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -130,8 +165,10 @@ class platform::ceph::post
|
|||
class platform::ceph::monitor
|
||||
inherits ::platform::ceph::params {
|
||||
|
||||
include ::platform::kubernetes::params
|
||||
$system_mode = $::platform::params::system_mode
|
||||
$system_type = $::platform::params::system_type
|
||||
$k8s_enabled = $::platform::kubernetes::params::enabled
|
||||
|
||||
if $service_enabled {
|
||||
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
|
||||
|
@ -145,13 +182,26 @@ class platform::ceph::monitor
|
|||
}
|
||||
} else {
|
||||
# Simplex, multinode. Ceph is pmon managed.
|
||||
$configure_ceph_mon = true
|
||||
if $::hostname == $mon_0_host or $::hostname == $mon_1_host or $::hostname == $mon_2_host {
|
||||
$configure_ceph_mon = true
|
||||
} else {
|
||||
$configure_ceph_mon = false
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
$configure_ceph_mon = false
|
||||
}
|
||||
|
||||
if $::personality == 'worker' and ! $configure_ceph_mon and $k8s_enabled {
|
||||
# Reserve space for ceph-mon on all worker nodes.
|
||||
include ::platform::filesystem::params
|
||||
logical_volume { $mon_lv_name:
|
||||
ensure => present,
|
||||
volume_group => $::platform::filesystem::params::vg_name,
|
||||
size => "${mon_lv_size_reserved}G",
|
||||
} -> Class['platform::filesystem::docker']
|
||||
}
|
||||
|
||||
if $configure_ceph_mon {
|
||||
file { '/var/lib/ceph':
|
||||
ensure => 'directory',
|
||||
|
@ -173,6 +223,10 @@ class platform::ceph::monitor
|
|||
fs_options => $mon_fs_options,
|
||||
} -> Class['::ceph']
|
||||
|
||||
if $k8s_enabled and $::personality == 'worker' {
|
||||
Platform::Filesystem[$mon_lv_name] -> Class['platform::filesystem::docker']
|
||||
}
|
||||
|
||||
file { '/etc/pmon.d/ceph.conf':
|
||||
ensure => link,
|
||||
target => '/etc/ceph/ceph.conf.pmon',
|
||||
|
@ -185,19 +239,11 @@ class platform::ceph::monitor
|
|||
# ensure configuration is complete before creating monitors
|
||||
Class['::ceph'] -> Ceph::Mon <| |>
|
||||
|
||||
# Start service on AIO SX and on active controller
|
||||
# to allow in-service configuration.
|
||||
if str2bool($::is_controller_active) or $system_type == 'All-in-one' {
|
||||
$service_ensure = 'running'
|
||||
} else {
|
||||
$service_ensure = 'stopped'
|
||||
}
|
||||
|
||||
# default configuration for all ceph monitor resources
|
||||
Ceph::Mon {
|
||||
fsid => $cluster_uuid,
|
||||
authentication_type => $authentication_type,
|
||||
service_ensure => $service_ensure,
|
||||
service_ensure => 'running'
|
||||
}
|
||||
|
||||
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
|
||||
|
|
|
@ -219,17 +219,28 @@ class StorageBackendConfig(object):
|
|||
network_type): 'ceph-mon-0-ip',
|
||||
'%s-%s' % (constants.CONTROLLER_1_HOSTNAME,
|
||||
network_type): 'ceph-mon-1-ip',
|
||||
'%s-%s' % (constants.STORAGE_0_HOSTNAME,
|
||||
network_type): 'ceph-mon-2-ip'
|
||||
}
|
||||
|
||||
ceph_mons = dbapi.ceph_mon_get_list()
|
||||
for ceph_mon in ceph_mons:
|
||||
if ceph_mon['hostname'] == constants.CONTROLLER_0_HOSTNAME:
|
||||
targets.update({'%s-%s' % (constants.CONTROLLER_0_HOSTNAME,
|
||||
network_type): 'ceph-mon-0-ip'})
|
||||
elif ceph_mon['hostname'] == constants.CONTROLLER_1_HOSTNAME:
|
||||
targets.update({'%s-%s' % (constants.CONTROLLER_1_HOSTNAME,
|
||||
network_type): 'ceph-mon-1-ip'})
|
||||
else:
|
||||
targets.update({'%s-%s' % (ceph_mon['hostname'],
|
||||
network_type): 'ceph-mon-2-ip'})
|
||||
|
||||
ceph_mon['ceph_mon_gib'] = ceph_mons[0]['ceph_mon_gib']
|
||||
|
||||
results = {}
|
||||
addrs = dbapi.addresses_get_all()
|
||||
for addr in addrs:
|
||||
if addr.name in targets:
|
||||
results[targets[addr.name]] = addr.address
|
||||
if len(results) != len(targets):
|
||||
raise exception.IncompleteCephMonNetworkConfig(
|
||||
targets=targets, results=results)
|
||||
|
||||
return results
|
||||
|
||||
@staticmethod
|
||||
|
|
|
@ -9,6 +9,7 @@ import uuid
|
|||
|
||||
from sysinv.api.controllers.v1 import utils
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
from sysinv.common.storage_backend_conf import StorageBackendConfig
|
||||
|
||||
from sysinv.puppet import openstack
|
||||
|
@ -49,6 +50,17 @@ class CephPuppet(openstack.OpenstackBasePuppet):
|
|||
ceph_mon_ips = StorageBackendConfig.get_ceph_mon_ip_addresses(
|
||||
self.dbapi)
|
||||
|
||||
controller_hosts = [constants.CONTROLLER_0_HOSTNAME, constants.CONTROLLER_1_HOSTNAME]
|
||||
mon_2_host = [mon['hostname'] for mon in self.dbapi.ceph_mon_get_list() if
|
||||
mon['hostname'] not in controller_hosts]
|
||||
if len(mon_2_host) > 1:
|
||||
raise exception.SysinvException(
|
||||
'Too many ceph monitor hosts, expected 1, got: %s.' % mon_2_host)
|
||||
if mon_2_host:
|
||||
mon_2_host = mon_2_host[0]
|
||||
else:
|
||||
mon_2_host = None
|
||||
|
||||
# TODO: k8s on AIO-SX: Temporarily need to move the ceph monitor address
|
||||
# from a loopback address to the OAM address so the ceph monitor is
|
||||
# reachable from the cluster pods.
|
||||
|
@ -59,12 +71,15 @@ class CephPuppet(openstack.OpenstackBasePuppet):
|
|||
else:
|
||||
mon_0_ip = ceph_mon_ips['ceph-mon-0-ip']
|
||||
mon_1_ip = ceph_mon_ips['ceph-mon-1-ip']
|
||||
mon_2_ip = ceph_mon_ips['ceph-mon-2-ip']
|
||||
mon_2_ip = ceph_mon_ips.get('ceph-mon-2-ip', None)
|
||||
floating_mon_ip = ceph_mon_ips['ceph-floating-mon-ip']
|
||||
|
||||
mon_0_addr = self._format_ceph_mon_address(mon_0_ip)
|
||||
mon_1_addr = self._format_ceph_mon_address(mon_1_ip)
|
||||
mon_2_addr = self._format_ceph_mon_address(mon_2_ip)
|
||||
if mon_2_ip:
|
||||
mon_2_addr = self._format_ceph_mon_address(mon_2_ip)
|
||||
else:
|
||||
mon_2_addr = None
|
||||
floating_mon_addr = self._format_ceph_mon_address(floating_mon_ip)
|
||||
|
||||
# ceph can not bind to multiple address families, so only enable IPv6
|
||||
|
@ -85,8 +100,7 @@ class CephPuppet(openstack.OpenstackBasePuppet):
|
|||
constants.CONTROLLER_0_HOSTNAME,
|
||||
'platform::ceph::params::mon_1_host':
|
||||
constants.CONTROLLER_1_HOSTNAME,
|
||||
'platform::ceph::params::mon_2_host':
|
||||
constants.STORAGE_0_HOSTNAME,
|
||||
'platform::ceph::params::mon_2_host': mon_2_host,
|
||||
|
||||
'platform::ceph::params::floating_mon_ip': floating_mon_ip,
|
||||
'platform::ceph::params::mon_0_ip': mon_0_ip,
|
||||
|
@ -167,8 +181,8 @@ class CephPuppet(openstack.OpenstackBasePuppet):
|
|||
def get_host_config(self, host):
|
||||
config = {}
|
||||
if host.personality in [constants.CONTROLLER, constants.STORAGE]:
|
||||
config.update(self._get_ceph_mon_config(host))
|
||||
config.update(self._get_ceph_osd_config(host))
|
||||
config.update(self._get_ceph_mon_config(host))
|
||||
|
||||
# if it is a worker node and on an secondary region,
|
||||
# check if ceph mon configuration is required
|
||||
|
|
Loading…
Reference in New Issue