CEPH support for 2 node configuration

In order to enable OpenStack's Helm charts on StarlingX, we need
distributed persistent storage for Kubernetes that leverages
our existing configurations.

Changes made:
- allow CEPH to be configured in a 2 node configuration
  with a single floating monitor.
- the floating monitor is managed by SM.
- the CEPH monitor filesystem is DRBD-replicated between
  the two controller nodes.
- add a ceph crushmap for the two node setup; both controllers
  are in the same group and redundancy is created between
  the two nodes (see the inspection sketch below).
- only replication factor 2 is supported.
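
A minimal sketch for sanity-checking the resulting two node cluster,
assuming the crushmap path installed by the sysinv spec in this change
(the decompiled output filename is illustrative):

  # Decompile the new two node crushmap shipped by sysinv
  crushtool -d /etc/sysinv/crushmap-aio-dx.bin -o /tmp/crushmap-aio-dx.txt
  # Confirm the single floating monitor and a replication factor of 2
  ceph mon stat
  ceph osd dump | grep 'replicated size'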

Change-Id: Ic97b9fafa752a40befe395be2cafd3096010cc5b
Co-Authored-By: Stefan Dinescu <stefan.dinescu@windriver.com>
Depends-On: I8f9ea4798070e08171ad73da39821bc20b7af231
Story: 2002844
Task: 26878
Signed-off-by: Stefan Dinescu <stefan.dinescu@windriver.com>
Ovidiu Poncea 2018-11-15 11:44:37 +00:00 committed by Stefan Dinescu
parent 977112e99e
commit 4b004e1d49
17 changed files with 383 additions and 103 deletions

View File

@@ -25,7 +25,6 @@ class openstack::cinder::params (
$initial_cinder_lvm_config_flag = "${::platform::params::config_path}/.initial_cinder_lvm_config_complete",
$initial_cinder_ceph_config_flag = "${::platform::params::config_path}/.initial_cinder_ceph_config_complete",
$node_cinder_lvm_config_flag = '/etc/platform/.node_cinder_lvm_config_complete',
$node_cinder_ceph_config_flag = '/etc/platform/.node_cinder_ceph_config_complete',
) {
$cinder_disk = regsubst($cinder_device, '-part\d+$', '')
@@ -75,16 +74,8 @@ class openstack::cinder::params (
} else {
$is_initial_cinder_ceph = false
}
# Check if we should configure/reconfigure cinder LVM for this node.
# True in case of node reinstalls etc.
if str2bool($::is_node_cinder_ceph_config) {
$is_node_cinder_ceph = true
} else {
$is_node_cinder_ceph = false
}
} else {
$is_initial_cinder_ceph = false
$is_node_cinder_ceph = false
}
# Cinder needs to be running on initial configuration of either Ceph or LVM
@@ -727,12 +718,6 @@ class openstack::cinder::post
}
}
if $is_node_cinder_ceph {
file { $node_cinder_ceph_config_flag:
ensure => present
}
}
# cinder-api needs to be running in order to apply the cinder manifest,
# however, it needs to be stopped/disabled to allow SM to manage the service.
# To allow for the transition it must be explicitly stopped. Once puppet

View File

@@ -0,0 +1,7 @@
# Returns true if Ceph has been configured on current node
Facter.add("is_node_ceph_configured") do
setcode do
File.exist?('/etc/platform/.node_ceph_configured')
end
end

View File

@@ -1,7 +0,0 @@
# Returns true if cinder Ceph needs to be configured on current node
Facter.add("is_node_cinder_ceph_config") do
setcode do
! File.exist?('/etc/platform/.node_cinder_ceph_config_complete')
end
end

View File

@@ -8,6 +8,9 @@ class platform::ceph::params(
$mon_fs_type = 'ext4',
$mon_fs_options = ' ',
$mon_mountpoint = '/var/lib/ceph/mon',
$floating_mon_host = undef,
$floating_mon_ip = undef,
$floating_mon_addr = undef,
$mon_0_host = undef,
$mon_0_ip = undef,
$mon_0_addr = undef,
@@ -35,6 +38,7 @@ class platform::ceph::params(
$restapi_public_addr = undef,
$configure_ceph_mon_info = false,
$ceph_config_ready_path = '/var/run/.ceph_started',
$node_ceph_configured_flag = '/etc/platform/.node_ceph_configured',
) { }
@@ -44,10 +48,17 @@ class platform::ceph
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if $service_enabled or $configure_ceph_mon_info {
if $system_type == 'All-in-one' and 'simplex' in $system_mode {
# Allow 1 node configurations to work with a single monitor
$mon_initial_members = $mon_0_host
# Set the minimum set of monitors that form a valid cluster
if $system_type == 'All-in-one' {
if $system_mode == 'simplex' {
# 1 node configuration, a single monitor is available
$mon_initial_members = $mon_0_host
} else {
# 2 node configuration, we have a floating monitor
$mon_initial_members = $floating_mon_host
}
} else {
# Multinode, any 2 monitors form a cluster
$mon_initial_members = undef
}
@@ -58,21 +69,31 @@ class platform::ceph
} ->
ceph_config {
"mon/mon clock drift allowed": value => ".1";
"mon.${mon_0_host}/host": value => $mon_0_host;
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
"client.restapi/public_addr": value => $restapi_public_addr;
}
if $system_type == 'All-in-one' {
# 1 and 2 node configurations have a single monitor
if 'duplex' in $system_mode {
# Floating monitor, running on active controller.
Class['::ceph'] ->
ceph_config {
"mon.${mon_1_host}/host": value => $mon_1_host;
"mon.${mon_1_host}/mon_addr": value => $mon_1_addr;
"mon.${floating_mon_host}/host": value => $floating_mon_host;
"mon.${floating_mon_host}/mon_addr": value => $floating_mon_addr;
}
} else {
# Simplex case, a single monitor bound to the controller.
Class['::ceph'] ->
ceph_config {
"mon.${mon_0_host}/host": value => $mon_0_host;
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
}
}
} else {
# Multinode has 3 monitors.
Class['::ceph'] ->
ceph_config {
"mon.${mon_0_host}/host": value => $mon_0_host;
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
"mon.${mon_1_host}/host": value => $mon_1_host;
"mon.${mon_1_host}/mon_addr": value => $mon_1_addr;
"mon.${mon_2_host}/host": value => $mon_2_host;
@@ -86,44 +107,79 @@ class platform::ceph
}
class platform::ceph::post {
include ::platform::ceph::params
class platform::ceph::post
inherits ::platform::ceph::params {
# Enable ceph process recovery after all configuration is done
file { $::platform::ceph::params::ceph_config_ready_path:
file { $ceph_config_ready_path:
ensure => present,
content => '',
owner => 'root',
group => 'root',
mode => '0644',
}
if $service_enabled {
file { $node_ceph_configured_flag:
ensure => present
}
}
}
class platform::ceph::monitor
inherits ::platform::ceph::params {
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if $service_enabled {
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
if str2bool($::is_controller_active) {
# Ceph mon is configured on a DRBD partition, on the active controller,
# when 'ceph' storage backend is added in sysinv.
# Then SM takes care of starting ceph after manifests are applied.
$configure_ceph_mon = true
} else {
$configure_ceph_mon = false
}
} else {
# Simplex, multinode. Ceph is pmon managed.
$configure_ceph_mon = true
}
}
else {
$configure_ceph_mon = false
}
if $configure_ceph_mon {
file { '/var/lib/ceph':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
} ->
}
platform::filesystem { $mon_lv_name:
lv_name => $mon_lv_name,
lv_size => $mon_lv_size,
mountpoint => $mon_mountpoint,
fs_type => $mon_fs_type,
fs_options => $mon_fs_options,
} -> Class['::ceph']
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
# ensure DRBD config is complete before enabling the ceph monitor
Drbd::Resource <| |> -> Class['::ceph']
} else {
File['/var/lib/ceph'] ->
platform::filesystem { $mon_lv_name:
lv_name => $mon_lv_name,
lv_size => $mon_lv_size,
mountpoint => $mon_mountpoint,
fs_type => $mon_fs_type,
fs_options => $mon_fs_options,
} -> Class['::ceph']
file { "/etc/pmon.d/ceph.conf":
ensure => link,
target => "/etc/ceph/ceph.conf.pmon",
owner => 'root',
group => 'root',
mode => '0640',
file { "/etc/pmon.d/ceph.conf":
ensure => link,
target => "/etc/ceph/ceph.conf.pmon",
owner => 'root',
group => 'root',
mode => '0640',
}
}
# ensure configuration is complete before creating monitors
@@ -131,9 +187,7 @@ class platform::ceph::monitor
# Start service on AIO SX and on active controller
# to allow in-service configuration.
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if str2bool($::is_controller_active) or ($system_type == 'All-in-one' and $system_mode == 'simplex') {
if str2bool($::is_controller_active) or $system_type == 'All-in-one' {
$service_ensure = "running"
} else {
$service_ensure = "stopped"
@@ -146,19 +200,53 @@ class platform::ceph::monitor
service_ensure => $service_ensure,
}
if $::hostname == $mon_0_host {
ceph::mon { $mon_0_host:
public_addr => $mon_0_ip,
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
ceph::mon { $floating_mon_host:
public_addr => $floating_mon_ip,
}
}
elsif $::hostname == $mon_1_host {
ceph::mon { $mon_1_host:
public_addr => $mon_1_ip,
if (str2bool($::is_controller_active) and
str2bool($::is_initial_cinder_ceph_config) and
!str2bool($::is_standalone_controller)) {
# When we configure ceph after both controllers are active,
# we need to stop the monitor, unmount the monitor partition
# and set the drbd role to secondary, so that the handoff to
# SM is done properly once we swact to the standby controller.
# TODO: Remove this once SM supports in-service config reload.
Ceph::Mon <| |> ->
exec { "Stop Ceph monitor":
command =>"/etc/init.d/ceph stop mon",
onlyif => "/etc/init.d/ceph status mon",
logoutput => true,
} ->
exec { "umount ceph-mon partition":
command => "umount $mon_mountpoint",
onlyif => "mount | grep -q $mon_mountpoint",
logoutput => true,
} ->
exec { 'Set cephmon secondary':
command => "drbdadm secondary drbd-cephmon",
unless => "drbdadm role drbd-cephmon | egrep '^Secondary'",
logoutput => true,
}
}
} else {
if $::hostname == $mon_0_host {
ceph::mon { $mon_0_host:
public_addr => $mon_0_ip,
}
}
}
elsif $::hostname == $mon_2_host {
ceph::mon { $mon_2_host:
public_addr => $mon_2_ip,
elsif $::hostname == $mon_1_host {
ceph::mon { $mon_1_host:
public_addr => $mon_1_ip,
}
}
elsif $::hostname == $mon_2_host {
ceph::mon { $mon_2_host:
public_addr => $mon_2_ip,
}
}
}
}
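
After a swact on a two node system, the handoff described in the comments
above can be sanity-checked on the newly active controller; a rough sketch
assuming the sm-dump CLI available on StarlingX controllers (the service
names are the ones provisioned for SM later in this change):

  # The floating monitor and its DRBD-backed filesystem should be started by SM
  sudo sm-dump | grep -E 'drbd-cephmon|cephmon-fs|ceph-mon|ceph-osd'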

View File

@@ -394,6 +394,64 @@ class platform::drbd::dockerdistribution ()
}
}
class platform::drbd::cephmon::params (
$device = '/dev/drbd9',
$lv_name = 'ceph-mon-lv',
$mountpoint = '/var/lib/ceph/mon',
$port = '7788',
$resource_name = 'drbd-cephmon',
$vg_name = 'cgts-vg',
) {}
class platform::drbd::cephmon ()
inherits ::platform::drbd::cephmon::params {
include ::platform::ceph::params
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
#TODO: This will change once we remove the native cinder service
if (str2bool($::is_initial_config_primary) or
(str2bool($::is_controller_active) and str2bool($::is_initial_cinder_ceph_config))
){
# Active controller, first time configuration.
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
} elsif str2bool($::is_standalone_controller){
# Active standalone controller, successive reboots.
$drbd_primary = true
$drbd_initial = undef
$drbd_automount = true
} else {
# Node unlock, reboot or standby configuration
# Do not mount ceph
$drbd_primary = undef
$drbd_initial = undef
$drbd_automount = undef
}
if ($::platform::ceph::params::service_enabled and
$system_type == 'All-in-one' and 'duplex' in $system_mode) {
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $::platform::ceph::params::mon_lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => true,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
} -> Class['::ceph']
}
}
class platform::drbd(
$service_enable = false,
$service_ensure = 'stopped',
@@ -427,6 +485,7 @@ class platform::drbd(
include ::platform::drbd::patch_vault
include ::platform::drbd::etcd
include ::platform::drbd::dockerdistribution
include ::platform::drbd::cephmon
# network changes need to be applied prior to DRBD resources
Anchor['platform::networking'] ->
@@ -498,3 +557,8 @@ class platform::drbd::dockerdistribution::runtime {
include ::platform::drbd::params
include ::platform::drbd::dockerdistribution
}
class platform::drbd::cephmon::runtime {
include ::platform::drbd::params
include ::platform::drbd::cephmon
}
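
For reference, a rough check that the DRBD-backed monitor filesystem defined
above is active only on one controller (resource name, device and mountpoint
taken from this manifest; exact output varies):

  drbdadm role drbd-cephmon       # expect Primary on the active controller, Secondary on the standby
  mount | grep /var/lib/ceph/mon  # the ext4 filesystem on /dev/drbd9 is mounted only where DRBD is Primary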

View File

@@ -13,6 +13,7 @@ class platform::sm
$region_config = $::platform::params::region_config
$region_2_name = $::platform::params::region_2_name
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
include ::platform::network::pxeboot::params
if $::platform::network::pxeboot::params::interface_name {
@@ -79,6 +80,11 @@ class platform::sm
$dockerdistribution_fs_device = $::platform::drbd::dockerdistribution::params::device
$dockerdistribution_fs_directory = $::platform::drbd::dockerdistribution::params::mountpoint
include ::platform::drbd::cephmon::params
$cephmon_drbd_resource = $::platform::drbd::cephmon::params::resource_name
$cephmon_fs_device = $::platform::drbd::cephmon::params::device
$cephmon_fs_directory = $::platform::drbd::cephmon::params::mountpoint
include ::openstack::keystone::params
$keystone_api_version = $::openstack::keystone::params::api_version
$keystone_identity_uri = $::openstack::keystone::params::identity_uri
@@ -1376,7 +1382,46 @@ class platform::sm
}
if $ceph_configured {
# Ceph-Rest-API
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
exec { 'Provision Cephmon FS in SM (service-group-member cephmon-fs)':
command => "sm-provision service-group-member controller-services cephmon-fs",
} ->
exec { 'Provision Cephmon FS in SM (service cephmon-fs)':
command => "sm-provision service cephmon-fs",
} ->
exec { 'Provision Cephmon DRBD in SM (service-group-member drbd-cephmon)':
command => "sm-provision service-group-member controller-services drbd-cephmon",
} ->
exec { 'Provision Cephmon DRBD in SM (service drbd-cephmon)':
command => "sm-provision service drbd-cephmon",
} ->
exec { 'Configure Cephmon DRBD':
command => "sm-configure service_instance drbd-cephmon drbd-cephmon:${hostunit} \"drbd_resource=${cephmon_drbd_resource}\"",
} ->
exec { 'Configure Cephmon FileSystem':
command => "sm-configure service_instance cephmon-fs cephmon-fs \"device=${cephmon_fs_device},directory=${cephmon_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
} ->
exec { 'Configure cephmon':
command => "sm-configure service_instance ceph-mon ceph-mon \"\"",
} ->
exec { 'Provision cephmon (service-group-member)':
command => "sm-provision service-group-member controller-services ceph-mon",
} ->
exec { 'Provision cephmon (service)':
command => "sm-provision service ceph-mon",
} ->
exec { 'Configure ceph-osd':
command => "sm-configure service_instance ceph-osd ceph-osd \"\"",
} ->
exec { 'Provision ceph-osd (service-group-member)':
command => "sm-provision service-group-member storage-services ceph-osd",
} ->
exec { 'Provision ceph-osd (service)':
command => "sm-provision service ceph-osd",
}
}
# Ceph-Rest-Api
exec { 'Provision Ceph-Rest-Api (service-domain-member storage-services)':
command => "sm-provision service-domain-member controller storage-services",
} ->

View File

@@ -65,6 +65,7 @@ install -p -D -m 640 etc/sysinv/profileSchema.xsd %{buildroot}%{local_etc_sysinv
#crushtool -d crushmap.bin -o {decompiled-crushmap-filename}
install -p -D -m 655 etc/sysinv/crushmap.bin %{buildroot}%{local_etc_sysinv}/crushmap.bin
install -p -D -m 655 etc/sysinv/crushmap-aio-sx.bin %{buildroot}%{local_etc_sysinv}/crushmap-aio-sx.bin
install -p -D -m 655 etc/sysinv/crushmap-aio-dx.bin %{buildroot}%{local_etc_sysinv}/crushmap-aio-dx.bin
install -d -m 755 %{buildroot}%{local_etc_motdd}
install -p -D -m 755 etc/sysinv/motd-system %{buildroot}%{local_etc_motdd}/10-system

Binary file not shown.


View File

@@ -5187,7 +5187,6 @@ class HostController(rest.RestController):
elif StorageBackendConfig.has_backend_configured(
pecan.request.dbapi,
constants.CINDER_BACKEND_CEPH):
ihost_stors = []
if utils.is_aio_simplex_system(pecan.request.dbapi):
# Check if host has enough OSDs configured for each tier
tiers = pecan.request.dbapi.storage_tier_get_all()
@@ -5208,27 +5207,51 @@ class HostController(rest.RestController):
% {'replication': str(replication), 'word': word, 'tier': tier['name']})
raise wsme.exc.ClientSideError(msg)
else:
try:
ihost_stors = pecan.request.dbapi.ihost_get_by_personality(
personality=constants.STORAGE)
except Exception:
raise wsme.exc.ClientSideError(
_("Can not unlock a compute node until at "
"least one storage node is unlocked and enabled."))
ihost_stor_unlocked = False
if ihost_stors:
for ihost_stor in ihost_stors:
if (ihost_stor.administrative == constants.ADMIN_UNLOCKED and
(ihost_stor.operational ==
constants.OPERATIONAL_ENABLED)):
if utils.is_aio_duplex_system(pecan.request.dbapi):
host_stors = pecan.request.dbapi.istor_get_by_ihost(ihost['id'])
if not host_stors:
raise wsme.exc.ClientSideError(
_("Can not unlock node until at least one OSD is configured."))
ihost_stor_unlocked = True
break
tiers = pecan.request.dbapi.storage_tier_get_all()
ceph_tiers = filter(lambda t: t.type == constants.SB_TIER_TYPE_CEPH, tiers)
# On a two-node configuration, both nodes should have at least one OSD
# in each tier. Otherwise, the cluster remains in an error state.
for tier in ceph_tiers:
stors = tier['stors']
host_has_osd_in_tier = False
for stor in stors:
if stor['forihostid'] == ihost['id']:
host_has_osd_in_tier = True
if not ihost_stor_unlocked:
raise wsme.exc.ClientSideError(
_("Can not unlock a compute node until at "
"least one storage node is unlocked and enabled."))
if not host_has_osd_in_tier:
raise wsme.exc.ClientSideError(
"Can not unlock node until every storage tier has at least one OSD "
"configured. Tier \"%s\" has no OSD configured." % tier['name'])
else:
storage_nodes = []
try:
storage_nodes = pecan.request.dbapi.ihost_get_by_personality(
personality=constants.STORAGE)
except Exception:
raise wsme.exc.ClientSideError(
_("Can not unlock a compute node until at "
"least one storage node is unlocked and enabled."))
is_storage_host_unlocked = False
if storage_nodes:
for node in storage_nodes:
if (node.administrative == constants.ADMIN_UNLOCKED and
(node.operational ==
constants.OPERATIONAL_ENABLED)):
is_storage_host_unlocked = True
break
if not is_storage_host_unlocked:
raise wsme.exc.ClientSideError(
_("Can not unlock a compute node until at "
"least one storage node is unlocked and enabled."))
# Local Storage checks
self._semantic_check_nova_local_storage(ihost['uuid'],

View File

@@ -498,11 +498,11 @@ def _check_host(stor):
raise wsme.exc.ClientSideError(_("Host must be locked"))
# semantic check: whether personality == storage or we have a k8s AIO system
is_k8s_aio_sx = (utils.is_aio_simplex_system(pecan.request.dbapi) and
utils.is_kubernetes_config(pecan.request.dbapi))
if not is_k8s_aio_sx and ihost['personality'] != constants.STORAGE:
msg = ("Host personality must be 'storage' or "
"one node system with kubernetes enabled.")
is_k8s_aio = (utils.is_aio_system(pecan.request.dbapi) and
utils.is_kubernetes_config(pecan.request.dbapi))
if not is_k8s_aio and ihost['personality'] != constants.STORAGE:
msg = ("Host personality must be 'storage' or kubernetes enabled "
"1 or 2 node system")
raise wsme.exc.ClientSideError(_(msg))
# semantic check: whether system has a ceph backend
@@ -514,7 +514,7 @@ def _check_host(stor):
"System must have a %s backend" % constants.SB_TYPE_CEPH))
# semantic check: whether at least 2 unlocked hosts are monitors
if not utils.is_aio_simplex_system(pecan.request.dbapi):
if not utils.is_aio_system(pecan.request.dbapi):
ceph_helper = ceph.CephApiOperator()
num_monitors, required_monitors, quorum_names = \
ceph_helper.get_monitors_status(pecan.request.dbapi)

View File

@@ -667,7 +667,7 @@ def _check_and_update_rbd_provisioner(new_storceph, remove=False):
validate_k8s_namespaces(K8RbdProvisioner.getListFromNamespaces(new_storceph))
# Check if cluster is configured
if not utils.is_aio_simplex_system(pecan.request.dbapi):
if not utils.is_aio_system(pecan.request.dbapi):
# On multinode it is enough if storage hosts are available
storage_hosts = pecan.request.dbapi.ihost_get_by_personality(
constants.STORAGE
@@ -969,6 +969,11 @@ def _check_replication_number(new_cap, orig_cap):
(ceph_state, constants.SB_STATE_CONFIGURED)))
else:
if utils.is_aio_duplex_system(pecan.request.dbapi):
# Replication change is not allowed on a two node configuration
raise wsme.exc.ClientSideError(
_("Can not modify ceph replication factor on "
"two node configuration."))
# On a standard install we allow modifications of ceph storage
# backend parameters after the manifests have been applied and
# before first storage node has been configured.
@@ -1192,7 +1197,7 @@ def _update_pool_quotas(storceph):
def _check_object_gateway_install(dbapi):
# Ensure we have the required number of monitors
if utils.is_aio_simplex_system(dbapi):
if utils.is_aio_system(dbapi):
api_helper.check_minimal_number_of_controllers(1)
else:
api_helper.check_minimal_number_of_controllers(2)

View File

@@ -410,7 +410,7 @@ def _check(op, tier):
raise wsme.exc.ClientSideError(_("Storage tier (%s) "
"already present." %
tier['name']))
if utils.is_aio_simplex_system(pecan.request.dbapi):
if utils.is_aio_system(pecan.request.dbapi):
# Deny adding secondary tiers if primary tier backend is not configured
# for cluster. When secondary tier is added we also query ceph to create
# pools and set replication therefore cluster has to be up.

View File

@@ -382,6 +382,13 @@ def is_kubernetes_config(dbapi=None):
return system.capabilities.get('kubernetes_enabled', False)
def is_aio_system(dbapi=None):
if not dbapi:
dbapi = pecan.request.dbapi
system = dbapi.isystem_get_one()
return (system.system_type == constants.TIS_AIO_BUILD)
def is_aio_simplex_system(dbapi=None):
if not dbapi:
dbapi = pecan.request.dbapi
@@ -390,9 +397,13 @@ def is_aio_simplex_system(dbapi=None):
system.system_mode == constants.SYSTEM_MODE_SIMPLEX)
def is_aio_duplex_system():
return get_system_mode() == constants.SYSTEM_MODE_DUPLEX and \
SystemHelper.get_product_build() == constants.TIS_AIO_BUILD
def is_aio_duplex_system(dbapi=None):
if not dbapi:
dbapi = pecan.request.dbapi
system = dbapi.isystem_get_one()
return (system.system_type == constants.TIS_AIO_BUILD and
(system.system_mode == constants.SYSTEM_MODE_DUPLEX or
system.system_mode == constants.SYSTEM_MODE_DUPLEX_DIRECT))
def is_aio_kubernetes(dbapi=None):

View File

@@ -712,6 +712,8 @@ def fix_crushmap(dbapi=None):
if not os.path.isfile(crushmap_flag_file):
if utils.is_aio_simplex_system(dbapi):
crushmap_file = "/etc/sysinv/crushmap-aio-sx.bin"
elif utils.is_aio_duplex_system(dbapi):
crushmap_file = "/etc/sysinv/crushmap-aio-dx.bin"
else:
crushmap_file = "/etc/sysinv/crushmap.bin"
LOG.info("Updating crushmap with: %s" % crushmap_file)

View File

@@ -205,11 +205,18 @@ class StorageBackendConfig(object):
dbapi.network_get_by_type(
constants.NETWORK_TYPE_INFRA
)
# TODO (sdinescu): create a new floating address for ceph-mon
# Using controller-nfs network name until a new one is created
# for ceph.
floating_network_name = "controller-nfs"
network_type = constants.NETWORK_TYPE_INFRA
except exception.NetworkTypeNotFound:
network_type = constants.NETWORK_TYPE_MGMT
floating_network_name = constants.CONTROLLER_HOSTNAME
targets = {
'%s-%s' % (floating_network_name,
network_type): 'ceph-floating-mon-ip',
'%s-%s' % (constants.CONTROLLER_0_HOSTNAME,
network_type): 'ceph-mon-0-ip',
'%s-%s' % (constants.CONTROLLER_1_HOSTNAME,

View File

@@ -5656,11 +5656,10 @@ class ConductorManager(service.PeriodicService):
self.update_service_table_for_cinder()
# TODO(oponcea): Uncomment when SM supports in-service config reload
# ctrls = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
# valid_ctrls = [ctrl for ctrl in ctrls if
# ctrl.administrative == constants.ADMIN_UNLOCKED and
# ctrl.availability == constants.AVAILABILITY_AVAILABLE]
host = utils.HostHelper.get_active_controller(self.dbapi)
ctrls = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
valid_ctrls = [ctrl for ctrl in ctrls if
ctrl.administrative == constants.ADMIN_UNLOCKED and
ctrl.availability == constants.AVAILABILITY_AVAILABLE]
classes = ['platform::partitions::runtime',
'platform::lvm::controller::runtime',
'platform::haproxy::runtime',
@@ -5668,14 +5667,20 @@ class ConductorManager(service.PeriodicService):
'platform::filesystem::img_conversions::runtime',
'platform::ceph::controller::runtime',
]
if utils.is_aio_duplex_system(self.dbapi):
# On 2 node systems we have a floating Ceph monitor.
classes.append('platform::drbd::cephmon::runtime')
classes.append('platform::drbd::runtime')
if constants.SB_SVC_GLANCE in services:
classes.append('openstack::glance::api::runtime')
if constants.SB_SVC_CINDER in services:
classes.append('openstack::cinder::runtime')
classes.append('platform::sm::norestart::runtime')
config_dict = {"personalities": personalities,
"host_uuids": host.uuid,
# "host_uuids": [ctrl.uuid for ctrl in valid_ctrls],
# "host_uuids": host.uuid,
"host_uuids": [ctrl.uuid for ctrl in valid_ctrls],
"classes": classes,
puppet_common.REPORT_STATUS_CFG: puppet_common.REPORT_CEPH_BACKEND_CONFIG,
}
@@ -5706,9 +5711,13 @@ class ConductorManager(service.PeriodicService):
config_uuid=new_uuid,
config_dict=config_dict)
tasks = {}
for ctrl in valid_ctrls:
tasks[ctrl.hostname] = constants.SB_TASK_APPLY_MANIFESTS
# Update initial task states
values = {'state': constants.SB_STATE_CONFIGURING,
'task': constants.SB_TASK_APPLY_MANIFESTS}
'task': str(tasks)}
self.dbapi.storage_ceph_update(sb_uuid, values)
def config_update_nova_local_backed_hosts(self, context, instance_backing):
@@ -6332,7 +6341,7 @@ class ConductorManager(service.PeriodicService):
active_controller = utils.HostHelper.get_active_controller(self.dbapi)
if utils.is_host_simplex_controller(active_controller):
state = constants.SB_STATE_CONFIGURED
if utils.is_aio_simplex_system(self.dbapi):
if utils.is_aio_system(self.dbapi):
task = None
cceph.fix_crushmap(self.dbapi)
else:
@@ -6342,7 +6351,41 @@ class ConductorManager(service.PeriodicService):
else:
# TODO(oponcea): Remove when sm supports in-service config reload
# and any logic dealing with constants.SB_TASK_RECONFIG_CONTROLLER.
values = {'task': constants.SB_TASK_RECONFIG_CONTROLLER}
ctrls = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
# Note that even if nodes are degraded we still accept the answer.
valid_ctrls = [ctrl for ctrl in ctrls if
(ctrl.administrative == constants.ADMIN_LOCKED and
ctrl.availability == constants.AVAILABILITY_ONLINE) or
(ctrl.administrative == constants.ADMIN_UNLOCKED and
ctrl.operational == constants.OPERATIONAL_ENABLED)]
# Set state for current node
for host in valid_ctrls:
if host.uuid == host_uuid:
break
else:
LOG.error("Host %(host) is not in the required state!" % host_uuid)
host = self.dbapi.ihost_get(host_uuid)
if not host:
LOG.error("Host %s is invalid!" % host_uuid)
return
tasks = eval(ceph_conf.get('task', '{}'))
if tasks:
tasks[host.hostname] = constants.SB_STATE_CONFIGURED
else:
tasks = {host.hostname: constants.SB_STATE_CONFIGURED}
config_success = True
for host in valid_ctrls:
if tasks.get(host.hostname, '') != constants.SB_STATE_CONFIGURED:
config_success = False
if ceph_conf.state != constants.SB_STATE_CONFIG_ERR:
if config_success:
values = {'task': constants.SB_TASK_RECONFIG_CONTROLLER}
else:
values = {'task': str(tasks)}
self.dbapi.storage_backend_update(ceph_conf.uuid, values)
# The VIM needs to know when a cinder backend was added.

View File

@@ -60,10 +60,12 @@ class CephPuppet(openstack.OpenstackBasePuppet):
mon_0_ip = ceph_mon_ips['ceph-mon-0-ip']
mon_1_ip = ceph_mon_ips['ceph-mon-1-ip']
mon_2_ip = ceph_mon_ips['ceph-mon-2-ip']
floating_mon_ip = ceph_mon_ips['ceph-floating-mon-ip']
mon_0_addr = self._format_ceph_mon_address(mon_0_ip)
mon_1_addr = self._format_ceph_mon_address(mon_1_ip)
mon_2_addr = self._format_ceph_mon_address(mon_2_ip)
floating_mon_addr = self._format_ceph_mon_address(floating_mon_ip)
# ceph can not bind to multiple address families, so only enable IPv6
# if the monitors are IPv6 addresses
@@ -77,6 +79,8 @@ class CephPuppet(openstack.OpenstackBasePuppet):
'platform::ceph::params::service_enabled': True,
'platform::ceph::params::floating_mon_host':
constants.CONTROLLER_HOSTNAME,
'platform::ceph::params::mon_0_host':
constants.CONTROLLER_0_HOSTNAME,
'platform::ceph::params::mon_1_host':
@@ -84,10 +88,12 @@ class CephPuppet(openstack.OpenstackBasePuppet):
'platform::ceph::params::mon_2_host':
constants.STORAGE_0_HOSTNAME,
'platform::ceph::params::floating_mon_ip': floating_mon_ip,
'platform::ceph::params::mon_0_ip': mon_0_ip,
'platform::ceph::params::mon_1_ip': mon_1_ip,
'platform::ceph::params::mon_2_ip': mon_2_ip,
'platform::ceph::params::floating_mon_addr': floating_mon_addr,
'platform::ceph::params::mon_0_addr': mon_0_addr,
'platform::ceph::params::mon_1_addr': mon_1_addr,
'platform::ceph::params::mon_2_addr': mon_2_addr,