From d2f6c88f9031945c66c556cd29c8210da7736741 Mon Sep 17 00:00:00 2001 From: Robert Church Date: Wed, 22 May 2019 20:55:14 -0400 Subject: [PATCH] Set osd_pool_default_size based on deployment The ceph.conf file packaged in the Ceph RPM sets 'osd_pool_default_size = 2'. This is a valid initial value for most deployments. The exception is for the AIO-SX single OSD installation (which is our default minimum AIO-SX configuration). In this deployment configuration, this value will produce a HEALTH_WARN specifying "Degraded data redundancy". This commit will set 'osd_pool_default_size' based on the deployment and specifically set it to '1' for the AIO-SX. This will provide a HEALTH_OK cluster on controller unlock. If/when additional OSDs are added, the 'system storage-backend-modify' command can be used to change the replication factor to provide a higher level of data redundancy. This change removes the long-standing need to run the following command when provisioning the AIO-SX: ceph osd pool ls | xargs -i ceph osd pool set {} size 1 This will also now enable automatic loading of the platform-integ-apps k8s application and subsequent loading of the rbd-provisioner for persistent volume claims on the AIO-SX. 
Change-Id: I901b339f1c7770aa16a7bbfecf193d0c1e5e9eaa Story: 2005424 Task: 33471 Signed-off-by: Robert Church --- puppet-manifests/centos/build_srpm.data | 2 +- .../src/modules/platform/manifests/ceph.pp | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/puppet-manifests/centos/build_srpm.data b/puppet-manifests/centos/build_srpm.data index afeec2dd88..39044bff35 100644 --- a/puppet-manifests/centos/build_srpm.data +++ b/puppet-manifests/centos/build_srpm.data @@ -1,2 +1,2 @@ SRC_DIR="src" -TIS_PATCH_VER=88 +TIS_PATCH_VER=89 diff --git a/puppet-manifests/src/modules/platform/manifests/ceph.pp b/puppet-manifests/src/modules/platform/manifests/ceph.pp index 442584702b..487a280ba4 100644 --- a/puppet-manifests/src/modules/platform/manifests/ceph.pp +++ b/puppet-manifests/src/modules/platform/manifests/ceph.pp @@ -53,19 +53,24 @@ class platform::ceph if $system_mode == 'simplex' { # 1 node configuration, a single monitor is available $mon_initial_members = $mon_0_host + $osd_pool_default_size = 1 } else { # 2 node configuration, we have a floating monitor $mon_initial_members = $floating_mon_host + $osd_pool_default_size = 2 } } else { # Multinode & standard, any 2 monitors form a cluster $mon_initial_members = undef + $osd_pool_default_size = 2 } class { '::ceph': - fsid => $cluster_uuid, - authentication_type => $authentication_type, - mon_initial_members => $mon_initial_members + fsid => $cluster_uuid, + authentication_type => $authentication_type, + mon_initial_members => $mon_initial_members, + osd_pool_default_size => $osd_pool_default_size, + osd_pool_default_min_size => 1 } -> ceph_config { 'mon/mon clock drift allowed': value => '.1'; @@ -83,7 +88,7 @@ class platform::ceph # Simplex case, a single monitor binded to the controller. Class['::ceph'] -> ceph_config { - "mon.${mon_0_host}/host": value => $mon_0_host; + "mon.${mon_0_host}/host": value => $mon_0_host; "mon.${mon_0_host}/mon_addr": value => $mon_0_addr; } }