From 4b35404d6a03c4bfe6ea12e176d8624710a10b2c Mon Sep 17 00:00:00 2001
From: Don Penney
Date: Thu, 21 Feb 2019 11:33:30 -0500
Subject: [PATCH 1/3] Ignore error on k8s taint removal from puppet

There are cases where the kubernetes taint is not present on, or has
already been removed from, a newly configured standby controller. This
causes the taint removal command run by the puppet manifest to fail.
This failure can be safely ignored, so the command is updated to
always return success.

Change-Id: Icdb55738e052c65a28e44582e345038b0de83c37
Closes-Bug: 1815795
Signed-off-by: Don Penney
---
 puppet-manifests/src/modules/platform/manifests/kubernetes.pp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/puppet-manifests/src/modules/platform/manifests/kubernetes.pp b/puppet-manifests/src/modules/platform/manifests/kubernetes.pp
index 97bc539674..9f75b8ee1e 100644
--- a/puppet-manifests/src/modules/platform/manifests/kubernetes.pp
+++ b/puppet-manifests/src/modules/platform/manifests/kubernetes.pp
@@ -124,7 +124,7 @@ class platform::kubernetes::master::init
 
   # Remove the taint from the master node
   -> exec { 'remove taint from master node':
-    command   => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master-", # lint:ignore:140chars
+    command   => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master- || true", # lint:ignore:140chars
     logoutput => true,
   }
 
@@ -230,7 +230,7 @@ class platform::kubernetes::master::init
 
   # Remove the taint from the master node
   -> exec { 'remove taint from master node':
-    command   => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master-", # lint:ignore:140chars
+    command   => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master- || true", # lint:ignore:140chars
     logoutput => true,
   }
 

From cba2b66e9b27efc077b89fb5e661b8dffc890fd8 Mon Sep 17 00:00:00 2001
From: Erich Cordoba
Date: Thu, 21 Feb 2019 11:21:28 -0600
Subject: [PATCH 2/3] Move DNS requirement into kubernetes::master

The DNS requirement in the kubeadm class was causing a failure in the
compute unlock process, where the Platform::Dns class could not be
found.

Closes-bug: 1817126
Change-Id: I0a9e9b60580944a49b9672803fc05216f204b222
Signed-off-by: Erich Cordoba
---
 .../src/modules/platform/manifests/kubernetes.pp | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/puppet-manifests/src/modules/platform/manifests/kubernetes.pp b/puppet-manifests/src/modules/platform/manifests/kubernetes.pp
index 97bc539674..7b2f22c0b8 100644
--- a/puppet-manifests/src/modules/platform/manifests/kubernetes.pp
+++ b/puppet-manifests/src/modules/platform/manifests/kubernetes.pp
@@ -16,14 +16,10 @@ class platform::kubernetes::kubeadm {
   $iptables_file = "net.bridge.bridge-nf-call-ip6tables = 1
     net.bridge.bridge-nf-call-iptables = 1"
 
-  # Ensure DNS is configured as name resolution is required when
-  # kubeadm init is run.
-  Class['::platform::dns']
-
   # Update iptables config. This is required based on:
   # https://kubernetes.io/docs/tasks/tools/install-kubeadm
   # This probably belongs somewhere else - initscripts package?
-  -> file { '/etc/sysctl.d/k8s.conf':
+  file { '/etc/sysctl.d/k8s.conf':
     ensure  => file,
     content => $iptables_file,
     owner   => 'root',
@@ -271,6 +267,9 @@ class platform::kubernetes::master
   Class['::platform::etcd'] -> Class[$name]
   Class['::platform::docker::config'] -> Class[$name]
 
+  # Ensure DNS is configured as name resolution is required when
+  # kubeadm init is run.
+  Class['::platform::dns'] -> Class[$name]
   Class['::platform::kubernetes::kubeadm']
   -> Class['::platform::kubernetes::master::init']
   -> Class['::platform::kubernetes::firewall']

From 52a829d1803056da8222f30dcc002c39c86c6f54 Mon Sep 17 00:00:00 2001
From: Matt Peters
Date: Thu, 21 Feb 2019 11:20:15 -0500
Subject: [PATCH 3/3] Temporarily disable iptables restore during puppet

Docker and kubernetes add rules to iptables, which can end up
persisted in /etc/sysconfig/iptables by calls to iptables-save. When
the puppet manifest is applied during node initialization, kubernetes
is not yet running, and restoring any kubernetes-related iptables
rules will fail.

This update disables the restoration of iptables rules from previous
boots, to ensure the puppet manifest does not fail to apply due to
invalid rules. However, this means that in a DOR scenario (Dead Office
Recovery, where both controllers will be initializing at the same
time), the firewall rules will not get reapplied.

Firewall management will be moved to Calico under story 2005066, at
which point this code will be removed.

Change-Id: I43369dba34e6859088af3794de25a68571c7154c
Closes-Bug: 1815124
Signed-off-by: Don Penney
---
 puppet-manifests/src/manifests/controller.pp | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/puppet-manifests/src/manifests/controller.pp b/puppet-manifests/src/manifests/controller.pp
index 0154b13430..a4a18273ae 100644
--- a/puppet-manifests/src/manifests/controller.pp
+++ b/puppet-manifests/src/manifests/controller.pp
@@ -7,7 +7,15 @@ Exec {
   path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
 }
 
-include ::firewall
+#
+# Disable the firewall to protect against attempted
+# restoration of kubernetes-related iptables rules
+# during puppet apply, as kubernetes may not yet
+# be running and the restore will fail.
+#
+class { '::firewall':
+  ensure => stopped
+}
 
 include ::platform::config
 include ::platform::users