Fix Kubernetes make test failure

The Kubernetes make test has been failing since review
https://review.opendev.org/c/starlingx/integ/+/907637 was merged.
This change fixes the k8s make test failure.

This change also decouples the patch
"kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch" by
removing its hardcoded list of namespaces, and adds a new patch
that identifies platform pods by label.

Test Plan:
PASS: Run all kubelet, kubeadm, and kubectl make tests for the affected code.
PASS: All affected versions of the kubernetes package build successfully.
PASS: Create a pod with the platform label. Pod is classified as
      a platform pod.
PASS: Create a pod without the platform label but in a namespace with
      the platform label. Pod is classified as a platform pod.
PASS: Create a pod without the platform label and in a namespace
      without the platform label. Pod is not classified as a platform
      pod.

Closes-Bug: 2058042

Change-Id: I49e93a88f88f8bdbd3dcbf9b0e5ffa13f39e5c24
Signed-off-by: Boovan Rajendran <boovan.rajendran@windriver.com>
Boovan Rajendran 2024-04-01 09:27:14 -04:00
parent 80e8d0206d
commit cf58ab7448
15 changed files with 2275 additions and 1147 deletions
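For orientation before the file diffs: the heart of the change is a label
check that replaces the old hard-coded namespace allowlist. Below is a
condensed sketch of the isKubeInfra() logic added by the patches that follow
(the label key and helper names are taken from the patches; logging and the
clientset plumbing are trimmed):

    // Sketch only: condensed from the isKubeInfra() added in the patches below.
    // varGetNamespaceObject fetches the pod's Namespace object from the API
    // server using the credentials in /etc/kubernetes/kubelet.conf.
    func isPlatformPod(pod *v1.Pod) bool {
        // A pod is a platform pod if it carries the platform label itself...
        if pod.GetLabels()["app.starlingx.io/component"] == "platform" {
            return true
        }
        // ...or if its namespace carries the label.
        ns, err := varGetNamespaceObject(pod.GetNamespace())
        if err != nil {
            return false // namespace lookup failed: treat as non-platform
        }
        return ns.GetLabels()["app.starlingx.io/component"] == "platform"
    }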

View File

@@ -0,0 +1,410 @@
From 5831e62509bfe1aa1bdbf7755a304fe4557e934c Mon Sep 17 00:00:00 2001
From: Boovan Rajendran <boovan.rajendran@windriver.com>
Date: Thu, 28 Mar 2024 06:13:17 -0400
Subject: [PATCH] Identify platform pods based on pod or namespace labels
Currently, for static CPU allocation, pods are identified
as platform pods using a hard-coded list of namespaces.
This change identifies a pod as a platform pod using a label
assigned to it or to its namespace.
Signed-off-by: Boovan Rajendran <boovan.rajendran@windriver.com>
---
pkg/kubelet/cm/cpumanager/policy_static.go | 85 +++++++++--
.../cm/cpumanager/policy_static_test.go | 139 ++++++++++++++++++
.../cm/cpumanager/topology_hints_test.go | 4 +
3 files changed, 217 insertions(+), 11 deletions(-)
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index 34fdaaf0db1..a52dc4047f5 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -17,10 +17,15 @@ limitations under the License.
package cpumanager
import (
+ "context"
"fmt"
"strconv"
+ k8sclient "k8s.io/client-go/kubernetes"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
@@ -41,6 +46,19 @@ const (
ErrorSMTAlignment = "SMTAlignmentError"
)
+type getPodNamespace func(string) (*v1.Namespace, error)
+type buildFromConfigFlag func(masterUrl string, kubeconfigPath string) (*restclient.Config, error)
+type isKubeInfraFunc func(pod *v1.Pod) bool
+
+var varGetNamespaceObject getPodNamespace
+var varBuildConfigFromFlags buildFromConfigFlag
+var varIsKubeInfra isKubeInfraFunc
+
+func init() {
+ varIsKubeInfra = isKubeInfra
+}
+
+
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
@@ -55,11 +73,6 @@ func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
-// Define namespaces used by platform infrastructure pods
-var infraNamespaces = [...]string{
- "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm", "metrics-server", "node-feature-discovery", "intel-power", "power-metrics", "sriov-fec-system",
-}
-
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -289,8 +302,11 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = clientcmd.BuildConfigFromFlags
// Process infra pods before guaranteed pods
- if isKubeInfra(pod) {
+ if varIsKubeInfra(pod) {
// Container belongs in reserved pool.
// We don't want to fall through to the p.guaranteedCPUs() clause below so return either nil or error.
if _, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
@@ -452,7 +468,7 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
return 0
}
// Infrastructure pods use reserved CPUs even if they're in the Guaranteed QoS class
- if isKubeInfra(pod) {
+ if varIsKubeInfra(pod) {
return 0
}
// Safe downcast to do for all systems with < 2.1 billion CPUs.
@@ -673,14 +689,61 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
return hints
}
+func getPodNamespaceObject(podNamespaceName string) (*v1.Namespace, error) {
+
+ cfg, err := varBuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
+ if err != nil {
+ klog.Error("Failed to build client config from /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ clientset, err := k8sclient.NewForConfig(cfg)
+ if err != nil {
+ klog.Error("Failed to get clientset for KUBECONFIG /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ namespaceObj, err := clientset.CoreV1().Namespaces().Get(context.TODO(), podNamespaceName, metav1.GetOptions{})
+ if err != nil {
+ klog.Error("Error getting namespace object:", err.Error())
+ return nil, err
+ }
+
+ return namespaceObj, nil
+
+}
+
// check if a given pod is in a platform infrastructure namespace
func isKubeInfra(pod *v1.Pod) bool {
- for _, namespace := range infraNamespaces {
- if namespace == pod.Namespace {
- return true
- }
+
+ podName := pod.GetName()
+ podNamespaceName := pod.GetNamespace()
+
+ klog.InfoS("Checking pod ", podName , " for label 'app.starlingx.io/component=platform'.")
+ podLabels := pod.GetLabels()
+ val, ok := podLabels["app.starlingx.io/component"]
+ if (ok && val == "platform") {
+ klog.InfoS("Pod ", podName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
}
+
+ klog.InfoS("Pod ", pod.GetName(), " does not have 'app.starlingx.io/component=platform' label. Checking its namespace information...")
+
+ namespaceObj, err := varGetNamespaceObject(podNamespaceName)
+ if err != nil {
+ return false
+ }
+
+ namespaceLabels := namespaceObj.GetLabels()
+ val, ok = namespaceLabels["app.starlingx.io/component"]
+ if ok && val == "platform" {
+ klog.InfoS("For pod: ", podName, ", its Namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
+ }
+
+ klog.InfoS("Neither pod ", podName, " nor its namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Not assigning platform CPUs.")
return false
+
}
// get the isolated CPUs (if any) from the devices associated with a specific container
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 1c43df3b85f..e8076a1337f 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -17,10 +17,13 @@ limitations under the License.
package cpumanager
import (
+ "errors"
"fmt"
"reflect"
"testing"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -877,6 +880,7 @@ type staticPolicyTestWithResvList struct {
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
+ isKubeInfraPodfunc isKubeInfraFunc
expErr error
expNewErr error
expCPUAlloc bool
@@ -949,6 +953,14 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
}
}
+func fakeIsKubeInfraTrue(pod *v1.Pod) bool {
+ return true
+}
+
+func fakeIsKubeInfraFalse(pod *v1.Pod) bool {
+ return false
+}
+
func TestStaticPolicyAddWithResvList(t *testing.T) {
infraPod := makePod("fakePod", "fakeContainer2", "200m", "200m")
infraPod.Namespace = "kube-system"
@@ -962,6 +974,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
@@ -975,6 +988,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4), // expect sibling of partial core
@@ -992,6 +1006,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4, 5),
@@ -1009,6 +1024,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.NewCPUSet(4, 5),
pod: infraPod,
+ isKubeInfraPodfunc: fakeIsKubeInfraTrue,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(0, 1),
@@ -1026,6 +1042,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.NewCPUSet(4, 5),
pod: infraPod,
+ isKubeInfraPodfunc: fakeIsKubeInfraTrue,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(0),
@@ -1042,6 +1059,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
defaultCPUSet: testCase.stDefaultCPUSet,
}
+ varIsKubeInfra = testCase.isKubeInfraPodfunc
container := &testCase.pod.Spec.Containers[0]
err := policy.Allocate(st, testCase.pod, container)
if !reflect.DeepEqual(err, testCase.expErr) {
@@ -1161,3 +1179,124 @@ func TestStaticPolicyOptions(t *testing.T) {
})
}
}
+
+func makePodWithLabels(podLabels map[string]string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-pod",
+ Namespace: "test-namespace",
+ Labels: podLabels,
+ },
+ }
+}
+
+func fakeBuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ return &restclient.Config{}, nil
+}
+
+func fakeBuildConfigFromFlagsError(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ errString := fmt.Sprintf("%s file not found", kubeconfigPath)
+ return nil, errors.New(errString)
+
+}
+
+func getFakeInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "app.starlingx.io/component": "platform",
+ },
+ }}, nil
+}
+
+func getFakeNonInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "fake": "label",
+ }}}, nil
+
+}
+
+type kubeInfraPodTestCase struct {
+ description string
+ pod *v1.Pod
+ namespaceFunc getPodNamespace
+ expectedValue bool
+}
+
+func TestKubeInfraPod(t *testing.T) {
+ testCases := []kubeInfraPodTestCase{
+ {
+ description: "Pod with platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "app.starlingx.io/component": "platform",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: true,
+
+ },
+ {
+ description: "Pod without platform label and namespace with platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "label",
+ }),
+ namespaceFunc: getFakeInfraPodNamespace,
+ expectedValue: true,
+ },
+ {
+ description: "Pod without platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ },
+
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.description, func(t *testing.T) {
+
+ varGetNamespaceObject = testCase.namespaceFunc
+ varBuildConfigFromFlags = fakeBuildConfigFromFlags
+ gotValue := isKubeInfra(testCase.pod)
+
+ if gotValue != testCase.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ testCase.description, testCase.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", testCase.description)
+ }
+
+ })
+ }
+
+ test := kubeInfraPodTestCase{
+ description: "Failure reading kubeconfig file",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ }
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = fakeBuildConfigFromFlagsError
+
+ gotValue := isKubeInfra(test.pod)
+
+ if gotValue != test.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ test.description, test.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", test.description)
+ }
+
+}
diff --git a/pkg/kubelet/cm/cpumanager/topology_hints_test.go b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
index 02d064e71e3..d84b8240c30 100644
--- a/pkg/kubelet/cm/cpumanager/topology_hints_test.go
+++ b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
@@ -145,6 +145,7 @@ func TestPodGuaranteedCPUs(t *testing.T) {
expectedCPU: 6,
},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
for _, tc := range tcases {
requestedCPU := p.podGuaranteedCPUs(tc.pod)
@@ -187,6 +188,7 @@ func TestGetTopologyHints(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
hints := m.GetTopologyHints(&tc.pod, &tc.container)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(hints) == 0 {
continue
@@ -240,6 +242,7 @@ func TestGetPodTopologyHints(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
podHints := m.GetPodTopologyHints(&tc.pod)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(podHints) == 0 {
continue
@@ -423,6 +426,7 @@ func TestGetPodTopologyHintsWithPolicyOptions(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
podHints := m.GetPodTopologyHints(&testCase.pod)[string(v1.ResourceCPU)]
sort.SliceStable(podHints, func(i, j int) bool {
return podHints[i].LessThan(podHints[j])
--
2.25.1

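A note on the pattern in the patch above: isKubeInfra(),
getPodNamespaceObject(), and clientcmd.BuildConfigFromFlags() are reached
through package-level function variables (varIsKubeInfra,
varGetNamespaceObject, varBuildConfigFromFlags). That seam is what lets the
unit tests substitute fakes such as fakeIsKubeInfraFalse or
fakeBuildConfigFromFlagsError instead of talking to a real API server. A
minimal sketch of the idiom, using the names from the patch:

    // The production default is installed in init(); a test overwrites the
    // variable for the duration of a test case.
    type isKubeInfraFunc func(pod *v1.Pod) bool

    var varIsKubeInfra isKubeInfraFunc

    func init() {
        varIsKubeInfra = isKubeInfra // real, label-based implementation
    }

    // In a test:
    //     varIsKubeInfra = fakeIsKubeInfraFalse // force the non-platform path
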
View File

@@ -1,4 +1,4 @@
From 27f0c5ba4cdd5f8f850fa4a5b110a39eaba7cd65 Mon Sep 17 00:00:00 2001
From e8608eb7c300b6e0503885a4848fafc75f20d909 Mon Sep 17 00:00:00 2001
From: Ramesh Kumar Sivanandam <rameshkumar.sivanandam@windriver.com>
Date: Mon, 7 Nov 2022 13:33:03 -0500
Subject: [PATCH] kubelet cpumanager introduce concept of isolated CPUs
@@ -45,14 +45,13 @@ Co-authored-by: Jim Gauld <james.gauld@windriver.com>
Co-authored-by: Chris Friesen <chris.friesen@windriver.com>
Signed-off-by: Gleb Aronsky <gleb.aronsky@windriver.com>
Signed-off-by: Ramesh Kumar Sivanandam <rameshkumar.sivanandam@windriver.com>
Signed-off-by: Kaustubh Dhokte <kaustubh.dhokte@windriver.com>
---
pkg/kubelet/cm/container_manager_linux.go | 1 +
pkg/kubelet/cm/cpumanager/cpu_manager.go | 35 +++-
pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 20 +-
pkg/kubelet/cm/cpumanager/policy_static.go | 158 ++++++++++++++--
.../cm/cpumanager/policy_static_test.go | 177 +++++++++++++++++-
5 files changed, 362 insertions(+), 29 deletions(-)
pkg/kubelet/cm/container_manager_linux.go | 1 +
pkg/kubelet/cm/cpumanager/cpu_manager.go | 35 +++++++-
pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 20 ++++-
pkg/kubelet/cm/cpumanager/policy_static.go | 83 +++++++++++++++++--
.../cm/cpumanager/policy_static_test.go | 53 +++++++++---
5 files changed, 172 insertions(+), 20 deletions(-)
diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
index 44c8cda6c40..a3f92b23c69 100644
@@ -243,24 +242,18 @@ index d553b182e0b..57f3f9a1c97 100644
t.Errorf("Expected error, but NewManager succeeded")
}
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index 4fc96303622..4631841fe01 100644
index 341e9f3dffe..802e289bfaf 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -17,14 +17,21 @@ limitations under the License.
package cpumanager
@@ -18,6 +18,7 @@ package cpumanager
import (
+ "context"
"fmt"
+ "strconv"
+ k8sclient "k8s.io/client-go/kubernetes"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
@@ -25,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@@ -268,33 +261,7 @@ index 4fc96303622..4631841fe01 100644
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)
@@ -39,6 +46,13 @@ const (
ErrorSMTAlignment = "SMTAlignmentError"
)
+type getPodNamespace func(string) (*v1.Namespace, error)
+type buildFromConfigFlag func(masterUrl string, kubeconfigPath string) (*restclient.Config, error)
+
+var varGetNamespaceObject getPodNamespace
+var varBuildConfigFromFlags buildFromConfigFlag
+
+
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
@@ -53,11 +67,6 @@ func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
-// Define namespaces used by platform infrastructure pods
-var infraNamespaces = [...]string{
- "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm", "metrics-server", "node-feature-discovery", "intel-power", "power-metrics", "sriov-fec-system",
-}
-
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -101,6 +110,10 @@ type staticPolicy struct {
@@ -101,6 +103,10 @@ type staticPolicy struct {
topology *topology.CPUTopology
// set of CPUs that is not available for exclusive assignment
reserved cpuset.CPUSet
@ -305,7 +272,7 @@ index 4fc96303622..4631841fe01 100644
// If true, default CPUSet should exclude reserved CPUs
excludeReserved bool
// topology manager reference to get container Topology affinity
@@ -117,7 +130,8 @@ var _ Policy = &staticPolicy{}
@@ -117,7 +123,8 @@ var _ Policy = &staticPolicy{}
// NewStaticPolicy returns a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@ -315,7 +282,7 @@ index 4fc96303622..4631841fe01 100644
opts, err := NewStaticPolicyOptions(cpuPolicyOptions)
if err != nil {
return nil, err
@@ -132,6 +146,8 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
@@ -132,6 +139,8 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
policy := &staticPolicy{
topology: topology,
affinity: affinity,
@ -324,7 +291,7 @@ index 4fc96303622..4631841fe01 100644
excludeReserved: excludeReserved,
cpusToReuse: make(map[string]cpuset.CPUSet),
options: opts,
@@ -158,6 +174,12 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
@@ -158,6 +167,12 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
klog.InfoS("Reserved CPUs not available for exclusive assignment", "reservedSize", reserved.Size(), "reserved", reserved)
policy.reserved = reserved
@ -337,7 +304,7 @@ index 4fc96303622..4631841fe01 100644
return policy, nil
}
@@ -191,8 +213,9 @@ func (p *staticPolicy) validateState(s state.State) error {
@@ -191,8 +206,9 @@ func (p *staticPolicy) validateState(s state.State) error {
} else {
s.SetDefaultCPUSet(allCPUs)
}
@ -349,17 +316,7 @@ index 4fc96303622..4631841fe01 100644
return nil
}
@@ -273,6 +296,9 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = clientcmd.BuildConfigFromFlags
// Process infra pods before guaranteed pods
if isKubeInfra(pod) {
// Container belongs in reserved pool.
@@ -282,10 +308,11 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
@@ -282,10 +298,11 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
return nil
}
@ -373,7 +330,7 @@ index 4fc96303622..4631841fe01 100644
}
s.SetCPUSet(string(pod.UID), container.Name, cpuset)
klog.Infof("[cpumanager] static policy: reserved: AddContainer (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset)
@@ -329,8 +356,34 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
@@ -329,8 +346,34 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
}
s.SetCPUSet(string(pod.UID), container.Name, cpuset)
p.updateCPUsToReuse(pod, container, cpuset)
@ -408,71 +365,10 @@ index 4fc96303622..4631841fe01 100644
// container belongs in the shared pool (nothing to do; use default cpuset)
return nil
}
@@ -630,14 +683,91 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
return hints
@@ -640,6 +683,36 @@ func isKubeInfra(pod *v1.Pod) bool {
return false
}
+func getPodNamespaceObject(podNamespaceName string) (*v1.Namespace, error) {
+
+ cfg, err := varBuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
+ if err != nil {
+ klog.Error("Failed to build client config from /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ clientset, err := k8sclient.NewForConfig(cfg)
+ if err != nil {
+ klog.Error("Failed to get clientset for KUBECONFIG /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ namespaceObj, err := clientset.CoreV1().Namespaces().Get(context.TODO(), podNamespaceName, metav1.GetOptions{})
+ if err != nil {
+ klog.Error("Error getting namespace object:", err.Error())
+ return nil, err
+ }
+
+ return namespaceObj, nil
+
+}
+
// check if a given pod is in a platform infrastructure namespace
func isKubeInfra(pod *v1.Pod) bool {
- for _, namespace := range infraNamespaces {
- if namespace == pod.Namespace {
- return true
- }
+
+ podName := pod.GetName()
+ podNamespaceName := pod.GetNamespace()
+
+ klog.InfoS("Checking pod ", podName , " for label 'app.starlingx.io/component=platform'.")
+ podLabels := pod.GetLabels()
+ val, ok := podLabels["app.starlingx.io/component"]
+ if (ok && val == "platform") {
+ klog.InfoS("Pod ", podName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
}
+
+ klog.InfoS("Pod ", pod.GetName(), " does not have 'app.starlingx.io/component=platform' label. Checking its namespace information...")
+
+ namespaceObj, err := varGetNamespaceObject(podNamespaceName)
+ if err != nil {
+ return false
+ }
+
+ namespaceLabels := namespaceObj.GetLabels()
+ val, ok = namespaceLabels["app.starlingx.io/component"]
+ if ok && val == "platform" {
+ klog.InfoS("For pod: ", podName, ", its Namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
+ }
+
+ klog.InfoS("Neither pod ", podName, " nor its namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Not assigning platform CPUs.")
return false
+
+}
+
+// get the isolated CPUs (if any) from the devices associated with a specific container
+func (p *staticPolicy) podIsolCPUs(pod *v1.Pod, container *v1.Container) cpuset.CPUSet {
+ // NOTE: This is required for TestStaticPolicyAdd() since makePod() does
@ -501,28 +397,16 @@ index 4fc96303622..4631841fe01 100644
+ }
+ }
+ return cpuSet
}
+}
+
// isHintSocketAligned function return true if numa nodes in hint are socket aligned.
func (p *staticPolicy) isHintSocketAligned(hint topologymanager.TopologyHint, minAffinitySize int) bool {
numaNodesBitMask := hint.NUMANodeAffinity.GetBits()
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 414e5ce144c..b6aad48576f 100644
index 414e5ce144c..1c43df3b85f 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -17,10 +17,13 @@ limitations under the License.
package cpumanager
import (
+ "errors"
"fmt"
"reflect"
"testing"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -28,6 +31,7 @@ import (
@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@ -530,7 +414,7 @@ index 414e5ce144c..b6aad48576f 100644
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)
@@ -69,8 +73,9 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
@@ -69,8 +70,9 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
}
func TestStaticPolicyName(t *testing.T) {
@ -541,7 +425,7 @@ index 414e5ce144c..b6aad48576f 100644
policyName := policy.Name()
if policyName != "static" {
@@ -80,6 +85,7 @@ func TestStaticPolicyName(t *testing.T) {
@@ -80,6 +82,7 @@ func TestStaticPolicyName(t *testing.T) {
}
func TestStaticPolicyStart(t *testing.T) {
@ -549,7 +433,7 @@ index 414e5ce144c..b6aad48576f 100644
testCases := []staticPolicyTest{
{
description: "non-corrupted state",
@@ -155,7 +161,7 @@ func TestStaticPolicyStart(t *testing.T) {
@@ -155,7 +158,7 @@ func TestStaticPolicyStart(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
@ -558,7 +442,7 @@ index 414e5ce144c..b6aad48576f 100644
policy := p.(*staticPolicy)
st := &mockState{
@@ -203,7 +209,6 @@ func TestStaticPolicyAdd(t *testing.T) {
@@ -203,7 +206,6 @@ func TestStaticPolicyAdd(t *testing.T) {
largeTopoCPUSet := largeTopoBuilder.Result()
largeTopoSock0CPUSet := largeTopoSock0Builder.Result()
largeTopoSock1CPUSet := largeTopoSock1Builder.Result()
@ -566,7 +450,7 @@ index 414e5ce144c..b6aad48576f 100644
// these are the cases which must behave the same regardless the policy options.
// So we will permutate the options to ensure this holds true.
@@ -577,8 +582,10 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
@@ -577,8 +579,10 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
if testCase.topologyHint != nil {
tm = topologymanager.NewFakeManagerWithHint(testCase.topologyHint)
}
@ -578,7 +462,7 @@ index 414e5ce144c..b6aad48576f 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -625,6 +632,8 @@ func runStaticPolicyTestCaseWithFeatureGate(t *testing.T, testCase staticPolicyT
@@ -625,6 +629,8 @@ func runStaticPolicyTestCaseWithFeatureGate(t *testing.T, testCase staticPolicyT
}
func TestStaticPolicyReuseCPUs(t *testing.T) {
@ -587,7 +471,7 @@ index 414e5ce144c..b6aad48576f 100644
testCases := []struct {
staticPolicyTest
expCSetAfterAlloc cpuset.CPUSet
@@ -649,7 +658,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
@@ -649,7 +655,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
}
for _, testCase := range testCases {
@ -596,7 +480,7 @@ index 414e5ce144c..b6aad48576f 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -682,6 +691,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
@@ -682,6 +688,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
func TestStaticPolicyRemove(t *testing.T) {
excludeReserved := false
@ -604,7 +488,7 @@ index 414e5ce144c..b6aad48576f 100644
testCases := []staticPolicyTest{
{
description: "SingleSocketHT, DeAllocOneContainer",
@@ -740,7 +750,7 @@ func TestStaticPolicyRemove(t *testing.T) {
@@ -740,7 +747,7 @@ func TestStaticPolicyRemove(t *testing.T) {
}
for _, testCase := range testCases {
@ -613,7 +497,7 @@ index 414e5ce144c..b6aad48576f 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -763,6 +773,7 @@ func TestStaticPolicyRemove(t *testing.T) {
@@ -763,6 +770,7 @@ func TestStaticPolicyRemove(t *testing.T) {
func TestTopologyAwareAllocateCPUs(t *testing.T) {
excludeReserved := false
@ -621,7 +505,7 @@ index 414e5ce144c..b6aad48576f 100644
testCases := []struct {
description string
topo *topology.CPUTopology
@@ -831,7 +842,8 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
@@ -831,7 +839,8 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
},
}
for _, tc := range testCases {
@ -631,7 +515,7 @@ index 414e5ce144c..b6aad48576f 100644
policy := p.(*staticPolicy)
st := &mockState{
assignments: tc.stAssignments,
@@ -864,6 +876,7 @@ type staticPolicyTestWithResvList struct {
@@ -864,6 +873,7 @@ type staticPolicyTestWithResvList struct {
topo *topology.CPUTopology
numReservedCPUs int
reserved cpuset.CPUSet
@ -639,7 +523,7 @@ index 414e5ce144c..b6aad48576f 100644
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
@@ -874,6 +887,8 @@ type staticPolicyTestWithResvList struct {
@@ -874,6 +884,8 @@ type staticPolicyTestWithResvList struct {
}
func TestStaticPolicyStartWithResvList(t *testing.T) {
@ -648,7 +532,7 @@ index 414e5ce144c..b6aad48576f 100644
testCases := []staticPolicyTestWithResvList{
{
description: "empty cpuset",
@@ -903,11 +918,10 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
@@ -903,11 +915,10 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"),
},
}
@ -661,7 +545,7 @@ index 414e5ce144c..b6aad48576f 100644
if !reflect.DeepEqual(err, testCase.expNewErr) {
t.Errorf("StaticPolicy Start() error (%v). expected error: %v but got: %v",
testCase.description, testCase.expNewErr, err)
@@ -947,6 +961,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -947,6 +958,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 1,
reserved: cpuset.NewCPUSet(0),
@ -669,7 +553,7 @@ index 414e5ce144c..b6aad48576f 100644
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
@@ -959,6 +974,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -959,6 +971,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
@ -677,7 +561,7 @@ index 414e5ce144c..b6aad48576f 100644
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
@@ -971,6 +987,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -971,6 +984,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
@ -685,7 +569,7 @@ index 414e5ce144c..b6aad48576f 100644
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
@@ -987,6 +1004,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -987,6 +1001,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
@ -693,7 +577,7 @@ index 414e5ce144c..b6aad48576f 100644
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
@@ -998,11 +1016,29 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -998,11 +1013,29 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(0, 1),
},
@ -724,131 +608,6 @@ index 414e5ce144c..b6aad48576f 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -1128,3 +1164,124 @@ func TestStaticPolicyOptions(t *testing.T) {
})
}
}
+
+func makePodWithLabels(podLabels map[string]string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-pod",
+ Namespace: "test-namespace",
+ Labels: podLabels,
+ },
+ }
+}
+
+func fakeBuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ return &restclient.Config{}, nil
+}
+
+func fakeBuildConfigFromFlagsError(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ errString := fmt.Sprintf("%s file not found", kubeconfigPath)
+ return nil, errors.New(errString)
+
+}
+
+func getFakeInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "app.starlingx.io/component": "platform",
+ },
+ }}, nil
+}
+
+func getFakeNonInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "fake": "label",
+ }}}, nil
+
+}
+
+type kubeInfraPodTestCase struct {
+ description string
+ pod *v1.Pod
+ namespaceFunc getPodNamespace
+ expectedValue bool
+}
+
+func TestKubeInfraPod(t *testing.T) {
+ testCases := []kubeInfraPodTestCase{
+ {
+ description: "Pod with platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "app.starlingx.io/component": "platform",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: true,
+
+ },
+ {
+ description: "Pod without platform label and namespace with platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "label",
+ }),
+ namespaceFunc: getFakeInfraPodNamespace,
+ expectedValue: true,
+ },
+ {
+ description: "Pod without platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ },
+
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.description, func(t *testing.T) {
+
+ varGetNamespaceObject = testCase.namespaceFunc
+ varBuildConfigFromFlags = fakeBuildConfigFromFlags
+ gotValue := isKubeInfra(testCase.pod)
+
+ if gotValue != testCase.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ testCase.description, testCase.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", testCase.description)
+ }
+
+ })
+ }
+
+ test := kubeInfraPodTestCase{
+ description: "Failure reading kubeconfig file",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ }
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = fakeBuildConfigFromFlagsError
+
+ gotValue := isKubeInfra(test.pod)
+
+ if gotValue != test.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ test.description, test.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", test.description)
+ }
+
+}
--
2.25.1

View File

@@ -9,3 +9,4 @@ kubelet-CFS-quota-throttling-for-non-integer-cpulimit.patch
Revert-kubeadm-remove-RemoveOldControlPlaneLabel.patch
Revert-kubeadm-cleanup-the-master-taint-on-CP-nodes-.patch
kubeadm-reduce-UpgradeManifestTimeout.patch
Identify-platform-pods-based-on-pod-or-namespace-labels.patch

View File

@@ -0,0 +1,411 @@
From 83f176cb68b9356dfc53103f8e356cd58d2f1daf Mon Sep 17 00:00:00 2001
From: Boovan Rajendran <boovan.rajendran@windriver.com>
Date: Thu, 28 Mar 2024 11:28:39 -0400
Subject: [PATCH] Identify platform pods based on pod or namespace labels
Currently, for static CPU allocation, pods are identified
as platform pods using a hard-coded list of namespaces.
This change identifies a pod as a platform pod using a label
assigned to it or to its namespace.
Signed-off-by: Boovan Rajendran <boovan.rajendran@windriver.com>
---
pkg/kubelet/cm/cpumanager/policy_static.go | 85 +++++++++--
.../cm/cpumanager/policy_static_test.go | 140 +++++++++++++++++-
.../cm/cpumanager/topology_hints_test.go | 4 +
3 files changed, 217 insertions(+), 12 deletions(-)
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index 125429035a5..9368aeaf825 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -17,10 +17,15 @@ limitations under the License.
package cpumanager
import (
+ "context"
"fmt"
"strconv"
+ k8sclient "k8s.io/client-go/kubernetes"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
@@ -43,6 +48,19 @@ const (
ErrorSMTAlignment = "SMTAlignmentError"
)
+type getPodNamespace func(string) (*v1.Namespace, error)
+type buildFromConfigFlag func(masterUrl string, kubeconfigPath string) (*restclient.Config, error)
+type isKubeInfraFunc func(pod *v1.Pod) bool
+
+var varGetNamespaceObject getPodNamespace
+var varBuildConfigFromFlags buildFromConfigFlag
+var varIsKubeInfra isKubeInfraFunc
+
+func init() {
+ varIsKubeInfra = isKubeInfra
+}
+
+
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
@@ -58,11 +76,6 @@ func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
-// Define namespaces used by platform infrastructure pods
-var infraNamespaces = [...]string{
- "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm", "metrics-server", "node-feature-discovery", "intel-power", "power-metrics", "sriov-fec-system",
-}
-
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -297,8 +310,11 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = clientcmd.BuildConfigFromFlags
// Process infra pods before guaranteed pods
- if isKubeInfra(pod) {
+ if varIsKubeInfra(pod) {
// Container belongs in reserved pool.
// We don't want to fall through to the p.guaranteedCPUs() clause below so return either nil or error.
if _, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
@@ -470,7 +486,7 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
return 0
}
// Infrastructure pods use reserved CPUs even if they're in the Guaranteed QoS class
- if isKubeInfra(pod) {
+ if varIsKubeInfra(pod) {
return 0
}
// Safe downcast to do for all systems with < 2.1 billion CPUs.
@@ -691,14 +707,61 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
return hints
}
+func getPodNamespaceObject(podNamespaceName string) (*v1.Namespace, error) {
+
+ cfg, err := varBuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
+ if err != nil {
+ klog.Error("Failed to build client config from /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ clientset, err := k8sclient.NewForConfig(cfg)
+ if err != nil {
+ klog.Error("Failed to get clientset for KUBECONFIG /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ namespaceObj, err := clientset.CoreV1().Namespaces().Get(context.TODO(), podNamespaceName, metav1.GetOptions{})
+ if err != nil {
+ klog.Error("Error getting namespace object:", err.Error())
+ return nil, err
+ }
+
+ return namespaceObj, nil
+
+}
+
// check if a given pod is in a platform infrastructure namespace
func isKubeInfra(pod *v1.Pod) bool {
- for _, namespace := range infraNamespaces {
- if namespace == pod.Namespace {
- return true
- }
+
+ podName := pod.GetName()
+ podNamespaceName := pod.GetNamespace()
+
+ klog.InfoS("Checking pod ", podName , " for label 'app.starlingx.io/component=platform'.")
+ podLabels := pod.GetLabels()
+ val, ok := podLabels["app.starlingx.io/component"]
+ if (ok && val == "platform") {
+ klog.InfoS("Pod ", podName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
}
+
+ klog.InfoS("Pod ", pod.GetName(), " does not have 'app.starlingx.io/component=platform' label. Checking its namespace information...")
+
+ namespaceObj, err := varGetNamespaceObject(podNamespaceName)
+ if err != nil {
+ return false
+ }
+
+ namespaceLabels := namespaceObj.GetLabels()
+ val, ok = namespaceLabels["app.starlingx.io/component"]
+ if ok && val == "platform" {
+ klog.InfoS("For pod: ", podName, ", its Namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
+ }
+
+ klog.InfoS("Neither pod ", podName, " nor its namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Not assigning platform CPUs.")
return false
+
}
// get the isolated CPUs (if any) from the devices associated with a specific container
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 1c43df3b85f..58e7aeee04f 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -17,10 +17,13 @@ limitations under the License.
package cpumanager
import (
+ "errors"
"fmt"
"reflect"
"testing"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -877,6 +880,7 @@ type staticPolicyTestWithResvList struct {
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
+ isKubeInfraPodfunc isKubeInfraFunc
expErr error
expNewErr error
expCPUAlloc bool
@@ -949,6 +953,14 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
}
}
+func fakeIsKubeInfraTrue(pod *v1.Pod) bool {
+ return true
+}
+
+func fakeIsKubeInfraFalse(pod *v1.Pod) bool {
+ return false
+}
+
func TestStaticPolicyAddWithResvList(t *testing.T) {
infraPod := makePod("fakePod", "fakeContainer2", "200m", "200m")
infraPod.Namespace = "kube-system"
@@ -962,6 +974,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
@@ -975,6 +988,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4), // expect sibling of partial core
@@ -992,6 +1006,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4, 5),
@@ -1009,6 +1024,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.NewCPUSet(4, 5),
pod: infraPod,
+ isKubeInfraPodfunc: fakeIsKubeInfraTrue,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(0, 1),
@@ -1026,6 +1042,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.NewCPUSet(4, 5),
pod: infraPod,
+ isKubeInfraPodfunc: fakeIsKubeInfraTrue,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(0),
@@ -1041,7 +1058,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
-
+ varIsKubeInfra = testCase.isKubeInfraPodfunc
container := &testCase.pod.Spec.Containers[0]
err := policy.Allocate(st, testCase.pod, container)
if !reflect.DeepEqual(err, testCase.expErr) {
@@ -1161,3 +1178,124 @@ func TestStaticPolicyOptions(t *testing.T) {
})
}
}
+
+func makePodWithLabels(podLabels map[string]string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-pod",
+ Namespace: "test-namespace",
+ Labels: podLabels,
+ },
+ }
+}
+
+func fakeBuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ return &restclient.Config{}, nil
+}
+
+func fakeBuildConfigFromFlagsError(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ errString := fmt.Sprintf("%s file not found", kubeconfigPath)
+ return nil, errors.New(errString)
+
+}
+
+func getFakeInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "app.starlingx.io/component": "platform",
+ },
+ }}, nil
+}
+
+func getFakeNonInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "fake": "label",
+ }}}, nil
+
+}
+
+type kubeInfraPodTestCase struct {
+ description string
+ pod *v1.Pod
+ namespaceFunc getPodNamespace
+ expectedValue bool
+}
+
+func TestKubeInfraPod(t *testing.T) {
+ testCases := []kubeInfraPodTestCase{
+ {
+ description: "Pod with platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "app.starlingx.io/component": "platform",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: true,
+
+ },
+ {
+ description: "Pod without platform label and namespace with platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "label",
+ }),
+ namespaceFunc: getFakeInfraPodNamespace,
+ expectedValue: true,
+ },
+ {
+ description: "Pod without platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ },
+
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.description, func(t *testing.T) {
+
+ varGetNamespaceObject = testCase.namespaceFunc
+ varBuildConfigFromFlags = fakeBuildConfigFromFlags
+ gotValue := isKubeInfra(testCase.pod)
+
+ if gotValue != testCase.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ testCase.description, testCase.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", testCase.description)
+ }
+
+ })
+ }
+
+ test := kubeInfraPodTestCase{
+ description: "Failure reading kubeconfig file",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ }
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = fakeBuildConfigFromFlagsError
+
+ gotValue := isKubeInfra(test.pod)
+
+ if gotValue != test.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ test.description, test.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", test.description)
+ }
+
+}
diff --git a/pkg/kubelet/cm/cpumanager/topology_hints_test.go b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
index 5b6951cb2d7..d1ff1288306 100644
--- a/pkg/kubelet/cm/cpumanager/topology_hints_test.go
+++ b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
@@ -145,6 +145,7 @@ func TestPodGuaranteedCPUs(t *testing.T) {
expectedCPU: 6,
},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
for _, tc := range tcases {
requestedCPU := p.podGuaranteedCPUs(tc.pod)
@@ -187,6 +188,7 @@ func TestGetTopologyHints(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
hints := m.GetTopologyHints(&tc.pod, &tc.container)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(hints) == 0 {
continue
@@ -240,6 +242,7 @@ func TestGetPodTopologyHints(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
podHints := m.GetPodTopologyHints(&tc.pod)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(podHints) == 0 {
continue
@@ -423,6 +426,7 @@ func TestGetPodTopologyHintsWithPolicyOptions(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
podHints := m.GetPodTopologyHints(&testCase.pod)[string(v1.ResourceCPU)]
sort.SliceStable(podHints, func(i, j int) bool {
return podHints[i].LessThan(podHints[j])
--
2.25.1

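Taken together, the two patches give the static policy's Allocate() a
three-way decision per container. A condensed sketch of the control flow,
assembled from the hunks above (error handling omitted; the podIsolCPUs()
body is elided in this view, so only its role is noted):

    if varIsKubeInfra(pod) {
        // Platform pod: the container is pinned to the reserved CPU pool.
    } else if numCPUs := p.guaranteedCPUs(pod, container); numCPUs != 0 {
        // Guaranteed pod with an integer CPU request: exclusive CPUs,
        // excluding any isolated CPUs claimed through podIsolCPUs().
    } else {
        // Anything else stays in the shared pool (default cpuset).
    }
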
View File

@@ -1,7 +1,7 @@
From 36170864cc9ebb2183e6301cb745e49238d21397 Mon Sep 17 00:00:00 2001
From ed1f8c6a04e7fed096eaae5081c2b5e0c3bc6fed Mon Sep 17 00:00:00 2001
From: Ramesh Kumar Sivanandam <rameshkumar.sivanandam@windriver.com>
Date: Mon, 7 Nov 2022 13:33:03 -0500
Subject: [PATCH] kubelet cpumanager introduce concept of isolated CPUs
Subject: [PATCH 08/10] kubelet cpumanager introduce concept of isolated CPUs
This introduces the concept of "isolated CPUs", which are CPUs that
have been isolated at the kernel level via the "isolcpus" kernel boot
@@ -46,15 +46,14 @@ Co-authored-by: Chris Friesen <chris.friesen@windriver.com>
Signed-off-by: Gleb Aronsky <gleb.aronsky@windriver.com>
Signed-off-by: Ramesh Kumar Sivanandam <rameshkumar.sivanandam@windriver.com>
Signed-off-by: Sachin Gopala Krishna <saching.krishna@windriver.com>
Signed-off-by: Kaustubh Dhokte <kaustubh.dhokte@windriver.com>
---
pkg/kubelet/cm/container_manager_linux.go | 1 +
pkg/kubelet/cm/cpumanager/cpu_manager.go | 35 +++-
pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 23 ++-
pkg/kubelet/cm/cpumanager/policy_static.go | 157 +++++++++++++--
.../cm/cpumanager/policy_static_test.go | 178 +++++++++++++++++-
pkg/kubelet/cm/devicemanager/manager_stub.go | 99 ++++++++++
6 files changed, 463 insertions(+), 30 deletions(-)
pkg/kubelet/cm/container_manager_linux.go | 1 +
pkg/kubelet/cm/cpumanager/cpu_manager.go | 35 ++++++-
pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 23 ++++-
pkg/kubelet/cm/cpumanager/policy_static.go | 83 ++++++++++++++--
.../cm/cpumanager/policy_static_test.go | 53 ++++++++--
pkg/kubelet/cm/devicemanager/manager_stub.go | 99 +++++++++++++++++++
6 files changed, 273 insertions(+), 21 deletions(-)
create mode 100644 pkg/kubelet/cm/devicemanager/manager_stub.go
diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
@@ -265,25 +264,18 @@ index e7c74453472..78b4ada1a73 100644
testCases := []struct {
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index 26eb400cee6..13bcad5a531 100644
index 180d018565c..8d18ce65309 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -17,15 +17,22 @@ limitations under the License.
package cpumanager
@@ -18,6 +18,7 @@ package cpumanager
import (
+ "context"
"fmt"
+ "strconv"
+ k8sclient "k8s.io/client-go/kubernetes"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
@@ -26,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@ -291,32 +283,7 @@ index 26eb400cee6..13bcad5a531 100644
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
"k8s.io/kubernetes/pkg/kubelet/metrics"
@@ -41,6 +48,12 @@ const (
ErrorSMTAlignment = "SMTAlignmentError"
)
+type getPodNamespace func(string) (*v1.Namespace, error)
+type buildFromConfigFlag func(masterUrl string, kubeconfigPath string) (*restclient.Config, error)
+
+var varGetNamespaceObject getPodNamespace
+var varBuildConfigFromFlags buildFromConfigFlag
+
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
@@ -56,11 +69,6 @@ func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
-// Define namespaces used by platform infrastructure pods
-var infraNamespaces = [...]string{
- "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm", "metrics-server", "node-feature-discovery", "intel-power", "power-metrics", "sriov-fec-system",
-}
-
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -104,6 +112,10 @@ type staticPolicy struct {
@@ -104,6 +106,10 @@ type staticPolicy struct {
topology *topology.CPUTopology
// set of CPUs that is not available for exclusive assignment
reserved cpuset.CPUSet
@ -327,7 +294,7 @@ index 26eb400cee6..13bcad5a531 100644
// If true, default CPUSet should exclude reserved CPUs
excludeReserved bool
// topology manager reference to get container Topology affinity
@@ -120,7 +132,8 @@ var _ Policy = &staticPolicy{}
@@ -120,7 +126,8 @@ var _ Policy = &staticPolicy{}
// NewStaticPolicy returns a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@ -337,7 +304,7 @@ index 26eb400cee6..13bcad5a531 100644
opts, err := NewStaticPolicyOptions(cpuPolicyOptions)
if err != nil {
return nil, err
@@ -135,6 +148,8 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
@@ -135,6 +142,8 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
policy := &staticPolicy{
topology: topology,
affinity: affinity,
@ -346,7 +313,7 @@ index 26eb400cee6..13bcad5a531 100644
excludeReserved: excludeReserved,
cpusToReuse: make(map[string]cpuset.CPUSet),
options: opts,
@@ -161,6 +176,12 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
@@ -161,6 +170,12 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
klog.InfoS("Reserved CPUs not available for exclusive assignment", "reservedSize", reserved.Size(), "reserved", reserved)
policy.reserved = reserved
@ -359,7 +326,7 @@ index 26eb400cee6..13bcad5a531 100644
return policy, nil
}
@@ -194,8 +215,9 @@ func (p *staticPolicy) validateState(s state.State) error {
@@ -194,8 +209,9 @@ func (p *staticPolicy) validateState(s state.State) error {
} else {
s.SetDefaultCPUSet(allCPUs)
}
@ -371,17 +338,7 @@ index 26eb400cee6..13bcad5a531 100644
return nil
}
@@ -281,6 +303,9 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = clientcmd.BuildConfigFromFlags
// Process infra pods before guaranteed pods
if isKubeInfra(pod) {
// Container belongs in reserved pool.
@@ -290,16 +315,39 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
@@ -290,16 +306,39 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
return nil
}
@ -423,7 +380,7 @@ index 26eb400cee6..13bcad5a531 100644
numCPUs := p.guaranteedCPUs(pod, container)
if numCPUs == 0 {
// container belongs in the shared pool (nothing to do; use default cpuset)
@@ -348,7 +396,9 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
@@ -348,7 +387,9 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
}
s.SetCPUSet(string(pod.UID), container.Name, cpuset)
p.updateCPUsToReuse(pod, container, cpuset)
@ -434,71 +391,10 @@ index 26eb400cee6..13bcad5a531 100644
return nil
}
@@ -647,14 +697,91 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
return hints
@@ -657,6 +698,36 @@ func isKubeInfra(pod *v1.Pod) bool {
return false
}
+func getPodNamespaceObject(podNamespaceName string) (*v1.Namespace, error) {
+
+ cfg, err := varBuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
+ if err != nil {
+ klog.Error("Failed to build client config from /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ clientset, err := k8sclient.NewForConfig(cfg)
+ if err != nil {
+ klog.Error("Failed to get clientset for KUBECONFIG /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ namespaceObj, err := clientset.CoreV1().Namespaces().Get(context.TODO(), podNamespaceName, metav1.GetOptions{})
+ if err != nil {
+ klog.Error("Error getting namespace object:", err.Error())
+ return nil, err
+ }
+
+ return namespaceObj, nil
+
+}
+
// check if a given pod is in a platform infrastructure namespace
func isKubeInfra(pod *v1.Pod) bool {
- for _, namespace := range infraNamespaces {
- if namespace == pod.Namespace {
- return true
- }
+
+ podName := pod.GetName()
+ podNamespaceName := pod.GetNamespace()
+
+ klog.InfoS("Checking pod ", podName , " for label 'app.starlingx.io/component=platform'.")
+ podLabels := pod.GetLabels()
+ val, ok := podLabels["app.starlingx.io/component"]
+ if (ok && val == "platform") {
+ klog.InfoS("Pod ", podName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
}
+
+ klog.InfoS("Pod ", pod.GetName(), " does not have 'app.starlingx.io/component=platform' label. Checking its namespace information...")
+
+ namespaceObj, err := varGetNamespaceObject(podNamespaceName)
+ if err != nil {
+ return false
+ }
+
+ namespaceLabels := namespaceObj.GetLabels()
+ val, ok = namespaceLabels["app.starlingx.io/component"]
+ if ok && val == "platform" {
+ klog.InfoS("For pod: ", podName, ", its Namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
+ }
+
+ klog.InfoS("Neither pod ", podName, " nor its namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Not assigning platform CPUs.")
return false
+
+}
+
+// get the isolated CPUs (if any) from the devices associated with a specific container
+func (p *staticPolicy) podIsolCPUs(pod *v1.Pod, container *v1.Container) cpuset.CPUSet {
+ // NOTE: This is required for TestStaticPolicyAdd() since makePod() does
@@ -527,28 +423,16 @@ index 26eb400cee6..13bcad5a531 100644
+ }
+ }
+ return cpuSet
}
+}
+
// isHintSocketAligned function return true if numa nodes in hint are socket aligned.
func (p *staticPolicy) isHintSocketAligned(hint topologymanager.TopologyHint, minAffinitySize int) bool {
numaNodesBitMask := hint.NUMANodeAffinity.GetBits()
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 414e5ce144c..f79c23accb4 100644
index 414e5ce144c..1c43df3b85f 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -17,10 +17,13 @@ limitations under the License.
package cpumanager
import (
+ "errors"
"fmt"
"reflect"
"testing"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -28,6 +31,7 @@ import (
@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@@ -556,7 +440,7 @@ index 414e5ce144c..f79c23accb4 100644
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)
@@ -69,8 +73,9 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
@@ -69,8 +70,9 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
}
func TestStaticPolicyName(t *testing.T) {
@@ -567,7 +451,7 @@ index 414e5ce144c..f79c23accb4 100644
policyName := policy.Name()
if policyName != "static" {
@@ -80,6 +85,7 @@ func TestStaticPolicyName(t *testing.T) {
@@ -80,6 +82,7 @@ func TestStaticPolicyName(t *testing.T) {
}
func TestStaticPolicyStart(t *testing.T) {
@@ -575,7 +459,7 @@ index 414e5ce144c..f79c23accb4 100644
testCases := []staticPolicyTest{
{
description: "non-corrupted state",
@@ -155,7 +161,7 @@ func TestStaticPolicyStart(t *testing.T) {
@@ -155,7 +158,7 @@ func TestStaticPolicyStart(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
@@ -584,7 +468,7 @@ index 414e5ce144c..f79c23accb4 100644
policy := p.(*staticPolicy)
st := &mockState{
@@ -203,7 +209,6 @@ func TestStaticPolicyAdd(t *testing.T) {
@@ -203,7 +206,6 @@ func TestStaticPolicyAdd(t *testing.T) {
largeTopoCPUSet := largeTopoBuilder.Result()
largeTopoSock0CPUSet := largeTopoSock0Builder.Result()
largeTopoSock1CPUSet := largeTopoSock1Builder.Result()
@@ -592,7 +476,7 @@ index 414e5ce144c..f79c23accb4 100644
// these are the cases which must behave the same regardless the policy options.
// So we will permutate the options to ensure this holds true.
@@ -577,8 +582,10 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
@@ -577,8 +579,10 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
if testCase.topologyHint != nil {
tm = topologymanager.NewFakeManagerWithHint(testCase.topologyHint)
}
@@ -604,7 +488,7 @@ index 414e5ce144c..f79c23accb4 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -625,6 +632,8 @@ func runStaticPolicyTestCaseWithFeatureGate(t *testing.T, testCase staticPolicyT
@@ -625,6 +629,8 @@ func runStaticPolicyTestCaseWithFeatureGate(t *testing.T, testCase staticPolicyT
}
func TestStaticPolicyReuseCPUs(t *testing.T) {
@@ -613,7 +497,7 @@ index 414e5ce144c..f79c23accb4 100644
testCases := []struct {
staticPolicyTest
expCSetAfterAlloc cpuset.CPUSet
@@ -649,7 +658,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
@@ -649,7 +655,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
}
for _, testCase := range testCases {
@@ -622,7 +506,7 @@ index 414e5ce144c..f79c23accb4 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -682,6 +691,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
@@ -682,6 +688,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
func TestStaticPolicyRemove(t *testing.T) {
excludeReserved := false
@@ -630,7 +514,7 @@ index 414e5ce144c..f79c23accb4 100644
testCases := []staticPolicyTest{
{
description: "SingleSocketHT, DeAllocOneContainer",
@@ -740,7 +750,7 @@ func TestStaticPolicyRemove(t *testing.T) {
@@ -740,7 +747,7 @@ func TestStaticPolicyRemove(t *testing.T) {
}
for _, testCase := range testCases {
@@ -639,7 +523,7 @@ index 414e5ce144c..f79c23accb4 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -763,6 +773,7 @@ func TestStaticPolicyRemove(t *testing.T) {
@@ -763,6 +770,7 @@ func TestStaticPolicyRemove(t *testing.T) {
func TestTopologyAwareAllocateCPUs(t *testing.T) {
excludeReserved := false
@@ -647,7 +531,7 @@ index 414e5ce144c..f79c23accb4 100644
testCases := []struct {
description string
topo *topology.CPUTopology
@@ -831,7 +842,8 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
@@ -831,7 +839,8 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
},
}
for _, tc := range testCases {
@@ -657,7 +541,7 @@ index 414e5ce144c..f79c23accb4 100644
policy := p.(*staticPolicy)
st := &mockState{
assignments: tc.stAssignments,
@@ -864,6 +876,7 @@ type staticPolicyTestWithResvList struct {
@@ -864,6 +873,7 @@ type staticPolicyTestWithResvList struct {
topo *topology.CPUTopology
numReservedCPUs int
reserved cpuset.CPUSet
@@ -665,7 +549,7 @@ index 414e5ce144c..f79c23accb4 100644
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
@@ -874,6 +887,8 @@ type staticPolicyTestWithResvList struct {
@@ -874,6 +884,8 @@ type staticPolicyTestWithResvList struct {
}
func TestStaticPolicyStartWithResvList(t *testing.T) {
@@ -674,7 +558,7 @@ index 414e5ce144c..f79c23accb4 100644
testCases := []staticPolicyTestWithResvList{
{
description: "empty cpuset",
@@ -903,11 +918,10 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
@@ -903,11 +915,10 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"),
},
}
@@ -687,7 +571,7 @@ index 414e5ce144c..f79c23accb4 100644
if !reflect.DeepEqual(err, testCase.expNewErr) {
t.Errorf("StaticPolicy Start() error (%v). expected error: %v but got: %v",
testCase.description, testCase.expNewErr, err)
@@ -947,6 +961,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -947,6 +958,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 1,
reserved: cpuset.NewCPUSet(0),
@@ -695,7 +579,7 @@ index 414e5ce144c..f79c23accb4 100644
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
@@ -959,6 +974,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -959,6 +971,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
@@ -703,7 +587,7 @@ index 414e5ce144c..f79c23accb4 100644
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
@@ -971,6 +987,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -971,6 +984,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
@@ -711,7 +595,7 @@ index 414e5ce144c..f79c23accb4 100644
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
@@ -987,6 +1004,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -987,6 +1001,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
@@ -719,7 +603,7 @@ index 414e5ce144c..f79c23accb4 100644
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
@@ -998,11 +1016,29 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -998,11 +1013,29 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(0, 1),
},
@@ -750,132 +634,6 @@ index 414e5ce144c..f79c23accb4 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -1128,3 +1164,125 @@ func TestStaticPolicyOptions(t *testing.T) {
})
}
}
+
+func makePodWithLabels(podLabels map[string]string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-pod",
+ Namespace: "test-namespace",
+ Labels: podLabels,
+ },
+ }
+}
+
+func fakeBuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ return &restclient.Config{}, nil
+}
+
+func fakeBuildConfigFromFlagsError(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ errString := fmt.Sprintf("%s file not found", kubeconfigPath)
+ return nil, errors.New(errString)
+
+}
+
+func getFakeInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "app.starlingx.io/component": "platform",
+ },
+ }}, nil
+}
+
+func getFakeNonInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "fake": "label",
+ }}}, nil
+
+}
+
+type kubeInfraPodTestCase struct {
+ description string
+ pod *v1.Pod
+ namespaceFunc getPodNamespace
+ expectedValue bool
+}
+
+func TestKubeInfraPod(t *testing.T) {
+ testCases := []kubeInfraPodTestCase{
+ {
+ description: "Pod with platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "app.starlingx.io/component": "platform",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: true,
+
+ },
+ {
+ description: "Pod without platform label and namespace with platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "label",
+ }),
+ namespaceFunc: getFakeInfraPodNamespace,
+ expectedValue: true,
+ },
+ {
+ description: "Pod without platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ },
+
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.description, func(t *testing.T) {
+
+ varGetNamespaceObject = testCase.namespaceFunc
+ varBuildConfigFromFlags = fakeBuildConfigFromFlags
+ gotValue := isKubeInfra(testCase.pod)
+
+ if gotValue != testCase.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ testCase.description, testCase.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", testCase.description)
+ }
+
+ })
+ }
+
+ test := kubeInfraPodTestCase{
+ description: "Failure reading kubeconfig file",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ }
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = fakeBuildConfigFromFlagsError
+
+ gotValue := isKubeInfra(test.pod)
+
+ if gotValue != test.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ test.description, test.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", test.description)
+ }
+
+}
+
diff --git a/pkg/kubelet/cm/devicemanager/manager_stub.go b/pkg/kubelet/cm/devicemanager/manager_stub.go
new file mode 100644
index 00000000000..e6874f88d8a
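
The hunks above replace the hard-coded namespace allow-list with a two-step label check: the pod's own app.starlingx.io/component label is consulted first, and only when it is absent are the namespace labels queried. A minimal standalone sketch of that decision logic, using plain label maps instead of the kubelet's client-go plumbing (names here are illustrative, not the patched functions):

package main

import "fmt"

// isPlatformPod mirrors the decision order of the patched isKubeInfra:
// the pod's own label wins; only when it is missing are the namespace
// labels consulted. Illustrative sketch; the real code fetches namespace
// labels through a clientset built from /etc/kubernetes/kubelet.conf and
// treats a failed namespace lookup as "not a platform pod".
func isPlatformPod(podLabels, nsLabels map[string]string) bool {
	if podLabels["app.starlingx.io/component"] == "platform" {
		return true
	}
	return nsLabels["app.starlingx.io/component"] == "platform"
}

func main() {
	pod := map[string]string{"app.starlingx.io/component": "platform"}
	ns := map[string]string{"fake": "label"}
	fmt.Println(isPlatformPod(pod, ns))                 // true: the pod label alone is enough
	fmt.Println(isPlatformPod(map[string]string{}, ns)) // false: neither side is labelled
}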

@@ -8,3 +8,4 @@ kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch
Affinity-of-guaranteed-pod-to-non-isolated-CPUs.patch
kubelet-CFS-quota-throttling-for-non-integer-cpulimit.patch
kubeadm-reduce-UpgradeManifestTimeout.patch
Identify-platform-pods-based-on-pod-or-namespace-labels.patch

@@ -0,0 +1,411 @@
From 8e71d74c0a8450c1146ac7beca834621705b1822 Mon Sep 17 00:00:00 2001
From: Boovan Rajendran <boovan.rajendran@windriver.com>
Date: Mon, 1 Apr 2024 03:28:34 -0400
Subject: [PATCH] Identify platform pods based on pod or namespace labels
Currently, for static CPU allocation, pods are identified
as platform pods using a hard-coded list of namespaces.
This change identifies a pod as a platform pod using a label
assigned to it or to its namespace.
Signed-off-by: Boovan Rajendran <boovan.rajendran@windriver.com>
---
pkg/kubelet/cm/cpumanager/policy_static.go | 85 +++++++++--
.../cm/cpumanager/policy_static_test.go | 140 +++++++++++++++++-
.../cm/cpumanager/topology_hints_test.go | 4 +
3 files changed, 217 insertions(+), 12 deletions(-)
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index 9d67f4bb68a..69fd6680182 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -17,10 +17,15 @@ limitations under the License.
package cpumanager
import (
+ "context"
"fmt"
"strconv"
+ k8sclient "k8s.io/client-go/kubernetes"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/clientcmd"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -45,6 +50,19 @@ const (
ErrorSMTAlignment = "SMTAlignmentError"
)
+type getPodNamespace func(string) (*v1.Namespace, error)
+type buildFromConfigFlag func(masterUrl string, kubeconfigPath string) (*restclient.Config, error)
+type isKubeInfraFunc func(pod *v1.Pod) bool
+
+var varGetNamespaceObject getPodNamespace
+var varBuildConfigFromFlags buildFromConfigFlag
+var varIsKubeInfra isKubeInfraFunc
+
+func init() {
+ varIsKubeInfra = isKubeInfra
+}
+
+
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
@@ -64,11 +82,6 @@ func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
-// Define namespaces used by platform infrastructure pods
-var infraNamespaces = [...]string{
- "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm", "metrics-server",
-}
-
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -323,8 +336,11 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = clientcmd.BuildConfigFromFlags
// Process infra pods before guaranteed pods
- if isKubeInfra(pod) {
+ if varIsKubeInfra(pod) {
// Container belongs in reserved pool.
// We don't want to fall through to the p.guaranteedCPUs() clause below so return either nil or error.
if _, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
@@ -522,7 +538,7 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
return 0
}
// Infrastructure pods use reserved CPUs even if they're in the Guaranteed QoS class
- if isKubeInfra(pod) {
+ if varIsKubeInfra(pod) {
return 0
}
// Safe downcast to do for all systems with < 2.1 billion CPUs.
@@ -743,14 +759,61 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
return hints
}
+func getPodNamespaceObject(podNamespaceName string) (*v1.Namespace, error) {
+
+ cfg, err := varBuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
+ if err != nil {
+ klog.Error("Failed to build client config from /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ clientset, err := k8sclient.NewForConfig(cfg)
+ if err != nil {
+ klog.Error("Failed to get clientset for KUBECONFIG /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ namespaceObj, err := clientset.CoreV1().Namespaces().Get(context.TODO(), podNamespaceName, metav1.GetOptions{})
+ if err != nil {
+ klog.Error("Error getting namespace object:", err.Error())
+ return nil, err
+ }
+
+ return namespaceObj, nil
+
+}
+
// check if a given pod is in a platform infrastructure namespace
func isKubeInfra(pod *v1.Pod) bool {
- for _, namespace := range infraNamespaces {
- if namespace == pod.Namespace {
- return true
- }
+
+ podName := pod.GetName()
+ podNamespaceName := pod.GetNamespace()
+
+ klog.InfoS("Checking pod ", podName , " for label 'app.starlingx.io/component=platform'.")
+ podLabels := pod.GetLabels()
+ val, ok := podLabels["app.starlingx.io/component"]
+ if (ok && val == "platform") {
+ klog.InfoS("Pod ", podName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
}
+
+ klog.InfoS("Pod ", pod.GetName(), " does not have 'app.starlingx.io/component=platform' label. Checking its namespace information...")
+
+ namespaceObj, err := varGetNamespaceObject(podNamespaceName)
+ if err != nil {
+ return false
+ }
+
+ namespaceLabels := namespaceObj.GetLabels()
+ val, ok = namespaceLabels["app.starlingx.io/component"]
+ if ok && val == "platform" {
+ klog.InfoS("For pod: ", podName, ", its Namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
+ }
+
+ klog.InfoS("Neither pod ", podName, " nor its namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Not assigning platform CPUs.")
return false
+
}
// get the isolated CPUs (if any) from the devices associated with a specific container
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index c25ee484a94..1397f754c42 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -17,10 +17,13 @@ limitations under the License.
package cpumanager
import (
+ "errors"
"fmt"
"reflect"
"testing"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -926,6 +929,7 @@ type staticPolicyTestWithResvList struct {
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
+ isKubeInfraPodfunc isKubeInfraFunc
expErr error
expNewErr error
expCPUAlloc bool
@@ -998,6 +1002,14 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
}
}
+func fakeIsKubeInfraTrue(pod *v1.Pod) bool {
+ return true
+}
+
+func fakeIsKubeInfraFalse(pod *v1.Pod) bool {
+ return false
+}
+
func TestStaticPolicyAddWithResvList(t *testing.T) {
infraPod := makePod("fakePod", "fakeContainer2", "200m", "200m")
infraPod.Namespace = "kube-system"
@@ -1011,6 +1023,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.New(),
@@ -1024,6 +1037,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(4), // expect sibling of partial core
@@ -1041,6 +1055,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.New(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(4, 5),
@@ -1058,6 +1073,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.New(4, 5),
pod: infraPod,
+ isKubeInfraPodfunc: fakeIsKubeInfraTrue,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(0, 1),
@@ -1075,6 +1091,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.New(4, 5),
pod: infraPod,
+ isKubeInfraPodfunc: fakeIsKubeInfraTrue,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(0),
@@ -1090,7 +1107,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
-
+ varIsKubeInfra = testCase.isKubeInfraPodfunc
container := &testCase.pod.Spec.Containers[0]
err := policy.Allocate(st, testCase.pod, container)
if !reflect.DeepEqual(err, testCase.expErr) {
@@ -1215,3 +1232,124 @@ func newCPUSetPtr(cpus ...int) *cpuset.CPUSet {
ret := cpuset.New(cpus...)
return &ret
}
+
+func makePodWithLabels(podLabels map[string]string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-pod",
+ Namespace: "test-namespace",
+ Labels: podLabels,
+ },
+ }
+}
+
+func fakeBuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ return &restclient.Config{}, nil
+}
+
+func fakeBuildConfigFromFlagsError(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ errString := fmt.Sprintf("%s file not found", kubeconfigPath)
+ return nil, errors.New(errString)
+
+}
+
+func getFakeInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "app.starlingx.io/component": "platform",
+ },
+ }}, nil
+}
+
+func getFakeNonInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "fake": "label",
+ }}}, nil
+
+}
+
+type kubeInfraPodTestCase struct {
+ description string
+ pod *v1.Pod
+ namespaceFunc getPodNamespace
+ expectedValue bool
+}
+
+func TestKubeInfraPod(t *testing.T) {
+ testCases := []kubeInfraPodTestCase{
+ {
+ description: "Pod with platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "app.starlingx.io/component": "platform",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: true,
+
+ },
+ {
+ description: "Pod without platform label and namespace with platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "label",
+ }),
+ namespaceFunc: getFakeInfraPodNamespace,
+ expectedValue: true,
+ },
+ {
+ description: "Pod without platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ },
+
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.description, func(t *testing.T) {
+
+ varGetNamespaceObject = testCase.namespaceFunc
+ varBuildConfigFromFlags = fakeBuildConfigFromFlags
+ gotValue := isKubeInfra(testCase.pod)
+
+ if gotValue != testCase.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ testCase.description, testCase.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", testCase.description)
+ }
+
+ })
+ }
+
+ test := kubeInfraPodTestCase{
+ description: "Failure reading kubeconfig file",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ }
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = fakeBuildConfigFromFlagsError
+
+ gotValue := isKubeInfra(test.pod)
+
+ if gotValue != test.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ test.description, test.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", test.description)
+ }
+
+}
diff --git a/pkg/kubelet/cm/cpumanager/topology_hints_test.go b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
index 3cd5c85740b..e1303c90418 100644
--- a/pkg/kubelet/cm/cpumanager/topology_hints_test.go
+++ b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
@@ -145,6 +145,7 @@ func TestPodGuaranteedCPUs(t *testing.T) {
expectedCPU: 6,
},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
for _, tc := range tcases {
requestedCPU := p.podGuaranteedCPUs(tc.pod)
@@ -187,6 +188,7 @@ func TestGetTopologyHints(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
hints := m.GetTopologyHints(&tc.pod, &tc.container)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(hints) == 0 {
continue
@@ -240,6 +242,7 @@ func TestGetPodTopologyHints(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
podHints := m.GetPodTopologyHints(&tc.pod)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(podHints) == 0 {
continue
@@ -423,6 +426,7 @@ func TestGetPodTopologyHintsWithPolicyOptions(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
podHints := m.GetPodTopologyHints(&testCase.pod)[string(v1.ResourceCPU)]
sort.SliceStable(podHints, func(i, j int) bool {
return podHints[i].LessThan(podHints[j])
--
2.25.1
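
To trigger the behaviour this patch adds, either the pod or its namespace just needs the app.starlingx.io/component=platform label. A rough client-go sketch of labelling a namespace, assuming a reachable cluster and an admin kubeconfig path (both values below are illustrative):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// labelNamespaceAsPlatform adds the platform label to a namespace so that
// every pod in it is classified as a platform pod by the patched
// isKubeInfra. Sketch only; error handling is minimal.
func labelNamespaceAsPlatform(nsName, kubeconfig string) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	ns, err := clientset.CoreV1().Namespaces().Get(context.TODO(), nsName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if ns.Labels == nil {
		ns.Labels = map[string]string{}
	}
	ns.Labels["app.starlingx.io/component"] = "platform"
	_, err = clientset.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{})
	return err
}

func main() {
	// Illustrative invocation; requires a reachable cluster.
	if err := labelNamespaceAsPlatform("test-namespace", "/etc/kubernetes/admin.conf"); err != nil {
		fmt.Println("label update failed:", err)
	}
}

The same effect is available from the command line with kubectl label namespace <ns> app.starlingx.io/component=platform.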

@@ -1,4 +1,4 @@
From 8cdc168daa7fa8adc3a47c2e40900e4bf435babe Mon Sep 17 00:00:00 2001
From b51d6c0ba6dfd9a34c7f6832d17840820f9985eb Mon Sep 17 00:00:00 2001
From: Boovan Rajendran <boovan.rajendran@windriver.com>
Date: Fri, 8 Sep 2023 10:46:07 -0400
Subject: [PATCH] kubelet cpumanager introduce concept of isolated CPUs
@@ -47,15 +47,14 @@ Signed-off-by: Gleb Aronsky <gleb.aronsky@windriver.com>
Signed-off-by: Ramesh Kumar Sivanandam <rameshkumar.sivanandam@windriver.com>
Signed-off-by: Sachin Gopala Krishna <saching.krishna@windriver.com>
Signed-off-by: Boovan Rajendran <boovan.rajendran@windriver.com>
Signed-off-by: Kaustubh Dhokte <kaustubh.dhokte@windriver.com>
---
pkg/kubelet/cm/container_manager_linux.go | 1 +
pkg/kubelet/cm/cpumanager/cpu_manager.go | 35 +++-
pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 23 ++-
pkg/kubelet/cm/cpumanager/policy_static.go | 157 ++++++++++++++--
.../cm/cpumanager/policy_static_test.go | 177 +++++++++++++++++-
pkg/kubelet/cm/devicemanager/manager_stub.go | 99 ++++++++++
6 files changed, 462 insertions(+), 30 deletions(-)
pkg/kubelet/cm/container_manager_linux.go | 1 +
pkg/kubelet/cm/cpumanager/cpu_manager.go | 35 ++++++-
pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 23 ++++-
pkg/kubelet/cm/cpumanager/policy_static.go | 83 ++++++++++++++--
.../cm/cpumanager/policy_static_test.go | 53 ++++++++--
pkg/kubelet/cm/devicemanager/manager_stub.go | 99 +++++++++++++++++++
6 files changed, 273 insertions(+), 21 deletions(-)
create mode 100644 pkg/kubelet/cm/devicemanager/manager_stub.go
diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
@@ -266,27 +265,18 @@ index bb69b0ac084..44a88429a12 100644
testCases := []struct {
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index 1fdb49b52ad..99990fb596a 100644
index 1fdb49b52ad..49f63dd9efd 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -17,10 +17,16 @@ limitations under the License.
package cpumanager
@@ -18,6 +18,7 @@ package cpumanager
import (
+ "context"
"fmt"
+ "strconv"
+ k8sclient "k8s.io/client-go/kubernetes"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
+ "k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
@@ -28,6 +34,7 @@ import (
@@ -28,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@@ -294,32 +284,7 @@ index 1fdb49b52ad..99990fb596a 100644
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
"k8s.io/kubernetes/pkg/kubelet/metrics"
@@ -43,6 +50,12 @@ const (
ErrorSMTAlignment = "SMTAlignmentError"
)
+type getPodNamespace func(string) (*v1.Namespace, error)
+type buildFromConfigFlag func(masterUrl string, kubeconfigPath string) (*restclient.Config, error)
+
+var varGetNamespaceObject getPodNamespace
+var varBuildConfigFromFlags buildFromConfigFlag
+
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
@@ -62,11 +75,6 @@ func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
-// Define namespaces used by platform infrastructure pods
-var infraNamespaces = [...]string{
- "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm", "metrics-server",
-}
-
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -110,6 +118,10 @@ type staticPolicy struct {
@@ -110,6 +112,10 @@ type staticPolicy struct {
topology *topology.CPUTopology
// set of CPUs that is not available for exclusive assignment
reservedCPUs cpuset.CPUSet
@@ -330,7 +295,7 @@ index 1fdb49b52ad..99990fb596a 100644
// If true, default CPUSet should exclude reserved CPUs
excludeReserved bool
// Superset of reservedCPUs. It includes not just the reservedCPUs themselves,
@@ -132,7 +144,8 @@ var _ Policy = &staticPolicy{}
@@ -132,7 +138,8 @@ var _ Policy = &staticPolicy{}
// NewStaticPolicy returns a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -340,7 +305,7 @@ index 1fdb49b52ad..99990fb596a 100644
opts, err := NewStaticPolicyOptions(cpuPolicyOptions)
if err != nil {
return nil, err
@@ -147,6 +160,8 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
@@ -147,6 +154,8 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
policy := &staticPolicy{
topology: topology,
affinity: affinity,
@@ -349,7 +314,7 @@ index 1fdb49b52ad..99990fb596a 100644
excludeReserved: excludeReserved,
cpusToReuse: make(map[string]cpuset.CPUSet),
options: opts,
@@ -183,6 +198,12 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
@@ -183,6 +192,12 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
policy.reservedCPUs = reserved
policy.reservedPhysicalCPUs = reservedPhysicalCPUs
@@ -362,7 +327,7 @@ index 1fdb49b52ad..99990fb596a 100644
return policy, nil
}
@@ -216,8 +237,9 @@ func (p *staticPolicy) validateState(s state.State) error {
@@ -216,8 +231,9 @@ func (p *staticPolicy) validateState(s state.State) error {
} else {
s.SetDefaultCPUSet(allCPUs)
}
@@ -374,17 +339,7 @@ index 1fdb49b52ad..99990fb596a 100644
return nil
}
@@ -307,6 +329,9 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = clientcmd.BuildConfigFromFlags
// Process infra pods before guaranteed pods
if isKubeInfra(pod) {
// Container belongs in reserved pool.
@@ -316,16 +341,39 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
@@ -316,16 +332,39 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
return nil
}
@@ -426,7 +381,7 @@ index 1fdb49b52ad..99990fb596a 100644
numCPUs := p.guaranteedCPUs(pod, container)
if numCPUs == 0 {
// container belongs in the shared pool (nothing to do; use default cpuset)
@@ -391,7 +439,9 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
@@ -391,7 +430,9 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
}
s.SetCPUSet(string(pod.UID), container.Name, cpuset)
p.updateCPUsToReuse(pod, container, cpuset)
@@ -437,71 +392,10 @@ index 1fdb49b52ad..99990fb596a 100644
return nil
}
@@ -699,14 +749,91 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
return hints
@@ -709,6 +750,36 @@ func isKubeInfra(pod *v1.Pod) bool {
return false
}
+func getPodNamespaceObject(podNamespaceName string) (*v1.Namespace, error) {
+
+ cfg, err := varBuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
+ if err != nil {
+ klog.Error("Failed to build client config from /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ clientset, err := k8sclient.NewForConfig(cfg)
+ if err != nil {
+ klog.Error("Failed to get clientset for KUBECONFIG /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ namespaceObj, err := clientset.CoreV1().Namespaces().Get(context.TODO(), podNamespaceName, metav1.GetOptions{})
+ if err != nil {
+ klog.Error("Error getting namespace object:", err.Error())
+ return nil, err
+ }
+
+ return namespaceObj, nil
+
+}
+
// check if a given pod is in a platform infrastructure namespace
func isKubeInfra(pod *v1.Pod) bool {
- for _, namespace := range infraNamespaces {
- if namespace == pod.Namespace {
- return true
- }
+
+ podName := pod.GetName()
+ podNamespaceName := pod.GetNamespace()
+
+ klog.InfoS("Checking pod ", podName , " for label 'app.starlingx.io/component=platform'.")
+ podLabels := pod.GetLabels()
+ val, ok := podLabels["app.starlingx.io/component"]
+ if (ok && val == "platform") {
+ klog.InfoS("Pod ", podName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
}
+
+ klog.InfoS("Pod ", pod.GetName(), " does not have 'app.starlingx.io/component=platform' label. Checking its namespace information...")
+
+ namespaceObj, err := varGetNamespaceObject(podNamespaceName)
+ if err != nil {
+ return false
+ }
+
+ namespaceLabels := namespaceObj.GetLabels()
+ val, ok = namespaceLabels["app.starlingx.io/component"]
+ if ok && val == "platform" {
+ klog.InfoS("For pod: ", podName, ", its Namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
+ }
+
+ klog.InfoS("Neither pod ", podName, " nor its namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Not assigning platform CPUs.")
return false
+
+}
+
+// get the isolated CPUs (if any) from the devices associated with a specific container
+func (p *staticPolicy) podIsolCPUs(pod *v1.Pod, container *v1.Container) cpuset.CPUSet {
+ // NOTE: This is required for TestStaticPolicyAdd() since makePod() does
@@ -530,28 +424,16 @@ index 1fdb49b52ad..99990fb596a 100644
+ }
+ }
+ return cpuSet
}
+}
+
// isHintSocketAligned function return true if numa nodes in hint are socket aligned.
func (p *staticPolicy) isHintSocketAligned(hint topologymanager.TopologyHint, minAffinitySize int) bool {
numaNodesBitMask := hint.NUMANodeAffinity.GetBits()
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 63f31486d19..b0ce9d497d9 100644
index 63f31486d19..c25ee484a94 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -17,10 +17,13 @@ limitations under the License.
package cpumanager
import (
+ "errors"
"fmt"
"reflect"
"testing"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -28,6 +31,7 @@ import (
@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@@ -559,7 +441,7 @@ index 63f31486d19..b0ce9d497d9 100644
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)
@@ -70,8 +74,9 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
@@ -70,8 +71,9 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
}
func TestStaticPolicyName(t *testing.T) {
@@ -570,7 +452,7 @@ index 63f31486d19..b0ce9d497d9 100644
policyName := policy.Name()
if policyName != "static" {
@@ -81,6 +86,7 @@ func TestStaticPolicyName(t *testing.T) {
@@ -81,6 +83,7 @@ func TestStaticPolicyName(t *testing.T) {
}
func TestStaticPolicyStart(t *testing.T) {
@@ -578,7 +460,7 @@ index 63f31486d19..b0ce9d497d9 100644
testCases := []staticPolicyTest{
{
description: "non-corrupted state",
@@ -156,7 +162,7 @@ func TestStaticPolicyStart(t *testing.T) {
@@ -156,7 +159,7 @@ func TestStaticPolicyStart(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
@@ -587,7 +469,7 @@ index 63f31486d19..b0ce9d497d9 100644
policy := p.(*staticPolicy)
st := &mockState{
@@ -204,7 +210,6 @@ func TestStaticPolicyAdd(t *testing.T) {
@@ -204,7 +207,6 @@ func TestStaticPolicyAdd(t *testing.T) {
largeTopoCPUSet := cpuset.New(largeTopoCPUids...)
largeTopoSock0CPUSet := cpuset.New(largeTopoSock0CPUids...)
largeTopoSock1CPUSet := cpuset.New(largeTopoSock1CPUids...)
@@ -595,7 +477,7 @@ index 63f31486d19..b0ce9d497d9 100644
// these are the cases which must behave the same regardless the policy options.
// So we will permutate the options to ensure this holds true.
@@ -627,7 +632,9 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
@@ -627,7 +629,9 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
cpus = testCase.reservedCPUs.Clone()
}
testExcl := false
@@ -606,7 +488,7 @@ index 63f31486d19..b0ce9d497d9 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -674,6 +681,8 @@ func runStaticPolicyTestCaseWithFeatureGate(t *testing.T, testCase staticPolicyT
@@ -674,6 +678,8 @@ func runStaticPolicyTestCaseWithFeatureGate(t *testing.T, testCase staticPolicyT
}
func TestStaticPolicyReuseCPUs(t *testing.T) {
@@ -615,7 +497,7 @@ index 63f31486d19..b0ce9d497d9 100644
testCases := []struct {
staticPolicyTest
expCSetAfterAlloc cpuset.CPUSet
@@ -698,7 +707,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
@@ -698,7 +704,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
}
for _, testCase := range testCases {
@@ -624,7 +506,7 @@ index 63f31486d19..b0ce9d497d9 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -731,6 +740,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
@@ -731,6 +737,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
func TestStaticPolicyRemove(t *testing.T) {
excludeReserved := false
@@ -632,7 +514,7 @@ index 63f31486d19..b0ce9d497d9 100644
testCases := []staticPolicyTest{
{
description: "SingleSocketHT, DeAllocOneContainer",
@@ -789,7 +799,7 @@ func TestStaticPolicyRemove(t *testing.T) {
@@ -789,7 +796,7 @@ func TestStaticPolicyRemove(t *testing.T) {
}
for _, testCase := range testCases {
@@ -641,7 +523,7 @@ index 63f31486d19..b0ce9d497d9 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -812,6 +822,7 @@ func TestStaticPolicyRemove(t *testing.T) {
@@ -812,6 +819,7 @@ func TestStaticPolicyRemove(t *testing.T) {
func TestTopologyAwareAllocateCPUs(t *testing.T) {
excludeReserved := false
@@ -649,7 +531,7 @@ index 63f31486d19..b0ce9d497d9 100644
testCases := []struct {
description string
topo *topology.CPUTopology
@@ -880,7 +891,8 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
@@ -880,7 +888,8 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
},
}
for _, tc := range testCases {
@@ -659,7 +541,7 @@ index 63f31486d19..b0ce9d497d9 100644
policy := p.(*staticPolicy)
st := &mockState{
assignments: tc.stAssignments,
@@ -913,6 +925,7 @@ type staticPolicyTestWithResvList struct {
@@ -913,6 +922,7 @@ type staticPolicyTestWithResvList struct {
topo *topology.CPUTopology
numReservedCPUs int
reserved cpuset.CPUSet
@@ -667,7 +549,7 @@ index 63f31486d19..b0ce9d497d9 100644
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
@@ -923,6 +936,8 @@ type staticPolicyTestWithResvList struct {
@@ -923,6 +933,8 @@ type staticPolicyTestWithResvList struct {
}
func TestStaticPolicyStartWithResvList(t *testing.T) {
@@ -676,7 +558,7 @@ index 63f31486d19..b0ce9d497d9 100644
testCases := []staticPolicyTestWithResvList{
{
description: "empty cpuset",
@@ -952,10 +967,9 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
@@ -952,10 +964,9 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"),
},
}
@@ -688,7 +570,7 @@ index 63f31486d19..b0ce9d497d9 100644
if !reflect.DeepEqual(err, testCase.expNewErr) {
t.Errorf("StaticPolicy Start() error (%v). expected error: %v but got: %v",
@@ -996,6 +1010,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -996,6 +1007,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 1,
reserved: cpuset.New(0),
@@ -696,7 +578,7 @@ index 63f31486d19..b0ce9d497d9 100644
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
@@ -1008,6 +1023,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -1008,6 +1020,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.New(0, 1),
@@ -704,7 +586,7 @@ index 63f31486d19..b0ce9d497d9 100644
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
@@ -1020,6 +1036,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -1020,6 +1033,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.New(0, 1),
@@ -712,7 +594,7 @@ index 63f31486d19..b0ce9d497d9 100644
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.New(2, 3, 6, 7),
@@ -1036,6 +1053,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -1036,6 +1050,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.New(0, 1),
@@ -720,7 +602,7 @@ index 63f31486d19..b0ce9d497d9 100644
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.New(2, 3, 6, 7),
@@ -1047,11 +1065,29 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -1047,11 +1062,29 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
expCPUAlloc: true,
expCSet: cpuset.New(0, 1),
},
@@ -751,131 +633,6 @@ index 63f31486d19..b0ce9d497d9 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -1182,3 +1218,124 @@ func newCPUSetPtr(cpus ...int) *cpuset.CPUSet {
ret := cpuset.New(cpus...)
return &ret
}
+
+func makePodWithLabels(podLabels map[string]string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-pod",
+ Namespace: "test-namespace",
+ Labels: podLabels,
+ },
+ }
+}
+
+func fakeBuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ return &restclient.Config{}, nil
+}
+
+func fakeBuildConfigFromFlagsError(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ errString := fmt.Sprintf("%s file not found", kubeconfigPath)
+ return nil, errors.New(errString)
+
+}
+
+func getFakeInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "app.starlingx.io/component": "platform",
+ },
+ }}, nil
+}
+
+func getFakeNonInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "fake": "label",
+ }}}, nil
+
+}
+
+type kubeInfraPodTestCase struct {
+ description string
+ pod *v1.Pod
+ namespaceFunc getPodNamespace
+ expectedValue bool
+}
+
+func TestKubeInfraPod(t *testing.T) {
+ testCases := []kubeInfraPodTestCase{
+ {
+ description: "Pod with platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "app.starlingx.io/component": "platform",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: true,
+
+ },
+ {
+ description: "Pod without platform label and namespace with platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "label",
+ }),
+ namespaceFunc: getFakeInfraPodNamespace,
+ expectedValue: true,
+ },
+ {
+ description: "Pod without platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ },
+
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.description, func(t *testing.T) {
+
+ varGetNamespaceObject = testCase.namespaceFunc
+ varBuildConfigFromFlags = fakeBuildConfigFromFlags
+ gotValue := isKubeInfra(testCase.pod)
+
+ if gotValue != testCase.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ testCase.description, testCase.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", testCase.description)
+ }
+
+ })
+ }
+
+ test := kubeInfraPodTestCase{
+ description: "Failure reading kubeconfig file",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ }
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = fakeBuildConfigFromFlagsError
+
+ gotValue := isKubeInfra(test.pod)
+
+ if gotValue != test.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ test.description, test.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", test.description)
+ }
+
+}
diff --git a/pkg/kubelet/cm/devicemanager/manager_stub.go b/pkg/kubelet/cm/devicemanager/manager_stub.go
new file mode 100644
index 00000000000..e6874f88d8a
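
This patch adds a strconv import to policy_static.go alongside podIsolCPUs, which maps device IDs advertised by the isolated-CPU device plugin onto CPU numbers; the function body is collapsed in the view above. A rough sketch of such an ID-to-CPU conversion, under the assumption that each device ID is a decimal CPU number (the real code walks the kubelet device manager's per-container allocations and builds a cpuset.CPUSet):

package main

import (
	"fmt"
	"strconv"
)

// isolCPUsFromDeviceIDs converts device IDs handed out by an isolated-CPU
// device plugin into CPU numbers. Assumption for this sketch: one device
// per CPU, with the CPU number as a decimal device ID. IDs that do not
// parse are skipped rather than failing the whole allocation, mirroring
// defensive handling in the patched policy.
func isolCPUsFromDeviceIDs(deviceIDs []string) []int {
	cpus := make([]int, 0, len(deviceIDs))
	for _, id := range deviceIDs {
		cpu, err := strconv.Atoi(id)
		if err != nil {
			fmt.Printf("skipping unparseable device ID %q: %v\n", id, err)
			continue
		}
		cpus = append(cpus, cpu)
	}
	return cpus
}

func main() {
	fmt.Println(isolCPUsFromDeviceIDs([]string{"3", "4", "not-a-cpu", "7"})) // [3 4 7]
}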

@@ -8,3 +8,4 @@ kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch
Affinity-of-guaranteed-pod-to-non-isolated-CPUs.patch
kubelet-CFS-quota-throttling-for-non-integer-cpulimit.patch
kubeadm-reduce-UpgradeManifestTimeout.patch
Identify-platform-pods-based-on-pod-or-namespace-labels.patch
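
The patch that follows routes isKubeInfra, the namespace lookup, and kubeconfig loading through package-level function variables (varIsKubeInfra, varGetNamespaceObject, varBuildConfigFromFlags) so the unit tests can substitute fakes without a running API server. A minimal sketch of that seam, with shortened illustrative names:

package main

import "fmt"

type pod struct{ labels map[string]string }

// classify is the injection point: production code assigns the real
// implementation once, tests overwrite it with a deterministic fake.
var classify = realClassify

func realClassify(p pod) bool {
	return p.labels["app.starlingx.io/component"] == "platform"
}

func allocate(p pod) string {
	if classify(p) {
		return "reserved platform CPUs"
	}
	return "application CPUs"
}

func main() {
	fmt.Println(allocate(pod{labels: map[string]string{"app.starlingx.io/component": "platform"}}))

	// A test can force either branch without touching the API server:
	classify = func(pod) bool { return false }
	fmt.Println(allocate(pod{})) // application CPUs
}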

@@ -0,0 +1,411 @@
From 8e71d74c0a8450c1146ac7beca834621705b1822 Mon Sep 17 00:00:00 2001
From: Boovan Rajendran <boovan.rajendran@windriver.com>
Date: Mon, 1 Apr 2024 03:28:34 -0400
Subject: [PATCH] Identify platform pods based on pod or namespace labels
Currently, for static CPU allocation, pods are identified
as platform pods using a hard-coded list of namespaces.
This change identifies a pod as a platform pod using a label
assigned to it or to its namespace.
Signed-off-by: Boovan Rajendran <boovan.rajendran@windriver.com>
---
pkg/kubelet/cm/cpumanager/policy_static.go | 85 +++++++++--
.../cm/cpumanager/policy_static_test.go | 140 +++++++++++++++++-
.../cm/cpumanager/topology_hints_test.go | 4 +
3 files changed, 217 insertions(+), 12 deletions(-)
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index 9d67f4bb68a..69fd6680182 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -17,10 +17,15 @@ limitations under the License.
package cpumanager
import (
+ "context"
"fmt"
"strconv"
+ k8sclient "k8s.io/client-go/kubernetes"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/clientcmd"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -45,6 +50,19 @@ const (
ErrorSMTAlignment = "SMTAlignmentError"
)
+type getPodNamespace func(string) (*v1.Namespace, error)
+type buildFromConfigFlag func(masterUrl string, kubeconfigPath string) (*restclient.Config, error)
+type isKubeInfraFunc func(pod *v1.Pod) bool
+
+var varGetNamespaceObject getPodNamespace
+var varBuildConfigFromFlags buildFromConfigFlag
+var varIsKubeInfra isKubeInfraFunc
+
+func init() {
+ varIsKubeInfra = isKubeInfra
+}
+
+
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
@@ -64,11 +82,6 @@ func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
-// Define namespaces used by platform infrastructure pods
-var infraNamespaces = [...]string{
- "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm", "metrics-server",
-}
-
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -323,8 +336,11 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = clientcmd.BuildConfigFromFlags
// Process infra pods before guaranteed pods
- if isKubeInfra(pod) {
+ if varIsKubeInfra(pod) {
// Container belongs in reserved pool.
// We don't want to fall through to the p.guaranteedCPUs() clause below so return either nil or error.
if _, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
@@ -522,7 +538,7 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
return 0
}
// Infrastructure pods use reserved CPUs even if they're in the Guaranteed QoS class
- if isKubeInfra(pod) {
+ if varIsKubeInfra(pod) {
return 0
}
// Safe downcast to do for all systems with < 2.1 billion CPUs.
@@ -743,14 +759,61 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
return hints
}
+func getPodNamespaceObject(podNamespaceName string) (*v1.Namespace, error) {
+
+ cfg, err := varBuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
+ if err != nil {
+ klog.Error("Failed to build client config from /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ clientset, err := k8sclient.NewForConfig(cfg)
+ if err != nil {
+ klog.Error("Failed to get clientset for KUBECONFIG /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ namespaceObj, err := clientset.CoreV1().Namespaces().Get(context.TODO(), podNamespaceName, metav1.GetOptions{})
+ if err != nil {
+ klog.Error("Error getting namespace object:", err.Error())
+ return nil, err
+ }
+
+ return namespaceObj, nil
+
+}
+
// check if a given pod is in a platform infrastructure namespace
func isKubeInfra(pod *v1.Pod) bool {
- for _, namespace := range infraNamespaces {
- if namespace == pod.Namespace {
- return true
- }
+
+ podName := pod.GetName()
+ podNamespaceName := pod.GetNamespace()
+
+ klog.InfoS("Checking pod ", podName , " for label 'app.starlingx.io/component=platform'.")
+ podLabels := pod.GetLabels()
+ val, ok := podLabels["app.starlingx.io/component"]
+ if (ok && val == "platform") {
+ klog.InfoS("Pod ", podName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
}
+
+ klog.InfoS("Pod ", pod.GetName(), " does not have 'app.starlingx.io/component=platform' label. Checking its namespace information...")
+
+ namespaceObj, err := varGetNamespaceObject(podNamespaceName)
+ if err != nil {
+ return false
+ }
+
+ namespaceLabels := namespaceObj.GetLabels()
+ val, ok = namespaceLabels["app.starlingx.io/component"]
+ if ok && val == "platform" {
+ klog.InfoS("For pod: ", podName, ", its Namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
+ }
+
+ klog.InfoS("Neither pod ", podName, " nor its namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Not assigning platform CPUs.")
return false
+
}
// get the isolated CPUs (if any) from the devices associated with a specific container
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index c25ee484a94..1397f754c42 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -17,10 +17,13 @@ limitations under the License.
package cpumanager
import (
+ "errors"
"fmt"
"reflect"
"testing"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -926,6 +929,7 @@ type staticPolicyTestWithResvList struct {
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
+ isKubeInfraPodfunc isKubeInfraFunc
expErr error
expNewErr error
expCPUAlloc bool
@@ -998,6 +1002,14 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
}
}
+func fakeIsKubeInfraTrue(pod *v1.Pod) bool {
+ return true
+}
+
+func fakeIsKubeInfraFalse(pod *v1.Pod) bool {
+ return false
+}
+
func TestStaticPolicyAddWithResvList(t *testing.T) {
infraPod := makePod("fakePod", "fakeContainer2", "200m", "200m")
infraPod.Namespace = "kube-system"
@@ -1011,6 +1023,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.New(),
@@ -1024,6 +1037,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(4), // expect sibling of partial core
@@ -1041,6 +1055,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.New(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(4, 5),
@@ -1058,6 +1073,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.New(4, 5),
pod: infraPod,
+ isKubeInfraPodfunc: fakeIsKubeInfraTrue,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(0, 1),
@@ -1075,6 +1091,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.New(4, 5),
pod: infraPod,
+ isKubeInfraPodfunc: fakeIsKubeInfraTrue,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(0),
@@ -1090,7 +1107,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
-
+ varIsKubeInfra = testCase.isKubeInfraPodfunc
container := &testCase.pod.Spec.Containers[0]
err := policy.Allocate(st, testCase.pod, container)
if !reflect.DeepEqual(err, testCase.expErr) {
@@ -1215,3 +1232,124 @@ func newCPUSetPtr(cpus ...int) *cpuset.CPUSet {
ret := cpuset.New(cpus...)
return &ret
}
+
+func makePodWithLabels(podLabels map[string]string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-pod",
+ Namespace: "test-namespace",
+ Labels: podLabels,
+ },
+ }
+}
+
+func fakeBuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ return &restclient.Config{}, nil
+}
+
+func fakeBuildConfigFromFlagsError(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ errString := fmt.Sprintf("%s file not found", kubeconfigPath)
+ return nil, errors.New(errString)
+
+}
+
+func getFakeInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "app.starlingx.io/component": "platform",
+ },
+ }}, nil
+}
+
+func getFakeNonInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "fake": "label",
+ }}}, nil
+
+}
+
+type kubeInfraPodTestCase struct {
+ description string
+ pod *v1.Pod
+ namespaceFunc getPodNamespace
+ expectedValue bool
+}
+
+func TestKubeInfraPod(t *testing.T) {
+ testCases := []kubeInfraPodTestCase{
+ {
+ description: "Pod with platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "app.starlingx.io/component": "platform",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: true,
+
+ },
+ {
+ description: "Pod without platform label and namespace with platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "label",
+ }),
+ namespaceFunc: getFakeInfraPodNamespace,
+ expectedValue: true,
+ },
+ {
+ description: "Pod without platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ },
+
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.description, func(t *testing.T) {
+
+ varGetNamespaceObject = testCase.namespaceFunc
+ varBuildConfigFromFlags = fakeBuildConfigFromFlags
+ gotValue := isKubeInfra(testCase.pod)
+
+ if gotValue != testCase.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ testCase.description, testCase.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", testCase.description)
+ }
+
+ })
+ }
+
+ test := kubeInfraPodTestCase{
+ description: "Failure reading kubeconfig file",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ }
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = fakeBuildConfigFromFlagsError
+
+ gotValue := isKubeInfra(test.pod)
+
+ if gotValue != test.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ test.description, test.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", test.description)
+ }
+
+}
diff --git a/pkg/kubelet/cm/cpumanager/topology_hints_test.go b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
index 3cd5c85740b..e1303c90418 100644
--- a/pkg/kubelet/cm/cpumanager/topology_hints_test.go
+++ b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
@@ -145,6 +145,7 @@ func TestPodGuaranteedCPUs(t *testing.T) {
expectedCPU: 6,
},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
for _, tc := range tcases {
requestedCPU := p.podGuaranteedCPUs(tc.pod)
@@ -187,6 +188,7 @@ func TestGetTopologyHints(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
hints := m.GetTopologyHints(&tc.pod, &tc.container)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(hints) == 0 {
continue
@@ -240,6 +242,7 @@ func TestGetPodTopologyHints(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
podHints := m.GetPodTopologyHints(&tc.pod)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(podHints) == 0 {
continue
@@ -423,6 +426,7 @@ func TestGetPodTopologyHintsWithPolicyOptions(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
podHints := m.GetPodTopologyHints(&testCase.pod)[string(v1.ResourceCPU)]
sort.SliceStable(podHints, func(i, j int) bool {
return podHints[i].LessThan(podHints[j])
--
2.25.1
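For completeness, here is a minimal client-go sketch of how a namespace acquires the label the patched kubelet checks. It is illustrative only and not part of the patch; the kubeconfig path and target namespace are assumptions.

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Assumption: an admin kubeconfig at the conventional path.
    cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/admin.conf")
    if err != nil {
        panic(err)
    }
    clientset, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }
    // Merge-patch the label the kubelet looks for onto an example namespace.
    patch := []byte(`{"metadata":{"labels":{"app.starlingx.io/component":"platform"}}}`)
    _, err = clientset.CoreV1().Namespaces().Patch(context.TODO(),
        "kube-system", types.MergePatchType, patch, metav1.PatchOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Println("namespace labeled; newly allocated pods in it are platform pods")
}

With the label in place, pods in that namespace are classified as platform pods the next time the static policy allocates CPUs for them.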

View File

@ -1,4 +1,4 @@
From 4f74e4f9bbfd2909a3c740cb6a1b5233af277f72 Mon Sep 17 00:00:00 2001
From 856dfbe0960418618262998ebce65d0ae070c1bb Mon Sep 17 00:00:00 2001
From: Saba Touheed Mujawar <sabatouheed.mujawar@windriver.com>
Date: Fri, 1 Dec 2023 07:42:14 -0500
Subject: [PATCH] kubelet cpumanager introduce concept of isolated CPUs
@ -48,15 +48,14 @@ Signed-off-by: Ramesh Kumar Sivanandam <rameshkumar.sivanandam@windriver.com>
Signed-off-by: Sachin Gopala Krishna <saching.krishna@windriver.com>
Signed-off-by: Boovan Rajendran <boovan.rajendran@windriver.com>
Signed-off-by: Saba Touheed Mujawar <sabatouheed.mujawar@windriver.com>
Signed-off-by: Kaustubh Dhokte <kaustubh.dhokte@windriver.com>
---
pkg/kubelet/cm/container_manager_linux.go | 1 +
pkg/kubelet/cm/cpumanager/cpu_manager.go | 35 +++-
pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 23 ++-
pkg/kubelet/cm/cpumanager/policy_static.go | 157 +++++++++++++--
.../cm/cpumanager/policy_static_test.go | 178 +++++++++++++++++-
pkg/kubelet/cm/devicemanager/manager_stub.go | 110 +++++++++++
6 files changed, 474 insertions(+), 30 deletions(-)
pkg/kubelet/cm/cpumanager/cpu_manager.go | 35 +++++-
pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 23 +++-
pkg/kubelet/cm/cpumanager/policy_static.go | 83 ++++++++++++-
.../cm/cpumanager/policy_static_test.go | 53 +++++++--
pkg/kubelet/cm/devicemanager/manager_stub.go | 110 ++++++++++++++++++
6 files changed, 284 insertions(+), 21 deletions(-)
create mode 100644 pkg/kubelet/cm/devicemanager/manager_stub.go
diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
@ -267,26 +266,18 @@ index daecd35f67b..2298cc037fe 100644
testCases := []struct {
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index 9b7545c2207..e32803306c0 100644
index 9b7545c2207..e9a2defd848 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -17,9 +17,15 @@ limitations under the License.
package cpumanager
@@ -18,6 +18,7 @@ package cpumanager
import (
+ "context"
"fmt"
+ "strconv"
+ k8sclient "k8s.io/client-go/kubernetes"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/clientcmd"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -27,6 +33,7 @@ import (
@@ -27,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
@ -294,32 +285,7 @@ index 9b7545c2207..e32803306c0 100644
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
"k8s.io/kubernetes/pkg/kubelet/metrics"
@@ -43,6 +50,12 @@ const (
ErrorSMTAlignment = "SMTAlignmentError"
)
+type getPodNamespace func(string) (*v1.Namespace, error)
+type buildFromConfigFlag func(masterUrl string, kubeconfigPath string) (*restclient.Config, error)
+
+var varGetNamespaceObject getPodNamespace
+var varBuildConfigFromFlags buildFromConfigFlag
+
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
@@ -62,11 +75,6 @@ func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
-// Define namespaces used by platform infrastructure pods
-var infraNamespaces = [...]string{
- "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm", "metrics-server",
-}
-
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -110,6 +118,10 @@ type staticPolicy struct {
@@ -110,6 +112,10 @@ type staticPolicy struct {
topology *topology.CPUTopology
// set of CPUs that is not available for exclusive assignment
reservedCPUs cpuset.CPUSet
@ -330,7 +296,7 @@ index 9b7545c2207..e32803306c0 100644
// If true, default CPUSet should exclude reserved CPUs
excludeReserved bool
// Superset of reservedCPUs. It includes not just the reservedCPUs themselves,
@@ -132,7 +144,8 @@ var _ Policy = &staticPolicy{}
@@ -132,7 +138,8 @@ var _ Policy = &staticPolicy{}
// NewStaticPolicy returns a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@ -340,7 +306,7 @@ index 9b7545c2207..e32803306c0 100644
opts, err := NewStaticPolicyOptions(cpuPolicyOptions)
if err != nil {
return nil, err
@@ -147,6 +160,8 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
@@ -147,6 +154,8 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
policy := &staticPolicy{
topology: topology,
affinity: affinity,
@ -349,7 +315,7 @@ index 9b7545c2207..e32803306c0 100644
excludeReserved: excludeReserved,
cpusToReuse: make(map[string]cpuset.CPUSet),
options: opts,
@@ -183,6 +198,12 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
@@ -183,6 +192,12 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
policy.reservedCPUs = reserved
policy.reservedPhysicalCPUs = reservedPhysicalCPUs
@ -362,7 +328,7 @@ index 9b7545c2207..e32803306c0 100644
return policy, nil
}
@@ -216,8 +237,9 @@ func (p *staticPolicy) validateState(s state.State) error {
@@ -216,8 +231,9 @@ func (p *staticPolicy) validateState(s state.State) error {
} else {
s.SetDefaultCPUSet(allCPUs)
}
@ -374,17 +340,7 @@ index 9b7545c2207..e32803306c0 100644
return nil
}
@@ -307,6 +329,9 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = clientcmd.BuildConfigFromFlags
// Process infra pods before guaranteed pods
if isKubeInfra(pod) {
// Container belongs in reserved pool.
@@ -316,16 +341,39 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
@@ -316,16 +332,39 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
return nil
}
@ -426,7 +382,7 @@ index 9b7545c2207..e32803306c0 100644
numCPUs := p.guaranteedCPUs(pod, container)
if numCPUs == 0 {
// container belongs in the shared pool (nothing to do; use default cpuset)
@@ -391,7 +439,9 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
@@ -391,7 +430,9 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
}
s.SetCPUSet(string(pod.UID), container.Name, cpuset)
p.updateCPUsToReuse(pod, container, cpuset)
@ -437,71 +393,10 @@ index 9b7545c2207..e32803306c0 100644
return nil
}
@@ -699,14 +749,91 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
return hints
@@ -709,6 +750,36 @@ func isKubeInfra(pod *v1.Pod) bool {
return false
}
+func getPodNamespaceObject(podNamespaceName string) (*v1.Namespace, error) {
+
+ cfg, err := varBuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
+ if err != nil {
+ klog.Error("Failed to build client config from /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ clientset, err := k8sclient.NewForConfig(cfg)
+ if err != nil {
+ klog.Error("Failed to get clientset for KUBECONFIG /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ namespaceObj, err := clientset.CoreV1().Namespaces().Get(context.TODO(), podNamespaceName, metav1.GetOptions{})
+ if err != nil {
+ klog.Error("Error getting namespace object:", err.Error())
+ return nil, err
+ }
+
+ return namespaceObj, nil
+
+}
+
// check if a given pod is in a platform infrastructure namespace
func isKubeInfra(pod *v1.Pod) bool {
- for _, namespace := range infraNamespaces {
- if namespace == pod.Namespace {
- return true
- }
+
+ podName := pod.GetName()
+ podNamespaceName := pod.GetNamespace()
+
+ klog.InfoS("Checking pod ", podName , " for label 'app.starlingx.io/component=platform'.")
+ podLabels := pod.GetLabels()
+ val, ok := podLabels["app.starlingx.io/component"]
+ if (ok && val == "platform") {
+ klog.InfoS("Pod ", podName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
}
+
+ klog.InfoS("Pod ", pod.GetName(), " does not have 'app.starlingx.io/component=platform' label. Checking its namespace information...")
+
+ namespaceObj, err := varGetNamespaceObject(podNamespaceName)
+ if err != nil {
+ return false
+ }
+
+ namespaceLabels := namespaceObj.GetLabels()
+ val, ok = namespaceLabels["app.starlingx.io/component"]
+ if ok && val == "platform" {
+ klog.InfoS("For pod: ", podName, ", its Namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
+ }
+
+ klog.InfoS("Neither pod ", podName, " nor its namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Not assigning platform CPUs.")
return false
+
+}
+
+// get the isolated CPUs (if any) from the devices associated with a specific container
+func (p *staticPolicy) podIsolCPUs(pod *v1.Pod, container *v1.Container) cpuset.CPUSet {
+ // NOTE: This is required for TestStaticPolicyAdd() since makePod() does
@ -530,27 +425,16 @@ index 9b7545c2207..e32803306c0 100644
+ }
+ }
+ return cpuSet
}
+}
+
// isHintSocketAligned function return true if numa nodes in hint are socket aligned.
func (p *staticPolicy) isHintSocketAligned(hint topologymanager.TopologyHint, minAffinitySize int) bool {
numaNodesBitMask := hint.NUMANodeAffinity.GetBits()
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index b864c6c57c6..d94f8fdac14 100644
index b864c6c57c6..cb363bb29ab 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -17,16 +17,20 @@ limitations under the License.
package cpumanager
import (
+ "errors"
"fmt"
"reflect"
"testing"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -27,6 +27,7 @@ import (
pkgfeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
@ -558,7 +442,7 @@ index b864c6c57c6..d94f8fdac14 100644
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
"k8s.io/utils/cpuset"
@@ -70,8 +74,9 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
@@ -70,8 +71,9 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
}
func TestStaticPolicyName(t *testing.T) {
@ -569,7 +453,7 @@ index b864c6c57c6..d94f8fdac14 100644
policyName := policy.Name()
if policyName != "static" {
@@ -81,6 +86,7 @@ func TestStaticPolicyName(t *testing.T) {
@@ -81,6 +83,7 @@ func TestStaticPolicyName(t *testing.T) {
}
func TestStaticPolicyStart(t *testing.T) {
@ -577,7 +461,7 @@ index b864c6c57c6..d94f8fdac14 100644
testCases := []staticPolicyTest{
{
description: "non-corrupted state",
@@ -156,7 +162,7 @@ func TestStaticPolicyStart(t *testing.T) {
@@ -156,7 +159,7 @@ func TestStaticPolicyStart(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
@ -586,7 +470,7 @@ index b864c6c57c6..d94f8fdac14 100644
policy := p.(*staticPolicy)
st := &mockState{
@@ -204,7 +210,6 @@ func TestStaticPolicyAdd(t *testing.T) {
@@ -204,7 +207,6 @@ func TestStaticPolicyAdd(t *testing.T) {
largeTopoCPUSet := cpuset.New(largeTopoCPUids...)
largeTopoSock0CPUSet := cpuset.New(largeTopoSock0CPUids...)
largeTopoSock1CPUSet := cpuset.New(largeTopoSock1CPUids...)
@ -594,7 +478,7 @@ index b864c6c57c6..d94f8fdac14 100644
// these are the cases which must behave the same regardless the policy options.
// So we will permutate the options to ensure this holds true.
@@ -627,7 +632,9 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
@@ -627,7 +629,9 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
cpus = testCase.reservedCPUs.Clone()
}
testExcl := false
@ -605,7 +489,7 @@ index b864c6c57c6..d94f8fdac14 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -674,6 +681,8 @@ func runStaticPolicyTestCaseWithFeatureGate(t *testing.T, testCase staticPolicyT
@@ -674,6 +678,8 @@ func runStaticPolicyTestCaseWithFeatureGate(t *testing.T, testCase staticPolicyT
}
func TestStaticPolicyReuseCPUs(t *testing.T) {
@ -614,7 +498,7 @@ index b864c6c57c6..d94f8fdac14 100644
testCases := []struct {
staticPolicyTest
expCSetAfterAlloc cpuset.CPUSet
@@ -698,7 +707,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
@@ -698,7 +704,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
}
for _, testCase := range testCases {
@ -623,7 +507,7 @@ index b864c6c57c6..d94f8fdac14 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -731,6 +740,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
@@ -731,6 +737,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
func TestStaticPolicyRemove(t *testing.T) {
excludeReserved := false
@ -631,7 +515,7 @@ index b864c6c57c6..d94f8fdac14 100644
testCases := []staticPolicyTest{
{
description: "SingleSocketHT, DeAllocOneContainer",
@@ -789,7 +799,7 @@ func TestStaticPolicyRemove(t *testing.T) {
@@ -789,7 +796,7 @@ func TestStaticPolicyRemove(t *testing.T) {
}
for _, testCase := range testCases {
@ -640,7 +524,7 @@ index b864c6c57c6..d94f8fdac14 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -812,6 +822,7 @@ func TestStaticPolicyRemove(t *testing.T) {
@@ -812,6 +819,7 @@ func TestStaticPolicyRemove(t *testing.T) {
func TestTopologyAwareAllocateCPUs(t *testing.T) {
excludeReserved := false
@ -648,7 +532,7 @@ index b864c6c57c6..d94f8fdac14 100644
testCases := []struct {
description string
topo *topology.CPUTopology
@@ -880,7 +891,8 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
@@ -880,7 +888,8 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
},
}
for _, tc := range testCases {
@ -658,7 +542,7 @@ index b864c6c57c6..d94f8fdac14 100644
policy := p.(*staticPolicy)
st := &mockState{
assignments: tc.stAssignments,
@@ -913,6 +925,7 @@ type staticPolicyTestWithResvList struct {
@@ -913,6 +922,7 @@ type staticPolicyTestWithResvList struct {
topo *topology.CPUTopology
numReservedCPUs int
reserved cpuset.CPUSet
@ -666,7 +550,7 @@ index b864c6c57c6..d94f8fdac14 100644
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
@@ -923,6 +936,8 @@ type staticPolicyTestWithResvList struct {
@@ -923,6 +933,8 @@ type staticPolicyTestWithResvList struct {
}
func TestStaticPolicyStartWithResvList(t *testing.T) {
@ -675,7 +559,7 @@ index b864c6c57c6..d94f8fdac14 100644
testCases := []staticPolicyTestWithResvList{
{
description: "empty cpuset",
@@ -952,10 +967,9 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
@@ -952,10 +964,9 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"),
},
}
@ -687,7 +571,7 @@ index b864c6c57c6..d94f8fdac14 100644
if !reflect.DeepEqual(err, testCase.expNewErr) {
t.Errorf("StaticPolicy Start() error (%v). expected error: %v but got: %v",
@@ -996,6 +1010,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -996,6 +1007,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 1,
reserved: cpuset.New(0),
@ -695,7 +579,7 @@ index b864c6c57c6..d94f8fdac14 100644
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
@@ -1008,6 +1023,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -1008,6 +1020,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.New(0, 1),
@ -703,7 +587,7 @@ index b864c6c57c6..d94f8fdac14 100644
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
@@ -1020,6 +1036,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -1020,6 +1033,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.New(0, 1),
@ -711,7 +595,7 @@ index b864c6c57c6..d94f8fdac14 100644
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.New(2, 3, 6, 7),
@@ -1036,6 +1053,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -1036,6 +1050,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.New(0, 1),
@ -719,7 +603,7 @@ index b864c6c57c6..d94f8fdac14 100644
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.New(2, 3, 6, 7),
@@ -1047,11 +1065,29 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
@@ -1047,11 +1062,29 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
expCPUAlloc: true,
expCSet: cpuset.New(0, 1),
},
@ -750,132 +634,6 @@ index b864c6c57c6..d94f8fdac14 100644
st := &mockState{
assignments: testCase.stAssignments,
@@ -1182,3 +1218,125 @@ func newCPUSetPtr(cpus ...int) *cpuset.CPUSet {
ret := cpuset.New(cpus...)
return &ret
}
+
+func makePodWithLabels(podLabels map[string]string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-pod",
+ Namespace: "test-namespace",
+ Labels: podLabels,
+ },
+ }
+}
+
+func fakeBuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ return &restclient.Config{}, nil
+}
+
+func fakeBuildConfigFromFlagsError(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ errString := fmt.Sprintf("%s file not found", kubeconfigPath)
+ return nil, errors.New(errString)
+
+}
+
+func getFakeInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "app.starlingx.io/component": "platform",
+ },
+ }}, nil
+}
+
+func getFakeNonInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "fake": "label",
+ }}}, nil
+
+}
+
+type kubeInfraPodTestCase struct {
+ description string
+ pod *v1.Pod
+ namespaceFunc getPodNamespace
+ expectedValue bool
+}
+
+func TestKubeInfraPod(t *testing.T) {
+ testCases := []kubeInfraPodTestCase{
+ {
+ description: "Pod with platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "app.starlingx.io/component": "platform",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: true,
+
+ },
+ {
+ description: "Pod without platform label and namespace with platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "label",
+ }),
+ namespaceFunc: getFakeInfraPodNamespace,
+ expectedValue: true,
+ },
+ {
+ description: "Pod without platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ },
+
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.description, func(t *testing.T) {
+
+ varGetNamespaceObject = testCase.namespaceFunc
+ varBuildConfigFromFlags = fakeBuildConfigFromFlags
+ gotValue := isKubeInfra(testCase.pod)
+
+ if gotValue != testCase.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ testCase.description, testCase.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", testCase.description)
+ }
+
+ })
+ }
+
+ test := kubeInfraPodTestCase{
+ description: "Failure reading kubeconfig file",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ }
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = fakeBuildConfigFromFlagsError
+
+ gotValue := isKubeInfra(test.pod)
+
+ if gotValue != test.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ test.description, test.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", test.description)
+ }
+
+}
+
diff --git a/pkg/kubelet/cm/devicemanager/manager_stub.go b/pkg/kubelet/cm/devicemanager/manager_stub.go
new file mode 100644
index 00000000000..98abcde2519
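The decoupling above relies on a function-variable seam: production code calls through package-level variables such as varIsKubeInfra and varGetNamespaceObject, and tests substitute stubs so no live API server is needed. A minimal, self-contained sketch of the pattern follows; the names mirror the patch but the types are simplified and illustrative.

package main

import "fmt"

type pod struct{ labels map[string]string }

type isKubeInfraFunc func(p *pod) bool

// Package-level seam; the real implementation is assigned in init(),
// exactly as the patch does.
var varIsKubeInfra isKubeInfraFunc

func isKubeInfra(p *pod) bool {
    return p.labels["app.starlingx.io/component"] == "platform"
}

func init() { varIsKubeInfra = isKubeInfra }

func main() {
    p := &pod{labels: map[string]string{}}
    fmt.Println(varIsKubeInfra(p)) // false: real implementation

    // A test can force either classification without touching the cluster.
    varIsKubeInfra = func(*pod) bool { return true }
    fmt.Println(varIsKubeInfra(p)) // true: stubbed
}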

View File

@ -8,3 +8,4 @@ kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch
Affinity-of-guaranteed-pod-to-non-isolated-CPUs.patch
kubelet-CFS-quota-throttling-for-non-integer-cpulimit.patch
kubeadm-reduce-UpgradeManifestTimeout.patch
Identify-platform-pods-based-on-pod-or-namespace-labels.patch

View File

@ -0,0 +1,414 @@
From fde4c06e769779181e0e09e444c4b7345a062cbf Mon Sep 17 00:00:00 2001
From: Boovan Rajendran <boovan.rajendran@windriver.com>
Date: Mon, 1 Apr 2024 06:44:45 -0400
Subject: [PATCH] Identify platform pods based on pod or namespace labels
Currently, for static CPU allocation, pods are identified
as platform pods using a hard-coded list of namespaces.
This change identifies a pod as a platform pod using a label
assigned to it or to its namespace.
Signed-off-by: Boovan Rajendran <boovan.rajendran@windriver.com>
---
pkg/kubelet/cm/cpumanager/policy_static.go | 85 +++++++++--
.../cm/cpumanager/policy_static_test.go | 140 +++++++++++++++++-
.../cm/cpumanager/topology_hints_test.go | 4 +
3 files changed, 217 insertions(+), 12 deletions(-)
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index ee0afd378cf..a9f4af89a4e 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -17,10 +17,15 @@ limitations under the License.
package cpumanager
import (
+ "context"
"fmt"
"strconv"
+ k8sclient "k8s.io/client-go/kubernetes"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/clientcmd"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -46,6 +51,19 @@ const (
ErrorSMTAlignment = "SMTAlignmentError"
)
+type getPodNamespace func(string) (*v1.Namespace, error)
+type buildFromConfigFlag func(masterUrl string, kubeconfigPath string) (*restclient.Config, error)
+type isKubeInfraFunc func(pod *v1.Pod) bool
+
+var varGetNamespaceObject getPodNamespace
+var varBuildConfigFromFlags buildFromConfigFlag
+var varIsKubeInfra isKubeInfraFunc
+
+func init() {
+ varIsKubeInfra = isKubeInfra
+}
+
+
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
@@ -65,11 +83,6 @@ func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
-// Define namespaces used by platform infrastructure pods
-var infraNamespaces = [...]string{
- "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm", "metrics-server",
-}
-
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -330,8 +343,11 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = clientcmd.BuildConfigFromFlags
// Process infra pods before guaranteed pods
- if isKubeInfra(pod) {
+ if varIsKubeInfra(pod) {
// Container belongs in reserved pool.
// We don't want to fall through to the p.guaranteedCPUs() clause below so return either nil or error.
if _, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
@@ -529,7 +545,7 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
return 0
}
// Infrastructure pods use reserved CPUs even if they're in the Guaranteed QoS class
- if isKubeInfra(pod) {
+ if varIsKubeInfra(pod) {
return 0
}
// Safe downcast to do for all systems with < 2.1 billion CPUs.
@@ -757,14 +773,61 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
return hints
}
+func getPodNamespaceObject(podNamespaceName string) (*v1.Namespace, error) {
+
+ cfg, err := varBuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
+ if err != nil {
+ klog.Error("Failed to build client config from /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ clientset, err := k8sclient.NewForConfig(cfg)
+ if err != nil {
+ klog.Error("Failed to get clientset for KUBECONFIG /etc/kubernetes/kubelet.conf: ", err.Error())
+ return nil, err
+ }
+
+ namespaceObj, err := clientset.CoreV1().Namespaces().Get(context.TODO(), podNamespaceName, metav1.GetOptions{})
+ if err != nil {
+ klog.Error("Error getting namespace object:", err.Error())
+ return nil, err
+ }
+
+ return namespaceObj, nil
+
+}
+
// check if a given pod is in a platform infrastructure namespace
func isKubeInfra(pod *v1.Pod) bool {
- for _, namespace := range infraNamespaces {
- if namespace == pod.Namespace {
- return true
- }
+
+ podName := pod.GetName()
+ podNamespaceName := pod.GetNamespace()
+
+ klog.InfoS("Checking pod ", podName , " for label 'app.starlingx.io/component=platform'.")
+ podLabels := pod.GetLabels()
+ val, ok := podLabels["app.starlingx.io/component"]
+ if (ok && val == "platform") {
+ klog.InfoS("Pod ", podName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
}
+
+ klog.InfoS("Pod ", pod.GetName(), " does not have 'app.starlingx.io/component=platform' label. Checking its namespace information...")
+
+ namespaceObj, err := varGetNamespaceObject(podNamespaceName)
+ if err != nil {
+ return false
+ }
+
+ namespaceLabels := namespaceObj.GetLabels()
+ val, ok = namespaceLabels["app.starlingx.io/component"]
+ if ok && val == "platform" {
+ klog.InfoS("For pod: ", podName, ", its Namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Assigning platform CPUs.")
+ return true
+ }
+
+ klog.InfoS("Neither pod ", podName, " nor its namespace ", podNamespaceName, " has 'app.starlingx.io/component=platform' label. Not assigning platform CPUs.")
return false
+
}
// get the isolated CPUs (if any) from the devices associated with a specific container
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 0e926c1b1e6..3e7bf484d0f 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -17,10 +17,13 @@ limitations under the License.
package cpumanager
import (
+ "errors"
"fmt"
"reflect"
"testing"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ restclient "k8s.io/client-go/rest"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -973,6 +976,7 @@ type staticPolicyTestWithResvList struct {
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
+ isKubeInfraPodfunc isKubeInfraFunc
expErr error
expNewErr error
expCPUAlloc bool
@@ -1045,6 +1049,14 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
}
}
+func fakeIsKubeInfraTrue(pod *v1.Pod) bool {
+ return true
+}
+
+func fakeIsKubeInfraFalse(pod *v1.Pod) bool {
+ return false
+}
+
func TestStaticPolicyAddWithResvList(t *testing.T) {
infraPod := makePod("fakePod", "fakeContainer2", "200m", "200m")
infraPod.Namespace = "kube-system"
@@ -1058,6 +1070,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: fmt.Errorf("not enough cpus available to satisfy request: requested=8, available=7"),
expCPUAlloc: false,
expCSet: cpuset.New(),
@@ -1071,6 +1084,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(4), // expect sibling of partial core
@@ -1088,6 +1102,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.New(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"),
+ isKubeInfraPodfunc: fakeIsKubeInfraFalse,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(4, 5),
@@ -1105,6 +1120,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.New(4, 5),
pod: infraPod,
+ isKubeInfraPodfunc: fakeIsKubeInfraTrue,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(0, 1),
@@ -1122,6 +1138,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
stDefaultCPUSet: cpuset.New(4, 5),
pod: infraPod,
+ isKubeInfraPodfunc: fakeIsKubeInfraTrue,
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(0),
@@ -1137,7 +1154,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
-
+ varIsKubeInfra = testCase.isKubeInfraPodfunc
container := &testCase.pod.Spec.Containers[0]
err := policy.Allocate(st, testCase.pod, container)
if !reflect.DeepEqual(err, testCase.expErr) {
@@ -1258,6 +1275,127 @@ func TestStaticPolicyOptions(t *testing.T) {
}
}
+func makePodWithLabels(podLabels map[string]string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-pod",
+ Namespace: "test-namespace",
+ Labels: podLabels,
+ },
+ }
+}
+
+func fakeBuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ return &restclient.Config{}, nil
+}
+
+func fakeBuildConfigFromFlagsError(masterUrl string, kubeconfigPath string) (*restclient.Config, error) {
+
+ errString := fmt.Sprintf("%s file not found", kubeconfigPath)
+ return nil, errors.New(errString)
+
+}
+
+func getFakeInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "app.starlingx.io/component": "platform",
+ },
+ }}, nil
+}
+
+func getFakeNonInfraPodNamespace(_ string) (*v1.Namespace, error) {
+
+ return &v1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-namespace",
+ Labels: map[string]string{
+ "fake": "label",
+ }}}, nil
+
+}
+
+type kubeInfraPodTestCase struct {
+ description string
+ pod *v1.Pod
+ namespaceFunc getPodNamespace
+ expectedValue bool
+}
+
+func TestKubeInfraPod(t *testing.T) {
+ testCases := []kubeInfraPodTestCase{
+ {
+ description: "Pod with platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "app.starlingx.io/component": "platform",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: true,
+
+ },
+ {
+ description: "Pod without platform label and namespace with platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "label",
+ }),
+ namespaceFunc: getFakeInfraPodNamespace,
+ expectedValue: true,
+ },
+ {
+ description: "Pod without platform label and namespace without platform label",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ },
+
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.description, func(t *testing.T) {
+
+ varGetNamespaceObject = testCase.namespaceFunc
+ varBuildConfigFromFlags = fakeBuildConfigFromFlags
+ gotValue := isKubeInfra(testCase.pod)
+
+ if gotValue != testCase.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ testCase.description, testCase.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", testCase.description)
+ }
+
+ })
+ }
+
+ test := kubeInfraPodTestCase{
+ description: "Failure reading kubeconfig file",
+ pod: makePodWithLabels(map[string]string{
+ "test": "namespace",
+ }),
+ namespaceFunc: getFakeNonInfraPodNamespace,
+ expectedValue: false,
+ }
+
+ varGetNamespaceObject = getPodNamespaceObject
+ varBuildConfigFromFlags = fakeBuildConfigFromFlagsError
+
+ gotValue := isKubeInfra(test.pod)
+
+ if gotValue != test.expectedValue {
+ t.Errorf("StaticPolicy isKubeInfraPod() error %v. expected value %v actual value %v",
+ test.description, test.expectedValue, gotValue)
+ } else {
+ fmt.Printf("StaticPolicy isKubeInfraPod() test successful. : %v ", test.description)
+ }
+
+}
+
func newCPUSetPtr(cpus ...int) *cpuset.CPUSet {
ret := cpuset.New(cpus...)
return &ret
diff --git a/pkg/kubelet/cm/cpumanager/topology_hints_test.go b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
index 53738b613c2..ad9c0f17602 100644
--- a/pkg/kubelet/cm/cpumanager/topology_hints_test.go
+++ b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
@@ -197,6 +197,7 @@ func TestPodGuaranteedCPUs(t *testing.T) {
expectedCPU: 210,
},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
for _, tc := range tcases {
t.Run(tc.name, func(t *testing.T) {
requestedCPU := p.podGuaranteedCPUs(tc.pod)
@@ -241,6 +242,7 @@ func TestGetTopologyHints(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
hints := m.GetTopologyHints(&tc.pod, &tc.container)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(hints) == 0 {
continue
@@ -294,6 +296,7 @@ func TestGetPodTopologyHints(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
podHints := m.GetPodTopologyHints(&tc.pod)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(podHints) == 0 {
continue
@@ -477,6 +480,7 @@ func TestGetPodTopologyHintsWithPolicyOptions(t *testing.T) {
sourcesReady: &sourcesReadyStub{},
}
+ varIsKubeInfra = fakeIsKubeInfraFalse
podHints := m.GetPodTopologyHints(&testCase.pod)[string(v1.ResourceCPU)]
sort.SliceStable(podHints, func(i, j int) bool {
return podHints[i].LessThan(podHints[j])
--
2.25.1
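The final test case above pins down the fail-safe contract: when the kubeconfig cannot be read, the namespace lookup fails and isKubeInfra falls back to false, so the pod is treated as an ordinary workload rather than blocking allocation. A simplified sketch of that contract follows; the types and helper here are illustrative, not the patch's own.

package main

import (
    "errors"
    "fmt"
)

// namespaceGetter stands in for the patch's varGetNamespaceObject and
// returns only the namespace labels, which is all the check needs.
type namespaceGetter func(name string) (map[string]string, error)

// classify mirrors isKubeInfra's namespace branch: any lookup error
// degrades to "not a platform pod" instead of propagating.
func classify(nsName string, get namespaceGetter) bool {
    labels, err := get(nsName)
    if err != nil {
        return false // fail safe: treat as ordinary workload
    }
    return labels["app.starlingx.io/component"] == "platform"
}

func main() {
    broken := func(string) (map[string]string, error) {
        return nil, errors.New("/etc/kubernetes/kubelet.conf file not found")
    }
    fmt.Println(classify("kube-system", broken)) // false
}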

View File

@ -0,0 +1,33 @@
From 341756eb57cea280d96e74f56d02033402cee133 Mon Sep 17 00:00:00 2001
From: Ramesh Kumar Sivanandam <rameshkumar.sivanandam@windriver.com>
Date: Fri, 15 Mar 2024 03:49:15 -0400
Subject: [PATCH] kubeadm: reduce UpgradeManifestTimeout
This modifies the kubeadm UpgradeManifestTimeout from the default of
5 minutes to 3 minutes to reduce unnecessary delay in retries during
kubeadm-upgrade-apply failures.
A typical control-plane upgrade of static pods takes 75 to 85 seconds,
so 3 minutes gives an adequate buffer to complete the operation.
Signed-off-by: Ramesh Kumar Sivanandam <rameshkumar.sivanandam@windriver.com>
---
cmd/kubeadm/app/phases/upgrade/staticpods.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go
index 540c1549fff..b40bc76f0fc 100644
--- a/cmd/kubeadm/app/phases/upgrade/staticpods.go
+++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go
@@ -46,7 +46,7 @@ import (
const (
// UpgradeManifestTimeout is timeout of upgrading the static pod manifest
- UpgradeManifestTimeout = 5 * time.Minute
+ UpgradeManifestTimeout = 3 * time.Minute
)
// StaticPodPathManager is responsible for tracking the directories used in the static pod upgrade transition
--
2.25.1
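For illustration only, the sketch below shows how a 3-minute budget bounds the wait for a static pod manifest to converge; waitForStaticPod is a hypothetical stand-in for kubeadm's real waiter, not its API.

package main

import (
    "context"
    "fmt"
    "time"
)

const upgradeManifestTimeout = 3 * time.Minute

// waitForStaticPod polls until the (hypothetical) manifest converges or the
// context expires; the timeout is what the patch tightens from 5 to 3 minutes.
func waitForStaticPod(ctx context.Context) error {
    ticker := time.NewTicker(5 * time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return ctx.Err() // give up once the 3-minute budget is spent
        case <-ticker.C:
            // Poll the static pod hash here; return nil once it changes.
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), upgradeManifestTimeout)
    defer cancel()
    fmt.Println(waitForStaticPod(ctx))
}

Since a typical upgrade completes in 75 to 85 seconds, the loop normally returns well before the deadline; the shorter budget only trims how long a failed upgrade keeps retrying.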

View File

@ -7,3 +7,5 @@ kubelet-cpumanager-infra-pods-use-system-reserved-CP.patch
kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch
Affinity-of-guaranteed-pod-to-non-isolated-CPUs.patch
kubelet-CFS-quota-throttling-for-non-integer-cpulimit.patch
kubeadm-reduce-UpgradeManifestTimeout.patch
Identify-platform-pods-based-on-pod-or-namespace-labels.patch