From de3b9749f765398d4064c3225caa0a960d27eff3 Mon Sep 17 00:00:00 2001
From: Chris Friesen
Date: Thu, 9 Apr 2020 12:52:19 -0600
Subject: [PATCH 5/6] kubelet cpumanager introduce concept of isolated CPUs

This introduces the concept of "isolated CPUs", which are CPUs that
have been isolated at the kernel level via the "isolcpus" kernel boot
parameter.

When starting the kubelet process, two separate sets of reserved CPUs
may be specified. With this change, CPUs reserved via
'--system-reserved=cpu' will be used for infrastructure pods, while
isolated CPUs should be reserved via '--kube-reserved=cpu' so that
kubelet skips over them for "normal" CPU resource tracking. The
kubelet code will double-check that the specified isolated CPUs match
what the kernel exposes in "/sys/devices/system/cpu/isolated".

A plugin (outside the scope of this commit) will expose the isolated
CPUs to kubelet via the device plugin API.

If a pod specifies some number of "isolcpus" resources, the device
manager will allocate them. In this code we check whether such
resources have been allocated, and if so we set the container cpuset
to the isolated CPUs. This means it only makes sense to specify
"isolcpus" resources for best-effort or burstable pods, not for
guaranteed ones, since that would throw off the accounting code. In
order to ensure the accounting still works as designed, if "isolcpus"
are specified for guaranteed pods, the affinity will be set to the
non-isolated CPUs.

Signed-off-by: Chris Friesen
Co-authored-by: Jim Gauld
---
 pkg/kubelet/cm/container_manager_linux.go       |  1 +
 pkg/kubelet/cm/cpumanager/cpu_manager.go        | 30 ++++++++-
 pkg/kubelet/cm/cpumanager/cpu_manager_test.go   | 14 +++-
 pkg/kubelet/cm/cpumanager/policy_static.go      | 86 +++++++++++++++++++++++--
 pkg/kubelet/cm/cpumanager/policy_static_test.go | 46 ++++++++++---
 5 files changed, 158 insertions(+), 19 deletions(-)

diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
index 13c7176bdc2..e6ffb7a6194 100644
--- a/pkg/kubelet/cm/container_manager_linux.go
+++ b/pkg/kubelet/cm/container_manager_linux.go
@@ -325,6 +325,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
 		cm.GetNodeAllocatableReservation(),
 		nodeConfig.KubeletRootDir,
 		cm.topologyManager,
+		cm.deviceManager,
 	)
 	if err != nil {
 		klog.Errorf("failed to initialize cpu manager: %v", err)
diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go
index 322a2040a77..08d45c77182 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
@@ -21,6 +21,8 @@ import (
 	"math"
 	"sync"
 	"time"
+	"strings"
+	"io/ioutil"
 
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	v1 "k8s.io/api/core/v1"
@@ -34,6 +36,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"
+	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/status"
 	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
@@ -51,6 +54,25 @@ type policyName string
 // cpuManagerStateFileName is the file name where cpu manager stores its state
 const cpuManagerStateFileName = "cpu_manager_state"
 
+// get the system-level isolated CPUs
+func getIsolcpus() cpuset.CPUSet {
+	dat, err := ioutil.ReadFile("/sys/devices/system/cpu/isolated")
+	if err != nil {
+		klog.Errorf("[cpumanager] unable to read sysfs isolcpus file: %v", err)
+		return cpuset.NewCPUSet()
+	}
+
+	// The isolated cpus string ends in a newline
+	cpustring := strings.TrimSuffix(string(dat), "\n")
+	cset, err := cpuset.Parse(cpustring)
+	if err != nil {
+		klog.Errorf("[cpumanager] unable to parse sysfs isolcpus string %q: %v", cpustring, err)
+		return cpuset.NewCPUSet()
+	}
+
+	return cset
+}
+
 // Manager interface provides methods for Kubelet to manage pod cpus.
 type Manager interface {
 	// Start is called during Kubelet initialization.
@@ -127,7 +149,7 @@ func (s *sourcesReadyStub) AddSource(source string) {}
 func (s *sourcesReadyStub) AllReady() bool { return true }
 
 // NewManager creates new cpu manager based on provided policy
-func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, numaNodeInfo topology.NUMANodeInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
+func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, numaNodeInfo topology.NUMANodeInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store, deviceManager devicemanager.Manager) (Manager, error) {
 	var topo *topology.CPUTopology
 	var policy Policy
 
@@ -164,7 +186,11 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
 		// NOTE: Set excludeReserved unconditionally to exclude reserved CPUs from default cpuset.
 		// This variable is primarily to make testing easier.
 		excludeReserved := true
-		policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, excludeReserved)
+		// isolCPUs is the set of kernel-isolated CPUs. They should be a subset of specificCPUs or
+		// of the CPUs that NewStaticPolicy() will pick if numReservedCPUs is set. It's only in the
+		// argument list here for ease of testing; it's really internal to the policy.
+		isolCPUs := getIsolcpus()
+		policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, isolCPUs, affinity, deviceManager, excludeReserved)
 		if err != nil {
 			return nil, fmt.Errorf("new static policy error: %v", err)
 		}
diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
index a4d8f13c853..e806c62e80e 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
@@ -38,6 +38,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
+	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
 )
 
 type mockState struct {
@@ -207,6 +208,7 @@ func makeMultiContainerPod(initCPUs, appCPUs []struct{ request, limit string })
 }
 
 func TestCPUManagerAdd(t *testing.T) {
+	testDM, _ := devicemanager.NewManagerStub()
 	testExcl := false
 	testPolicy, _ := NewStaticPolicy(
 		&topology.CPUTopology{
@@ -222,7 +224,8 @@ func TestCPUManagerAdd(t *testing.T) {
 		},
 		0,
 		cpuset.NewCPUSet(),
-		topologymanager.NewFakeManager(), testExcl)
+		cpuset.NewCPUSet(),
+		topologymanager.NewFakeManager(), testDM, testExcl)
 	testCases := []struct {
 		description string
 		updateErr   error
@@ -476,8 +479,9 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
 	}
 
 	testExcl := false
+	testDM, _ := devicemanager.NewManagerStub()
 	for _, testCase := range testCases {
-		policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testExcl)
+		policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testDM, testExcl)
 
 		state := &mockState{
 			assignments:   testCase.stAssignments,
@@ -617,7 +621,8 @@ func TestCPUManagerGenerate(t *testing.T) {
 			}
 			defer os.RemoveAll(sDir)
 
-			mgr, err := NewManager(testCase.cpuPolicyName, 5*time.Second, machineInfo, nil, cpuset.NewCPUSet(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager())
+			testDM, _ := devicemanager.NewManagerStub()
+			mgr, err := NewManager(testCase.cpuPolicyName, 5*time.Second, machineInfo, nil, cpuset.NewCPUSet(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager(), testDM)
 			if testCase.expectedError != nil {
 				if !strings.Contains(err.Error(), testCase.expectedError.Error()) {
Have: %s wants %s", err.Error(), testCase.expectedError.Error()) @@ -972,6 +977,7 @@ func TestReconcileState(t *testing.T) { // the following tests are with --reserved-cpus configured func TestCPUManagerAddWithResvList(t *testing.T) { testExcl := false + testDM, _ := devicemanager.NewManagerStub() testPolicy, _ := NewStaticPolicy( &topology.CPUTopology{ NumCPUs: 4, @@ -986,7 +992,9 @@ func TestCPUManagerAddWithResvList(t *testing.T) { }, 1, cpuset.NewCPUSet(0), + cpuset.NewCPUSet(), topologymanager.NewFakeManager(), + testDM, testExcl, ) testCases := []struct { diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go index e511caf7ab7..490e7675679 100644 --- a/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/pkg/kubelet/cm/cpumanager/policy_static.go @@ -18,6 +18,7 @@ package cpumanager import ( "fmt" + "strconv" v1 "k8s.io/api/core/v1" "k8s.io/klog" @@ -27,6 +28,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" + "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager" ) // PolicyStatic is the name of the static policy @@ -80,6 +82,10 @@ type staticPolicy struct { topology *topology.CPUTopology // set of CPUs that is not available for exclusive assignment reserved cpuset.CPUSet + // subset of reserved CPUs with isolcpus attribute + isolcpus cpuset.CPUSet + // parent containerManager, used to get device list + deviceManager devicemanager.Manager // If true, default CPUSet should exclude reserved CPUs excludeReserved bool // topology manager reference to get container Topology affinity @@ -92,7 +98,7 @@ var _ Policy = &staticPolicy{} // NewStaticPolicy returns a CPU manager policy that does not change CPU // assignments for exclusively pinned guaranteed containers after the main // container process starts. 
-func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, excludeReserved bool) (Policy, error) {
+func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, isolCPUs cpuset.CPUSet, affinity topologymanager.Store, deviceManager devicemanager.Manager, excludeReserved bool) (Policy, error) {
 	allCPUs := topology.CPUDetails.CPUs()
 	var reserved cpuset.CPUSet
 	if reservedCPUs.Size() > 0 {
@@ -113,9 +119,17 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
 
 	klog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved)
 
+	if !isolCPUs.IsSubsetOf(reserved) {
+		klog.Errorf("[cpumanager] isolCPUs %v is not a subset of reserved %v", isolCPUs, reserved)
+		reserved = reserved.Union(isolCPUs)
+		klog.Warningf("[cpumanager] isolCPUs %v forced into reserved set; reserved is now %v", isolCPUs, reserved)
+	}
+
 	return &staticPolicy{
 		topology:        topology,
 		reserved:        reserved,
+		isolcpus:        isolCPUs,
+		deviceManager:   deviceManager,
 		excludeReserved: excludeReserved,
 		affinity:        affinity,
 	}, nil
@@ -151,8 +165,8 @@ func (p *staticPolicy) validateState(s state.State) error {
 	} else {
 		s.SetDefaultCPUSet(allCPUs)
 	}
-	klog.Infof("[cpumanager] static policy: CPUSet: allCPUs:%v, reserved:%v, default:%v\n",
-		allCPUs, p.reserved, s.GetDefaultCPUSet())
+	klog.Infof("[cpumanager] static policy: CPUSet: allCPUs:%v, reserved:%v, isolcpus:%v, default:%v\n",
+		allCPUs, p.reserved, p.isolcpus, s.GetDefaultCPUSet())
 	return nil
 }
 
@@ -221,12 +235,13 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
 			return nil
 		}
 
-		cpuset := p.reserved
+		// TODO: Is the clone actually needed?
+		cpuset := p.reserved.Clone().Difference(p.isolcpus)
 		if cpuset.IsEmpty() {
 			// If this happens then someone messed up.
 			return fmt.Errorf("[cpumanager] static policy: reserved container unable to allocate cpus "+
-				"(namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v, reserved:%v",
-				pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset, p.reserved)
+				"(namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v, reserved:%v, isolcpus:%v",
+				pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset, p.reserved, p.isolcpus)
 		}
 		s.SetCPUSet(string(pod.UID), container.Name, cpuset)
 		klog.Infof("[cpumanager] static policy: reserved: AddContainer "+
@@ -267,7 +282,37 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
 				}
 			}
 		}
+		klog.Infof("[cpumanager] guaranteed: AddContainer "+
+			"(namespace: %s, pod UID: %s, pod: %s, container: %s); numCPUs=%d, cpuset=%v",
+			pod.Namespace, string(pod.UID), pod.Name, container.Name, numCPUs, cpuset)
+		return nil
+	}
+
+	if isolcpus := p.podIsolCPUs(pod, container); isolcpus.Size() > 0 {
+		// container has requested isolated CPUs
+		if set, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
+			if set.Equals(isolcpus) {
+				klog.Infof("[cpumanager] isolcpus container already present in state, skipping "+
+					"(namespace: %s, pod UID: %s, pod: %s, container: %s)",
+					pod.Namespace, string(pod.UID), pod.Name, container.Name)
+				return nil
+			} else {
+				klog.Infof("[cpumanager] isolcpus container state has cpus %v, should be %v "+
+					"(namespace: %s, pod UID: %s, pod: %s, container: %s)",
+					set, isolcpus, pod.Namespace, string(pod.UID), pod.Name, container.Name)
+			}
+		}
+		// Note that we do not do anything about init containers here.
+		// It looks like devices are allocated per-pod based on effective requests/limits
+		// and extra devices from initContainers are not freed up when the regular containers start.
+		// TODO: confirm this is still true for 1.18
+		s.SetCPUSet(string(pod.UID), container.Name, isolcpus)
+		klog.Infof("[cpumanager] isolcpus: AddContainer "+
+			"(namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v",
+			pod.Namespace, string(pod.UID), pod.Name, container.Name, isolcpus)
+		return nil
 	}
+	// container belongs in the shared pool (nothing to do; use default cpuset)
 	return nil
 }
 
@@ -462,3 +507,32 @@ func isKubeInfra(pod *v1.Pod) bool {
 	}
 	return false
 }
+
+// get the isolated CPUs (if any) from the devices associated with a specific container
+func (p *staticPolicy) podIsolCPUs(pod *v1.Pod, container *v1.Container) cpuset.CPUSet {
+	// NOTE: This is required for TestStaticPolicyAdd() since makePod() does
+	// not create UID. We also need a way to properly stub devicemanager.
+	if len(string(pod.UID)) == 0 {
+		return cpuset.NewCPUSet()
+	}
+	devices := p.deviceManager.GetDevices(string(pod.UID), container.Name)
+	for _, dev := range devices {
+		// this resource name needs to match the isolcpus device plugin
+		if dev.ResourceName == "windriver.com/isolcpus" {
+			cpuStrList := dev.DeviceIds
+			if len(cpuStrList) > 0 {
+				cpuSet := cpuset.NewCPUSet()
+				// loop over the list of strings, convert each one to int, add to cpuset
+				for _, cpuStr := range cpuStrList {
+					cpu, err := strconv.Atoi(cpuStr)
+					if err != nil {
+						panic(err)
+					}
+					cpuSet = cpuSet.Union(cpuset.NewCPUSet(cpu))
+				}
+				return cpuSet
+			}
+		}
+	}
+	return cpuset.NewCPUSet()
+}
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 04947d28055..999ab3c1af0 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
+	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
 )
 
 type staticPolicyTest struct {
@@ -45,8 +46,9 @@ type staticPolicyTest struct {
 }
 
 func TestStaticPolicyName(t *testing.T) {
+	testDM, _ := devicemanager.NewManagerStub()
 	testExcl := false
-	policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testExcl)
+	policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testDM, testExcl)
 
 	policyName := policy.Name()
 	if policyName != "static" {
@@ -56,6 +58,7 @@ func TestStaticPolicyName(t *testing.T) {
 }
 
 func TestStaticPolicyStart(t *testing.T) {
+	testDM, _ := devicemanager.NewManagerStub()
 	testCases := []staticPolicyTest{
 		{
 			description: "non-corrupted state",
@@ -131,7 +134,7 @@ func TestStaticPolicyStart(t *testing.T) {
 	}
 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.excludeReserved)
+			p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testDM, testCase.excludeReserved)
 			policy := p.(*staticPolicy)
 			st := &mockState{
 				assignments:   testCase.stAssignments,
@@ -179,6 +182,7 @@ func TestStaticPolicyAdd(t *testing.T) {
 	largeTopoSock0CPUSet := largeTopoSock0Builder.Result()
 	largeTopoSock1CPUSet := largeTopoSock1Builder.Result()
 
+	testDM, _ := devicemanager.NewManagerStub()
 	testCases := []staticPolicyTest{
 		{
 			description: "GuPodSingleCore, SingleSocketHT, ExpectError",
@@ -447,7 +451,7 @@ func TestStaticPolicyAdd(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.excludeReserved)
+		policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testDM, testCase.excludeReserved)
 
 		st := &mockState{
 			assignments:   testCase.stAssignments,
@@ -490,6 +494,7 @@ func TestStaticPolicyAdd(t *testing.T) {
 }
 
 func TestStaticPolicyRemove(t *testing.T) {
+	testDM, _ := devicemanager.NewManagerStub()
 	excludeReserved := false
 	testCases := []staticPolicyTest{
 		{
@@ -549,7 +554,7 @@ func TestStaticPolicyRemove(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), excludeReserved)
+		policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testDM, excludeReserved)
 
 		st := &mockState{
 			assignments:   testCase.stAssignments,
@@ -571,6 +576,7 @@ func TestStaticPolicyRemove(t *testing.T) {
 }
 
 func TestTopologyAwareAllocateCPUs(t *testing.T) {
+	testDM, _ := devicemanager.NewManagerStub()
 	excludeReserved := false
 	testCases := []struct {
 		description string
@@ -640,7 +646,7 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
 		},
 	}
 	for _, tc := range testCases {
-		p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), excludeReserved)
+		p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testDM, excludeReserved)
 		policy := p.(*staticPolicy)
 		st := &mockState{
 			assignments:   tc.stAssignments,
@@ -673,6 +679,7 @@ type staticPolicyTestWithResvList struct {
 	topo            *topology.CPUTopology
 	numReservedCPUs int
 	reserved        cpuset.CPUSet
+	isolcpus        cpuset.CPUSet
 	stAssignments   state.ContainerCPUAssignments
 	stDefaultCPUSet cpuset.CPUSet
 	pod             *v1.Pod
@@ -713,9 +720,10 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
 		},
 	}
 	testExcl := false
+	testDM, _ := devicemanager.NewManagerStub()
 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), testExcl)
+			p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testDM, testExcl)
 			if !reflect.DeepEqual(err, testCase.expNewErr) {
				t.Errorf("StaticPolicy Start() error (%v). expected error: %v but got: %v", testCase.description, testCase.expNewErr, err)
@@ -755,6 +763,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
 			topo:            topoSingleSocketHT,
 			numReservedCPUs: 1,
 			reserved:        cpuset.NewCPUSet(0),
+			isolcpus:        cpuset.NewCPUSet(),
 			stAssignments:   state.ContainerCPUAssignments{},
 			stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
 			pod:             makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
@@ -767,6 +776,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
 			topo:            topoSingleSocketHT,
 			numReservedCPUs: 2,
 			reserved:        cpuset.NewCPUSet(0, 1),
+			isolcpus:        cpuset.NewCPUSet(),
 			stAssignments:   state.ContainerCPUAssignments{},
 			stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7),
 			pod:             makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
@@ -779,6 +789,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
 			topo:            topoSingleSocketHT,
 			numReservedCPUs: 2,
 			reserved:        cpuset.NewCPUSet(0, 1),
+			isolcpus:        cpuset.NewCPUSet(),
 			stAssignments: state.ContainerCPUAssignments{
 				"fakePod": map[string]cpuset.CPUSet{
 					"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
@@ -795,6 +806,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
 			topo:            topoSingleSocketHT,
 			numReservedCPUs: 2,
 			reserved:        cpuset.NewCPUSet(0, 1),
+			isolcpus:        cpuset.NewCPUSet(),
 			stAssignments: state.ContainerCPUAssignments{
 				"fakePod": map[string]cpuset.CPUSet{
 					"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
@@ -806,12 +818,30 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
 			expCPUAlloc:     true,
 			expCSet:         cpuset.NewCPUSet(0, 1),
 		},
+		{
+			description:     "InfraPod, SingleSocketHT, Isolcpus, ExpectAllocReserved",
+			topo:            topoSingleSocketHT,
+			numReservedCPUs: 2,
+			reserved:        cpuset.NewCPUSet(0, 1),
+			isolcpus:        cpuset.NewCPUSet(1),
+			stAssignments: state.ContainerCPUAssignments{
+				"fakePod": map[string]cpuset.CPUSet{
+					"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
+				},
+			},
+			stDefaultCPUSet: cpuset.NewCPUSet(4, 5),
+			pod:             infraPod,
+			expErr:          nil,
+			expCPUAlloc:     true,
+			expCSet:         cpuset.NewCPUSet(0),
+		},
 	}
 
 	testExcl := true
+	testDM, _ := devicemanager.NewManagerStub()
 	for _, testCase := range testCases {
-		policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), testExcl)
-
+		policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, testCase.isolcpus, topologymanager.NewFakeManager(), testDM, testExcl)
+
 		st := &mockState{
 			assignments:   testCase.stAssignments,
 			defaultCPUSet: testCase.stDefaultCPUSet,
-- 
2.16.6
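
Reviewer note, not part of the patch: for context, here is a minimal sketch of a pod that would exercise the new isolcpus path, written against the k8s.io/api types used above. The resource name "windriver.com/isolcpus" matches the one hard-coded in podIsolCPUs(); the pod name, image, and requested count are hypothetical.

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/api/resource"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	func main() {
		// Extended resource advertised by the out-of-tree isolcpus device plugin;
		// must match the ResourceName checked in podIsolCPUs().
		isolcpus := v1.ResourceName("windriver.com/isolcpus")

		pod := v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "isolcpus-demo"},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:  "worker",
					Image: "busybox",
					Resources: v1.ResourceRequirements{
						// Extended resources require requests == limits. There is
						// deliberately no "cpu" request here: per the commit message,
						// isolcpus is meant for best-effort or burstable pods, since a
						// guaranteed pod would be pinned to non-isolated CPUs instead.
						Requests: v1.ResourceList{isolcpus: resource.MustParse("2")},
						Limits:   v1.ResourceList{isolcpus: resource.MustParse("2")},
					},
				}},
			},
		}

		qty := pod.Spec.Containers[0].Resources.Limits[isolcpus]
		fmt.Printf("pod %s requests %s isolated CPUs\n", pod.Name, qty.String())
	}

If the device manager were then to hand this container, say, isolated CPUs 3 and 5, the static policy above would set the container's cpuset to exactly those two CPUs.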