diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go
index 7db6c5223217b..d22a58170ab9d 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
@@ -381,10 +381,16 @@ func (m *manager) GetAllCPUs() cpuset.CPUSet {
 
 type reconciledContainer struct {
 	podName       string
+	podUID        string
 	containerName string
 	containerID   string
 }
 
+type reconciledContainerAllocation struct {
+	reconciledContainer
+	allocatedSet cpuset.CPUSet
+}
+
 func (m *manager) removeStaleState(rootLogger logr.Logger) {
 	// Only once all sources are ready do we attempt to remove any stale state.
 	// This ensures that the call to `m.activePods()` below will succeed with
@@ -473,15 +479,19 @@ func (m *manager) reconcileState(ctx context.Context) (success []reconciledConta
 	failure = []reconciledContainer{}
 
 	rootLogger := klog.FromContext(ctx)
-	m.removeStaleState(rootLogger)
+
+	exclusiveCPUContainers := []reconciledContainerAllocation{}
+	nonExclusiveCPUContainers := []reconciledContainerAllocation{}
+
+	m.Lock()
 	for _, pod := range m.activePods() {
 		podLogger := klog.LoggerWithValues(rootLogger, "pod", klog.KObj(pod))
 
 		pstatus, ok := m.podStatusProvider.GetPodStatus(pod.UID)
 		if !ok {
 			podLogger.V(5).Info("skipping pod; status not found")
-			failure = append(failure, reconciledContainer{pod.Name, "", ""})
+			failure = append(failure, reconciledContainer{pod.Name, string(pod.UID), "", ""})
 			continue
 		}
 
@@ -493,25 +503,24 @@ func (m *manager) reconcileState(ctx context.Context) (success []reconciledConta
 			containerID, err := findContainerIDByName(&pstatus, container.Name)
 			if err != nil {
 				logger.V(5).Info("skipping container; ID not found in pod status", "err", err)
-				failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
+				failure = append(failure, reconciledContainer{pod.Name, string(pod.UID), container.Name, ""})
 				continue
 			}
 
 			cstatus, err := findContainerStatusByName(&pstatus, container.Name)
 			if err != nil {
 				logger.V(5).Info("skipping container; container status not found in pod status", "err", err)
-				failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
+				failure = append(failure, reconciledContainer{pod.Name, string(pod.UID), container.Name, ""})
 				continue
 			}
 
 			if cstatus.State.Waiting != nil || (cstatus.State.Waiting == nil && cstatus.State.Running == nil && cstatus.State.Terminated == nil) {
 				logger.V(4).Info("skipping container; container still in the waiting state", "err", err)
-				failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
+				failure = append(failure, reconciledContainer{pod.Name, string(pod.UID), container.Name, ""})
 				continue
 			}
 
-			m.Lock()
 			if cstatus.State.Terminated != nil {
 				// The container is terminated but we can't call m.RemoveContainer()
 				// here because it could remove the allocated cpuset for the container
@@ -522,7 +531,6 @@ func (m *manager) reconcileState(ctx context.Context) (success []reconciledConta
 				if err == nil {
 					logger.V(4).Info("ignoring terminated container", "containerID", containerID)
 				}
-				m.Unlock()
 				continue
 			}
 
@@ -530,30 +538,84 @@ func (m *manager) reconcileState(ctx context.Context) (success []reconciledConta
 			// Idempotently add it to the containerMap incase it is missing.
 			// This can happen after a kubelet restart, for example.
 			m.containerMap.Add(string(pod.UID), container.Name, containerID)
-			m.Unlock()
-			cset := m.state.GetCPUSetOrDefault(string(pod.UID), container.Name)
+			cset, exclusive := m.state.GetCPUSet(string(pod.UID), container.Name)
+			if !exclusive {
+				cset = m.state.GetDefaultCPUSet()
+			}
 			if cset.IsEmpty() {
 				// NOTE: This should not happen outside of tests.
 				logger.V(2).Info("ReconcileState: skipping container; empty cpuset assigned")
-				failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
+				failure = append(failure, reconciledContainer{pod.Name, string(pod.UID), container.Name, containerID})
 				continue
 			}
 
-			lcset := m.lastUpdateState.GetCPUSetOrDefault(string(pod.UID), container.Name)
-			if !cset.Equals(lcset) {
-				logger.V(5).Info("updating container", "containerID", containerID, "cpuSet", cset)
-				err = m.updateContainerCPUSet(ctx, containerID, cset)
+			rca := reconciledContainerAllocation{
+				reconciledContainer{pod.Name, string(pod.UID), container.Name, containerID},
+				cset,
+			}
+			if exclusive {
+				exclusiveCPUContainers = append(exclusiveCPUContainers, rca)
+			} else {
+				nonExclusiveCPUContainers = append(nonExclusiveCPUContainers, rca)
+			}
+
+		}
+	}
+	m.Unlock()
+
+	failedContainersCPUSet := cpuset.New()
+
+	updateContainers := func(containers []reconciledContainerAllocation, preliminary bool) {
+		for _, rca := range containers {
+			logger := klog.LoggerWithValues(rootLogger, "podName", rca.podName, "containerName", rca.containerName)
+
+			lcset := m.lastUpdateState.GetCPUSetOrDefault(rca.podUID, rca.containerName)
+
+			// Determine the CPU set to use based on the pass
+			var targetCPUSet cpuset.CPUSet
+			if preliminary {
+				targetCPUSet = rca.allocatedSet.Intersection(lcset)
+			} else {
+				targetCPUSet = rca.allocatedSet
+			}
+
+			// Check if update is needed
+			if !targetCPUSet.Equals(lcset) {
+				if !preliminary && !targetCPUSet.Intersection(failedContainersCPUSet).IsEmpty() {
+					logger.Error(fmt.Errorf("conflict with previously failed container CPUSet updates"), "failed to update container", "containerID", rca.containerID, "cpuSet", rca.allocatedSet)
+					failure = append(failure, rca.reconciledContainer)
+					failedContainersCPUSet = failedContainersCPUSet.Union(lcset)
+					continue
+				}
+
+				logger.V(5).Info("updating container", "containerID", rca.containerID, "cpuSet", targetCPUSet)
+				err := m.updateContainerCPUSet(ctx, rca.containerID, targetCPUSet)
 				if err != nil {
-					logger.Error(err, "failed to update container", "containerID", containerID, "cpuSet", cset)
-					failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
+					logger.Error(err, "failed to update container", "containerID", rca.containerID, "cpuSet", targetCPUSet)
+					failure = append(failure, rca.reconciledContainer)
+					failedContainersCPUSet = failedContainersCPUSet.Union(lcset)
 					continue
 				}
-				m.lastUpdateState.SetCPUSet(string(pod.UID), container.Name, cset)
+				m.lastUpdateState.SetCPUSet(rca.podUID, rca.containerName, targetCPUSet)
+			}
+
+			// Add to success list if required
+			if !preliminary {
+				success = append(success, rca.reconciledContainer)
 			}
-			success = append(success, reconciledContainer{pod.Name, container.Name, containerID})
 		}
 	}
+
+	// first pass - only remove CPUs from containers using exclusive CPUs
+	updateContainers(exclusiveCPUContainers, true)
+
+	// second pass - apply CPU sets to non-exclusive CPU containers
+	updateContainers(nonExclusiveCPUContainers, false)
+
+	// third pass - apply final CPU set to containers using exclusive CPUs
+	updateContainers(exclusiveCPUContainers, false)
+
 	return success, failure
 }
diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_others_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_others_test.go
new file mode 100644
index 0000000000000..07a1406177b6f
--- /dev/null
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager_others_test.go
@@ -0,0 +1,36 @@
+//go:build !windows
+
+/*
+Copyright 2026 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cpumanager
+
+import (
+	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	"k8s.io/utils/cpuset"
+)
+
+func (rt mockRuntimeService) getCPUSetFromResources(resources *runtimeapi.ContainerResources) cpuset.CPUSet {
+	if resources != nil && resources.Linux != nil {
+		set, err := cpuset.Parse(resources.Linux.CpusetCpus)
+		if err != nil {
+			rt.t.Errorf("(%v) Cannot parse Linux CPUSet resources %v", rt.testCaseDescription, resources.Linux.CpusetCpus)
+			return cpuset.New()
+		}
+		return set
+	}
+	return cpuset.New()
+}
diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
index d409179575fed..78aeffb76ec8a 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
@@ -218,15 +218,52 @@ func (p *mockPolicy) GetAllocatableCPUs(m state.State) cpuset.CPUSet {
 }
 
 type mockRuntimeService struct {
-	err error
+	err                           []error
+	containerIDsWithExclusiveCPUs []string
+	state                         map[string]cpuset.CPUSet
+	testCPUConflicts              bool
+	testCaseDescription           string
+	t                             *testing.T
 }
 
-func (rt mockRuntimeService) UpdateContainerResources(_ context.Context, id string, resources *runtimeapi.ContainerResources) error {
-	return rt.err
-}
+func (rt *mockRuntimeService) UpdateContainerResources(_ context.Context, id string, resources *runtimeapi.ContainerResources) error {
+	var ret error
+	if len(rt.err) > 0 {
+		ret = rt.err[0]
+		rt.err = rt.err[1:]
+	}
+
+	// update state
+	if ret == nil {
+		newSet := rt.getCPUSetFromResources(resources)
+		if !newSet.IsEmpty() {
+			rt.state[id] = newSet
+		}
+	}
+
+	if rt.testCPUConflicts {
+		// count in how many containers each CPU is used
+		cpuUsage := make(map[int][]string)
+		for containerID, set := range rt.state {
+			for _, cpu := range set.List() {
+				cpuUsage[cpu] = append(cpuUsage[cpu], containerID)
+			}
+		}
+
+		// check if CPUs assigned to containers with exclusive CPUs are used exactly once
+		for _, containerID := range rt.containerIDsWithExclusiveCPUs {
+			set := rt.state[containerID]
+			for _, cpu := range set.List() {
+				if len(cpuUsage[cpu]) != 1 {
+					rt.t.Errorf("%v", rt.testCaseDescription)
+					rt.t.Errorf("after updating container resources of %s", id)
+					rt.t.Errorf("Expected CPU %d usage 1, actual usage %d %v", cpu, len(cpuUsage[cpu]), cpuUsage[cpu])
+				}
+			}
+		}
+	}
 
-func (rt mockRuntimeService) Close(_ context.Context) error {
-	return rt.err
+	return ret
 }
 
 type mockPodStatusProvider struct {
@@ -434,7 +471,7 @@ func TestCPUManagerAdd(t *testing.T) {
 		nil)
 	testCases := []struct {
 		description    string
-		updateErr      error
+		updateErr      []error
 		policy         Policy
 		expCPUSet      cpuset.CPUSet
 		expAllocateErr error
@@
-468,7 +505,7 @@ func TestCPUManagerAdd(t *testing.T) { defaultCPUSet: cpuset.New(1, 2, 3, 4), }, lastUpdateState: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{ + containerRuntime: &mockRuntimeService{ err: testCase.updateErr, }, containerMap: containermap.NewContainerMap(), @@ -698,7 +735,7 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) { policy: policy, state: mockState, lastUpdateState: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{}, + containerRuntime: &mockRuntimeService{}, containerMap: containermap.NewContainerMap(), podStatusProvider: mockPodStatusProvider{}, sourcesReady: &sourcesReadyStub{}, @@ -890,7 +927,7 @@ func TestCPUManagerRemove(t *testing.T) { defaultCPUSet: cpuset.New(), }, lastUpdateState: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{}, + containerRuntime: &mockRuntimeService{}, containerMap: containerMap, activePods: func() []*v1.Pod { return nil }, podStatusProvider: mockPodStatusProvider{}, @@ -907,7 +944,7 @@ func TestCPUManagerRemove(t *testing.T) { err: fmt.Errorf("fake error"), }, state: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{}, + containerRuntime: &mockRuntimeService{}, containerMap: containerMap, activePods: func() []*v1.Pod { return nil }, podStatusProvider: mockPodStatusProvider{}, @@ -951,20 +988,25 @@ func TestReconcileState(t *testing.T) { nil) testCases := []struct { - description string - policy Policy - activePods []*v1.Pod - pspPS v1.PodStatus - pspFound bool - updateErr error - stAssignments state.ContainerCPUAssignments - stDefaultCPUSet cpuset.CPUSet - lastUpdateStAssignments state.ContainerCPUAssignments - lastUpdateStDefaultCPUSet cpuset.CPUSet - expectStAssignments state.ContainerCPUAssignments - expectStDefaultCPUSet cpuset.CPUSet - expectSucceededContainerName string - expectFailedContainerName string + description string + policy Policy + activePods []*v1.Pod + pspPS v1.PodStatus + pspFound bool + updateErr []error + containerIDsWithExclusiveCPUs []string + containerRuntimeInitialState map[string]cpuset.CPUSet + stAssignments state.ContainerCPUAssignments + stDefaultCPUSet cpuset.CPUSet + lastUpdateStAssignments state.ContainerCPUAssignments + lastUpdateStDefaultCPUSet cpuset.CPUSet + expectStAssignments state.ContainerCPUAssignments + expectStDefaultCPUSet cpuset.CPUSet + expectLastUpdateStAssignments state.ContainerCPUAssignments + expectLastUpdateStDefaultCPUSet cpuset.CPUSet + expectContainerRuntimeState map[string]cpuset.CPUSet + expectSucceededContainerName []string + expectFailedContainerName []string }{ { description: "cpu manager reconcile - no error", @@ -995,8 +1037,10 @@ func TestReconcileState(t *testing.T) { }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, stAssignments: state.ContainerCPUAssignments{ "fakePodUID": map[string]cpuset.CPUSet{ "fakeContainerName": cpuset.New(1, 2), @@ -1010,9 +1054,18 @@ func TestReconcileState(t *testing.T) { "fakeContainerName": cpuset.New(1, 2), }, }, - expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - expectSucceededContainerName: "fakeContainerName", - expectFailedContainerName: "", + expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodUID": map[string]cpuset.CPUSet{ + "fakeContainerName": cpuset.New(1, 2), + }, + }, + expectLastUpdateStDefaultCPUSet: 
cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, + expectSucceededContainerName: []string{"fakeContainerName"}, + expectFailedContainerName: []string{}, }, { description: "cpu manager reconcile init container - no error", @@ -1043,8 +1096,10 @@ func TestReconcileState(t *testing.T) { }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, stAssignments: state.ContainerCPUAssignments{ "fakePodUID": map[string]cpuset.CPUSet{ "fakeContainerName": cpuset.New(1, 2), @@ -1058,9 +1113,18 @@ func TestReconcileState(t *testing.T) { "fakeContainerName": cpuset.New(1, 2), }, }, - expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - expectSucceededContainerName: "fakeContainerName", - expectFailedContainerName: "", + expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodUID": map[string]cpuset.CPUSet{ + "fakeContainerName": cpuset.New(1, 2), + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, + expectSucceededContainerName: []string{"fakeContainerName"}, + expectFailedContainerName: []string{}, }, { description: "cpu manager reconcile - pod status not found", @@ -1080,17 +1144,22 @@ func TestReconcileState(t *testing.T) { }, }, }, - pspPS: v1.PodStatus{}, - pspFound: false, - updateErr: nil, - stAssignments: state.ContainerCPUAssignments{}, - stDefaultCPUSet: cpuset.New(), - lastUpdateStAssignments: state.ContainerCPUAssignments{}, - lastUpdateStDefaultCPUSet: cpuset.New(), - expectStAssignments: state.ContainerCPUAssignments{}, - expectStDefaultCPUSet: cpuset.New(), - expectSucceededContainerName: "", - expectFailedContainerName: "", + pspPS: v1.PodStatus{}, + pspFound: false, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAssignments: state.ContainerCPUAssignments{}, + stDefaultCPUSet: cpuset.New(), + lastUpdateStAssignments: state.ContainerCPUAssignments{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAssignments: state.ContainerCPUAssignments{}, + expectStDefaultCPUSet: cpuset.New(), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{}, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{}, + expectSucceededContainerName: []string{}, + expectFailedContainerName: []string{}, }, { description: "cpu manager reconcile - container state not found", @@ -1118,16 +1187,21 @@ func TestReconcileState(t *testing.T) { }, }, }, - pspFound: true, - updateErr: nil, - stAssignments: state.ContainerCPUAssignments{}, - stDefaultCPUSet: cpuset.New(), - lastUpdateStAssignments: state.ContainerCPUAssignments{}, - lastUpdateStDefaultCPUSet: cpuset.New(), - expectStAssignments: state.ContainerCPUAssignments{}, - expectStDefaultCPUSet: cpuset.New(), - expectSucceededContainerName: "", - expectFailedContainerName: "fakeContainerName", + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAssignments: state.ContainerCPUAssignments{}, + stDefaultCPUSet: cpuset.New(), + lastUpdateStAssignments: state.ContainerCPUAssignments{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAssignments: 
state.ContainerCPUAssignments{}, + expectStDefaultCPUSet: cpuset.New(), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{}, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{}, + expectSucceededContainerName: []string{}, + expectFailedContainerName: []string{}, }, { description: "cpu manager reconclie - cpuset is empty", @@ -1158,8 +1232,10 @@ func TestReconcileState(t *testing.T) { }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, stAssignments: state.ContainerCPUAssignments{ "fakePodUID": map[string]cpuset.CPUSet{ "fakeContainerName": cpuset.New(), @@ -1173,9 +1249,12 @@ func TestReconcileState(t *testing.T) { "fakeContainerName": cpuset.New(), }, }, - expectStDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7), - expectSucceededContainerName: "", - expectFailedContainerName: "fakeContainerName", + expectStDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{}, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{}, + expectSucceededContainerName: []string{}, + expectFailedContainerName: []string{"fakeContainerName"}, }, { description: "cpu manager reconclie - container update error", @@ -1206,8 +1285,10 @@ func TestReconcileState(t *testing.T) { }, }, }, - pspFound: true, - updateErr: fmt.Errorf("fake container update error"), + pspFound: true, + updateErr: []error{fmt.Errorf("fake container update error")}, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, stAssignments: state.ContainerCPUAssignments{ "fakePodUID": map[string]cpuset.CPUSet{ "fakeContainerName": cpuset.New(1, 2), @@ -1221,9 +1302,12 @@ func TestReconcileState(t *testing.T) { "fakeContainerName": cpuset.New(1, 2), }, }, - expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - expectSucceededContainerName: "", - expectFailedContainerName: "fakeContainerName", + expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{}, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{}, + expectSucceededContainerName: []string{}, + expectFailedContainerName: []string{"fakeContainerName"}, }, { description: "cpu manager reconcile - state has inactive container", @@ -1254,8 +1338,10 @@ func TestReconcileState(t *testing.T) { }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, stAssignments: state.ContainerCPUAssignments{ "fakePodUID": map[string]cpuset.CPUSet{ "fakeContainerName": cpuset.New(1, 2), @@ -1272,9 +1358,18 @@ func TestReconcileState(t *testing.T) { "fakeContainerName": cpuset.New(1, 2), }, }, - expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - expectSucceededContainerName: "fakeContainerName", - expectFailedContainerName: "", + expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodUID": map[string]cpuset.CPUSet{ + "fakeContainerName": cpuset.New(1, 2), + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, + 
expectSucceededContainerName: []string{"fakeContainerName"}, + expectFailedContainerName: []string{}, }, { description: "cpu manager reconcile - last update state is current", @@ -1305,8 +1400,12 @@ func TestReconcileState(t *testing.T) { }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, stAssignments: state.ContainerCPUAssignments{ "fakePodUID": map[string]cpuset.CPUSet{ "fakeContainerName": cpuset.New(1, 2), @@ -1318,15 +1417,24 @@ func TestReconcileState(t *testing.T) { "fakeContainerName": cpuset.New(1, 2), }, }, - lastUpdateStDefaultCPUSet: cpuset.New(5, 6, 7), + lastUpdateStDefaultCPUSet: cpuset.New(), expectStAssignments: state.ContainerCPUAssignments{ "fakePodUID": map[string]cpuset.CPUSet{ "fakeContainerName": cpuset.New(1, 2), }, }, - expectStDefaultCPUSet: cpuset.New(5, 6, 7), - expectSucceededContainerName: "fakeContainerName", - expectFailedContainerName: "", + expectStDefaultCPUSet: cpuset.New(5, 6, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodUID": map[string]cpuset.CPUSet{ + "fakeContainerName": cpuset.New(1, 2), + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, + expectSucceededContainerName: []string{"fakeContainerName"}, + expectFailedContainerName: []string{}, }, { description: "cpu manager reconcile - last update state is not current", @@ -1357,8 +1465,12 @@ func TestReconcileState(t *testing.T) { }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(3, 4), + }, stAssignments: state.ContainerCPUAssignments{ "fakePodUID": map[string]cpuset.CPUSet{ "fakeContainerName": cpuset.New(1, 2), @@ -1370,877 +1482,2624 @@ func TestReconcileState(t *testing.T) { "fakeContainerName": cpuset.New(3, 4), }, }, - lastUpdateStDefaultCPUSet: cpuset.New(1, 2, 5, 6, 7), + lastUpdateStDefaultCPUSet: cpuset.New(), expectStAssignments: state.ContainerCPUAssignments{ "fakePodUID": map[string]cpuset.CPUSet{ "fakeContainerName": cpuset.New(1, 2), }, }, - expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - expectSucceededContainerName: "fakeContainerName", - expectFailedContainerName: "", - }, - } - - for _, testCase := range testCases { - logger, _ := ktesting.NewTestContext(t) - mgr := &manager{ - policy: testCase.policy, - state: &mockState{ - assignments: testCase.stAssignments, - defaultCPUSet: testCase.stDefaultCPUSet, - }, - lastUpdateState: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{ - err: testCase.updateErr, - }, - containerMap: containermap.NewContainerMap(), - activePods: func() []*v1.Pod { - return testCase.activePods - }, - podStatusProvider: mockPodStatusProvider{ - podStatus: testCase.pspPS, - found: testCase.pspFound, + expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodUID": map[string]cpuset.CPUSet{ + "fakeContainerName": cpuset.New(1, 2), + }, }, - } - mgr.sourcesReady = &sourcesReadyStub{} - success, failure := mgr.reconcileState(context.Background()) - - if !reflect.DeepEqual(testCase.expectStAssignments, mgr.state.GetCPUAssignments()) { - t.Errorf("%v", 
testCase.description) - t.Errorf("Expected state container cpu assignments: %v, actual: %v", testCase.expectStAssignments, mgr.state.GetCPUAssignments()) - - } - - if !reflect.DeepEqual(testCase.expectStDefaultCPUSet, mgr.state.GetDefaultCPUSet()) { - t.Errorf("%v", testCase.description) - t.Errorf("Expected state default cpuset: %v, actual: %v", testCase.expectStDefaultCPUSet, mgr.state.GetDefaultCPUSet()) - - } - - if testCase.expectSucceededContainerName != "" { - // Search succeeded reconciled containers for the supplied name. - foundSucceededContainer := false - for _, reconciled := range success { - if reconciled.containerName == testCase.expectSucceededContainerName { - foundSucceededContainer = true - break - } - } - if !foundSucceededContainer { - t.Errorf("%v", testCase.description) - t.Errorf("Expected reconciliation success for container: %s", testCase.expectSucceededContainerName) - } - } - - if testCase.expectFailedContainerName != "" { - // Search failed reconciled containers for the supplied name. - foundFailedContainer := false - for _, reconciled := range failure { - if reconciled.containerName == testCase.expectFailedContainerName { - foundFailedContainer = true - break - } - } - if !foundFailedContainer { - t.Errorf("%v", testCase.description) - t.Errorf("Expected reconciliation failure for container: %s", testCase.expectFailedContainerName) - } - } - } -} - -// above test cases are without kubelet --reserved-cpus cmd option -// the following tests are with --reserved-cpus configured -func TestCPUManagerAddWithResvList(t *testing.T) { - if runtime.GOOS == "windows" { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) - } - - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, false) - logger, _ := ktesting.NewTestContext(t) - testPolicy, _ := NewStaticPolicy( - logger, - &topology.CPUTopology{ - NumCPUs: 4, - NumSockets: 1, - NumCores: 4, - CPUDetails: map[int]topology.CPUInfo{ - 0: {CoreID: 0, SocketID: 0}, - 1: {CoreID: 1, SocketID: 0}, - 2: {CoreID: 2, SocketID: 0}, - 3: {CoreID: 3, SocketID: 0}, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), }, + expectSucceededContainerName: []string{"fakeContainerName"}, + expectFailedContainerName: []string{}, }, - 1, - cpuset.New(0), - topologymanager.NewFakeManager(), - nil) - testCases := []struct { - description string - updateErr error - policy Policy - expCPUSet cpuset.CPUSet - expAllocateErr error - expAddContainerErr error - }{ - { - description: "cpu manager add - no error", - updateErr: nil, - policy: testPolicy, - expCPUSet: cpuset.New(0, 3), - expAllocateErr: nil, - expAddContainerErr: nil, - }, - } - - for _, testCase := range testCases { - mgr := &manager{ - policy: testCase.policy, - state: &mockState{ - assignments: state.ContainerCPUAssignments{}, - defaultCPUSet: cpuset.New(0, 1, 2, 3), - }, - lastUpdateState: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{ - err: testCase.updateErr, - }, - containerMap: containermap.NewContainerMap(), - podStatusProvider: mockPodStatusProvider{}, - sourcesReady: &sourcesReadyStub{}, - } - - pod := makePod("fakePod", "fakeContainer", "2", "2") - container := &pod.Spec.Containers[0] - mgr.activePods = func() []*v1.Pod { return []*v1.Pod{pod} } - - err := mgr.Allocate(pod, container, lifecycle.AddOperation) - if 
!reflect.DeepEqual(err, testCase.expAllocateErr) { - t.Errorf("CPU Manager Allocate() error (%v). expected error: %v but got: %v", - testCase.description, testCase.expAllocateErr, err) - } - - mgr.AddContainer(logger, pod, container, "fakeID") - _, _, err = mgr.containerMap.GetContainerRef("fakeID") - if !reflect.DeepEqual(err, testCase.expAddContainerErr) { - t.Errorf("CPU Manager AddContainer() error (%v). expected error: %v but got: %v", - testCase.description, testCase.expAddContainerErr, err) - } - if !testCase.expCPUSet.Equals(mgr.state.GetDefaultCPUSet()) { - t.Errorf("CPU Manager AddContainer() error (%v). expected cpuset: %v but got: %v", - testCase.description, testCase.expCPUSet, mgr.state.GetDefaultCPUSet()) - } - } -} - -func TestCPUManagerHandlePolicyOptions(t *testing.T) { - if runtime.GOOS == "windows" { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) - } - - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, false) - testCases := []struct { - description string - cpuPolicyName string - cpuPolicyOptions map[string]string - expectedError error - }{ { - description: "options to none policy", - cpuPolicyName: "none", - cpuPolicyOptions: map[string]string{ - FullPCPUsOnlyOption: "true", - }, - expectedError: fmt.Errorf("received unsupported options"), - }, - } - - // any correct realistic topology is fine. We pick a simple one. - mockedMachineInfo := cadvisorapi.MachineInfo{ - NumCores: 4, - Topology: []cadvisorapi.Node{ - { - Cores: []cadvisorapi.Core{ - { - Id: 0, - Threads: []int{0}, + description: "cpu manager reconcile - default CPU sets no error", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodAName", + UID: "fakePodAUID", }, - { - Id: 1, - Threads: []int{1}, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerAName", + }, + }, }, - { - Id: 2, - Threads: []int{2}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", }, - { - Id: 3, - Threads: []int{3}, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", + }, + }, }, }, }, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.description, func(t *testing.T) { - machineInfo := &mockedMachineInfo - nodeAllocatableReservation := v1.ResourceList{} - sDir, err := os.MkdirTemp("", "cpu_manager_test") - if err != nil { - t.Errorf("cannot create state file: %s", err.Error()) - } - defer removeStateDirectory(t, sDir) - - logger, _ := ktesting.NewTestContext(t) - _, err = NewManager(logger, testCase.cpuPolicyName, testCase.cpuPolicyOptions, 5*time.Second, machineInfo, cpuset.New(), nodeAllocatableReservation, sDir, topologymanager.NewFakeManager()) - if err == nil { - t.Errorf("Expected error, but NewManager succeeded") - } - if !strings.Contains(err.Error(), testCase.expectedError.Error()) { - t.Errorf("Unexpected error message. 
Have: %s wants %s", err.Error(), testCase.expectedError.Error()) - } - }) - - } -} - -func TestCPUManagerGetAllocatableCPUs(t *testing.T) { - logger, _ := ktesting.NewTestContext(t) - if runtime.GOOS == "windows" { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) - } - - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, false) - nonePolicy, _ := NewNonePolicy(nil) - staticPolicy, _ := NewStaticPolicy( - logger, - &topology.CPUTopology{ - NumCPUs: 4, - NumSockets: 1, - NumCores: 4, - CPUDetails: map[int]topology.CPUInfo{ - 0: {CoreID: 0, SocketID: 0}, - 1: {CoreID: 1, SocketID: 0}, - 2: {CoreID: 2, SocketID: 0}, - 3: {CoreID: 3, SocketID: 0}, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, }, - }, - 1, - cpuset.New(0), - topologymanager.NewFakeManager(), - nil) - - testCases := []struct { - description string - policy Policy - expAllocatableCPUs cpuset.CPUSet - }{ - { - description: "None Policy", - policy: nonePolicy, - expAllocatableCPUs: cpuset.New(), - }, - { - description: "Static Policy", - policy: staticPolicy, - expAllocatableCPUs: cpuset.New(1, 2, 3), - }, - } - for _, testCase := range testCases { - mgr := &manager{ - policy: testCase.policy, - activePods: func() []*v1.Pod { return nil }, - state: &mockState{ - assignments: state.ContainerCPUAssignments{}, - defaultCPUSet: cpuset.New(0, 1, 2, 3), + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAssignments: state.ContainerCPUAssignments{}, + stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7), + lastUpdateStAssignments: state.ContainerCPUAssignments{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAssignments: state.ContainerCPUAssignments{}, + expectStDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(1, 2, 3, 4, 5, 6, 7), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(1, 2, 3, 4, 5, 6, 7), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(1, 2, 3, 4, 5, 6, 7), + }, }, - lastUpdateState: state.NewMemoryState(logger), - containerMap: containermap.NewContainerMap(), - podStatusProvider: mockPodStatusProvider{}, - sourcesReady: &sourcesReadyStub{}, - } - mgr.sourcesReady = &sourcesReadyStub{} - mgr.allocatableCPUs = testCase.policy.GetAllocatableCPUs(mgr.state) - - pod := makePod("fakePod", "fakeContainer", "2", "2") - container := &pod.Spec.Containers[0] - - _ = mgr.Allocate(pod, container, lifecycle.AddOperation) - - if !mgr.GetAllocatableCPUs().Equals(testCase.expAllocatableCPUs) { - t.Errorf("Policy GetAllocatableCPUs() error (%v). 
expected cpuset %v for container %v but got %v", - testCase.description, testCase.expAllocatableCPUs, "fakeContainer", mgr.GetAllocatableCPUs()) - } - } -} - -func TestCPUManagerAddWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) { - if runtime.GOOS == "windows" { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) - } - - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, true) - logger, _ := ktesting.NewTestContext(t) - - testPolicy, _ := NewStaticPolicy( - logger, - &topology.CPUTopology{ - NumCPUs: 4, - NumSockets: 1, - NumCores: 4, - CPUDetails: map[int]topology.CPUInfo{ - 0: {CoreID: 0, SocketID: 0}, - 1: {CoreID: 1, SocketID: 0}, - 2: {CoreID: 2, SocketID: 0}, - 3: {CoreID: 3, SocketID: 0}, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(1, 2, 3, 4, 5, 6, 7), + "fakeContainerBID": cpuset.New(1, 2, 3, 4, 5, 6, 7), + "fakeContainerCID": cpuset.New(1, 2, 3, 4, 5, 6, 7), }, - }, - 0, - cpuset.New(), - topologymanager.NewFakeManager(), - nil) - testCases := []struct { - description string - updateErr error - policy Policy - expCPUSet cpuset.CPUSet - expAllocateErr error - expAddContainerErr error - }{ - { - description: "cpu manager add - no error", - updateErr: nil, - policy: testPolicy, - expCPUSet: cpuset.New(3, 4), - expAllocateErr: nil, - expAddContainerErr: nil, + expectSucceededContainerName: []string{"fakeContainerAName", "fakeContainerBName", "fakeContainerCName"}, + expectFailedContainerName: []string{}, }, { - description: "cpu manager add - policy add container error", - updateErr: nil, - policy: &mockPolicy{ - err: fmt.Errorf("fake reg error"), + description: "cpu manager reconcile - fail in first reconcile pass does not cause conflict", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodAName", + UID: "fakePodAUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", + }, + }, + }, + }, }, - expCPUSet: cpuset.New(1, 2, 3, 4), - expAllocateErr: fmt.Errorf("fake reg error"), - expAddContainerErr: nil, - }, - } - - for _, testCase := range testCases { - mgr := &manager{ - policy: testCase.policy, - state: &mockState{ - allocations: state.ContainerCPUAllocations{}, - defaultCPUSet: cpuset.New(1, 2, 3, 4), + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, }, - lastUpdateState: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{ - err: testCase.updateErr, + pspFound: true, + 
updateErr: []error{ + fmt.Errorf("fakeContainerAID pass 1 error"), + nil, //fakeContainerCID pass 1 ok + nil, //fakeContainerBID pass 2 ok + nil, //fakeContainerCID pass 3 ok }, - containerMap: containermap.NewContainerMap(), - podStatusProvider: mockPodStatusProvider{}, - sourcesReady: &sourcesReadyStub{}, - } - - pod := makePod("fakePod", "fakeContainer", "2", "2") - container := &pod.Spec.Containers[0] - mgr.activePods = func() []*v1.Pod { return []*v1.Pod{pod} } - - err := mgr.Allocate(pod, container, lifecycle.AddOperation) - if !reflect.DeepEqual(err, testCase.expAllocateErr) { - t.Errorf("CPU Manager Allocate() error (%v). expected error: %v but got: %v", - testCase.description, testCase.expAllocateErr, err) - } - - mgr.AddContainer(logger, pod, container, "fakeID") - _, _, err = mgr.containerMap.GetContainerRef("fakeID") - if !reflect.DeepEqual(err, testCase.expAddContainerErr) { - t.Errorf("CPU Manager AddContainer() error (%v). expected error: %v but got: %v", - testCase.description, testCase.expAddContainerErr, err) - } - if !testCase.expCPUSet.Equals(mgr.state.GetDefaultCPUSet()) { - t.Errorf("CPU Manager AddContainer() error (%v). expected cpuset: %v but got: %v", - testCase.description, testCase.expCPUSet, mgr.state.GetDefaultCPUSet()) - } - } -} - -func TestCPUManagerAddWithInitContainersWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) { - if runtime.GOOS == "windows" { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) - } - - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, true) - testCases := []struct { - description string - topo *topology.CPUTopology - numReservedCPUs int - initContainerIDs []string - containerIDs []string - stAllocations state.ContainerCPUAllocations - stDefaultCPUSet cpuset.CPUSet + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerCID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(6, 7, 8), + }, + stAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 3, 6), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(5, 8), + }, + }, + stDefaultCPUSet: cpuset.New(4, 7), + lastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 1, 2), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(3, 4, 5), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(6, 7, 8), + }, + }, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 3, 6), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(5, 8), + }, + }, + expectStDefaultCPUSet: cpuset.New(4, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 1, 2), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(4, 7), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(5, 8), + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: 
map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(4, 7), + "fakeContainerCID": cpuset.New(5, 8), + }, + expectSucceededContainerName: []string{"fakeContainerBName", "fakeContainerCName"}, + expectFailedContainerName: []string{"fakeContainerAName"}, + }, + { + description: "cpu manager reconcile - fail in first reconcile pass causes conflict in second pass", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodAName", + UID: "fakePodAUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: []error{ + fmt.Errorf("fakeContainerAID pass 1 error"), + nil, //fakeContainerCID pass 1 ok + nil, //fakeContainerCID pass 3 ok + }, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerCID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(6, 7, 8), + }, + stAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 3, 6), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(8), + }, + }, + stDefaultCPUSet: cpuset.New(1, 4, 7), + lastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 1, 2), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(3, 4, 5), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(6, 7, 8), + }, + }, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 3, 6), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(8), + }, + }, + expectStDefaultCPUSet: cpuset.New(1, 4, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 1, 2), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(3, 4, 5), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(8), + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(8), + }, + 
expectSucceededContainerName: []string{"fakeContainerCName"}, + expectFailedContainerName: []string{"fakeContainerAName", "fakeContainerBName"}, + }, + { + description: "cpu manager reconcile - fail in first reconcile pass causes conflict in second pass which causes conflict in third pass", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodAName", + UID: "fakePodAUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: []error{ + fmt.Errorf("fakeContainerAID pass 1 error"), + nil, //fakeContainerCID pass 1 ok + }, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerCID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(6, 7, 8), + }, + stAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 3, 6), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(5, 8), + }, + }, + stDefaultCPUSet: cpuset.New(1, 4, 7), + lastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 1, 2), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(3, 4, 5), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(6, 7, 8), + }, + }, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 3, 6), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(5, 8), + }, + }, + expectStDefaultCPUSet: cpuset.New(1, 4, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 1, 2), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(3, 4, 5), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(8), + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(8), + }, + expectSucceededContainerName: []string{}, + expectFailedContainerName: []string{"fakeContainerAName", "fakeContainerBName", "fakeContainerCName"}, + }, + { + 
description: "cpu manager reconcile - fail in first reconcile pass causes conflict in third pass", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodAName", + UID: "fakePodAUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: []error{ + fmt.Errorf("fakeContainerAID pass 1 error"), + nil, //fakeContainerCID pass 1 ok + nil, //fakeContainerBID pass 2 ok + }, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerCID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(6, 7, 8), + }, + stAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 3, 6), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(2, 5, 8), + }, + }, + stDefaultCPUSet: cpuset.New(4, 7), + lastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 1, 2), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(3, 4, 5), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(6, 7, 8), + }, + }, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 3, 6), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(2, 5, 8), + }, + }, + expectStDefaultCPUSet: cpuset.New(4, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 1, 2), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(4, 7), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(8), + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(4, 7), + "fakeContainerCID": cpuset.New(8), + }, + expectSucceededContainerName: []string{"fakeContainerBName"}, + expectFailedContainerName: []string{"fakeContainerAName", "fakeContainerCName"}, + }, + { + description: "cpu manager reconcile - fail in second reconcile pass causes conflict in third pass", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: 
metav1.ObjectMeta{ + Name: "fakePodAName", + UID: "fakePodAUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: []error{ + nil, //fakeContainerAID pass 1 ok + nil, //fakeContainerCID pass 1 ok + fmt.Errorf("fakeContainerBID pass 2 error"), + nil, //fakeContainerAID pass 3 ok + }, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerCID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(6, 7, 8), + }, + stAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 6), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(2, 5, 8), + }, + }, + stDefaultCPUSet: cpuset.New(1, 4, 7), + lastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 1, 2), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(3, 4, 5), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(6, 7, 8), + }, + }, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 6), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(2, 5, 8), + }, + }, + expectStDefaultCPUSet: cpuset.New(1, 4, 7), + expectLastUpdateStAssignments: state.ContainerCPUAssignments{ + "fakePodAUID": map[string]cpuset.CPUSet{ + "fakeContainerAName": cpuset.New(0, 6), + }, + "fakePodBUID": map[string]cpuset.CPUSet{ + "fakeContainerBName": cpuset.New(3, 4, 5), + }, + "fakePodCUID": map[string]cpuset.CPUSet{ + "fakeContainerCName": cpuset.New(8), + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 6), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(8), + }, + expectSucceededContainerName: []string{"fakeContainerAName"}, + expectFailedContainerName: []string{"fakeContainerBName", "fakeContainerCName"}, + }, + } + for _, testCase := range testCases { + logger, _ := ktesting.NewTestContext(t) + mgr := &manager{ + policy: testCase.policy, + state: &mockState{ + assignments: testCase.stAssignments, + defaultCPUSet: testCase.stDefaultCPUSet, + }, + lastUpdateState: state.NewMemoryState(logger), + 
containerRuntime: &mockRuntimeService{ + err: testCase.updateErr, + containerIDsWithExclusiveCPUs: testCase.containerIDsWithExclusiveCPUs, + state: testCase.containerRuntimeInitialState, + testCPUConflicts: true, + testCaseDescription: testCase.description, + t: t, + }, + containerMap: containermap.NewContainerMap(), + activePods: func() []*v1.Pod { + return testCase.activePods + }, + podStatusProvider: mockPodStatusProvider{ + podStatus: testCase.pspPS, + found: testCase.pspFound, + }, + } + mgr.sourcesReady = &sourcesReadyStub{} + mgr.lastUpdateState.SetCPUAssignments(testCase.lastUpdateStAssignments) + mgr.lastUpdateState.SetDefaultCPUSet(testCase.lastUpdateStDefaultCPUSet) + success, failure := mgr.reconcileState(context.Background()) + + if !reflect.DeepEqual(testCase.expectStAssignments, mgr.state.GetCPUAssignments()) { + t.Errorf("%v", testCase.description) + t.Errorf("Expected state container cpu assignments: %v, actual: %v", testCase.expectStAssignments, mgr.state.GetCPUAssignments()) + } + + if !reflect.DeepEqual(testCase.expectStDefaultCPUSet, mgr.state.GetDefaultCPUSet()) { + t.Errorf("%v", testCase.description) + t.Errorf("Expected state default cpuset: %v, actual: %v", testCase.expectStDefaultCPUSet, mgr.state.GetDefaultCPUSet()) + } + + if !reflect.DeepEqual(testCase.expectLastUpdateStAssignments, mgr.lastUpdateState.GetCPUAssignments()) { + t.Errorf("%v", testCase.description) + t.Errorf("Expected lastUpdateState container cpu assignments: %v, actual: %v", testCase.expectLastUpdateStAssignments, mgr.lastUpdateState.GetCPUAssignments()) + } + + if !reflect.DeepEqual(testCase.expectLastUpdateStDefaultCPUSet, mgr.lastUpdateState.GetDefaultCPUSet()) { + t.Errorf("%v", testCase.description) + t.Errorf("Expected lastUpdateState default cpuset: %v, actual: %v", testCase.expectLastUpdateStDefaultCPUSet, mgr.lastUpdateState.GetDefaultCPUSet()) + } + + if !reflect.DeepEqual(testCase.expectContainerRuntimeState, mgr.containerRuntime.(*mockRuntimeService).state) { + t.Errorf("%v", testCase.description) + t.Errorf("Expected containerRuntimeState: %v, actual: %v", testCase.expectContainerRuntimeState, mgr.containerRuntime.(*mockRuntimeService).state) + } + + for _, name := range testCase.expectSucceededContainerName { + // Search succeeded reconciled containers for the supplied name. + foundSucceededContainer := false + for _, reconciled := range success { + if reconciled.containerName == name { + foundSucceededContainer = true + break + } + } + if !foundSucceededContainer { + t.Errorf("%v", testCase.description) + t.Errorf("Expected reconciliation success for container: %s", name) + } + } + + for _, name := range testCase.expectFailedContainerName { + // Search failed reconciled containers for the supplied name. 
+ foundFailedContainer := false + for _, reconciled := range failure { + if reconciled.containerName == name { + foundFailedContainer = true + break + } + } + if !foundFailedContainer { + t.Errorf("%v", testCase.description) + t.Errorf("Expected reconciliation failure for container: %s", name) + } + } + } +} + +// above test cases are without kubelet --reserved-cpus cmd option +// the following tests are with --reserved-cpus configured +func TestCPUManagerAddWithResvList(t *testing.T) { + if runtime.GOOS == "windows" { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) + } + + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, false) + logger, _ := ktesting.NewTestContext(t) + testPolicy, _ := NewStaticPolicy( + logger, + &topology.CPUTopology{ + NumCPUs: 4, + NumSockets: 1, + NumCores: 4, + CPUDetails: map[int]topology.CPUInfo{ + 0: {CoreID: 0, SocketID: 0}, + 1: {CoreID: 1, SocketID: 0}, + 2: {CoreID: 2, SocketID: 0}, + 3: {CoreID: 3, SocketID: 0}, + }, + }, + 1, + cpuset.New(0), + topologymanager.NewFakeManager(), + nil) + testCases := []struct { + description string + updateErr []error + policy Policy + expCPUSet cpuset.CPUSet + expAllocateErr error + expAddContainerErr error + }{ + { + description: "cpu manager add - no error", + updateErr: nil, + policy: testPolicy, + expCPUSet: cpuset.New(0, 3), + expAllocateErr: nil, + expAddContainerErr: nil, + }, + } + + for _, testCase := range testCases { + mgr := &manager{ + policy: testCase.policy, + state: &mockState{ + assignments: state.ContainerCPUAssignments{}, + defaultCPUSet: cpuset.New(0, 1, 2, 3), + }, + lastUpdateState: state.NewMemoryState(logger), + containerRuntime: &mockRuntimeService{ + err: testCase.updateErr, + }, + containerMap: containermap.NewContainerMap(), + podStatusProvider: mockPodStatusProvider{}, + sourcesReady: &sourcesReadyStub{}, + } + + pod := makePod("fakePod", "fakeContainer", "2", "2") + container := &pod.Spec.Containers[0] + mgr.activePods = func() []*v1.Pod { return []*v1.Pod{pod} } + + err := mgr.Allocate(pod, container, lifecycle.AddOperation) + if !reflect.DeepEqual(err, testCase.expAllocateErr) { + t.Errorf("CPU Manager Allocate() error (%v). expected error: %v but got: %v", + testCase.description, testCase.expAllocateErr, err) + } + + mgr.AddContainer(logger, pod, container, "fakeID") + _, _, err = mgr.containerMap.GetContainerRef("fakeID") + if !reflect.DeepEqual(err, testCase.expAddContainerErr) { + t.Errorf("CPU Manager AddContainer() error (%v). expected error: %v but got: %v", + testCase.description, testCase.expAddContainerErr, err) + } + if !testCase.expCPUSet.Equals(mgr.state.GetDefaultCPUSet()) { + t.Errorf("CPU Manager AddContainer() error (%v). 
expected cpuset: %v but got: %v", + testCase.description, testCase.expCPUSet, mgr.state.GetDefaultCPUSet()) + } + } +} + +func TestCPUManagerHandlePolicyOptions(t *testing.T) { + if runtime.GOOS == "windows" { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) + } + + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, false) + testCases := []struct { + description string + cpuPolicyName string + cpuPolicyOptions map[string]string + expectedError error + }{ + { + description: "options to none policy", + cpuPolicyName: "none", + cpuPolicyOptions: map[string]string{ + FullPCPUsOnlyOption: "true", + }, + expectedError: fmt.Errorf("received unsupported options"), + }, + } + + // any correct realistic topology is fine. We pick a simple one. + mockedMachineInfo := cadvisorapi.MachineInfo{ + NumCores: 4, + Topology: []cadvisorapi.Node{ + { + Cores: []cadvisorapi.Core{ + { + Id: 0, + Threads: []int{0}, + }, + { + Id: 1, + Threads: []int{1}, + }, + { + Id: 2, + Threads: []int{2}, + }, + { + Id: 3, + Threads: []int{3}, + }, + }, + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.description, func(t *testing.T) { + machineInfo := &mockedMachineInfo + nodeAllocatableReservation := v1.ResourceList{} + sDir, err := os.MkdirTemp("", "cpu_manager_test") + if err != nil { + t.Errorf("cannot create state file: %s", err.Error()) + } + defer removeStateDirectory(t, sDir) + + logger, _ := ktesting.NewTestContext(t) + _, err = NewManager(logger, testCase.cpuPolicyName, testCase.cpuPolicyOptions, 5*time.Second, machineInfo, cpuset.New(), nodeAllocatableReservation, sDir, topologymanager.NewFakeManager()) + if err == nil { + t.Errorf("Expected error, but NewManager succeeded") + } + if !strings.Contains(err.Error(), testCase.expectedError.Error()) { + t.Errorf("Unexpected error message. 
Have: %s wants %s", err.Error(), testCase.expectedError.Error()) + } + }) + + } +} + +func TestCPUManagerGetAllocatableCPUs(t *testing.T) { + logger, _ := ktesting.NewTestContext(t) + if runtime.GOOS == "windows" { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) + } + + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, false) + nonePolicy, _ := NewNonePolicy(nil) + staticPolicy, _ := NewStaticPolicy( + logger, + &topology.CPUTopology{ + NumCPUs: 4, + NumSockets: 1, + NumCores: 4, + CPUDetails: map[int]topology.CPUInfo{ + 0: {CoreID: 0, SocketID: 0}, + 1: {CoreID: 1, SocketID: 0}, + 2: {CoreID: 2, SocketID: 0}, + 3: {CoreID: 3, SocketID: 0}, + }, + }, + 1, + cpuset.New(0), + topologymanager.NewFakeManager(), + nil) + + testCases := []struct { + description string + policy Policy + expAllocatableCPUs cpuset.CPUSet + }{ + { + description: "None Policy", + policy: nonePolicy, + expAllocatableCPUs: cpuset.New(), + }, + { + description: "Static Policy", + policy: staticPolicy, + expAllocatableCPUs: cpuset.New(1, 2, 3), + }, + } + for _, testCase := range testCases { + mgr := &manager{ + policy: testCase.policy, + activePods: func() []*v1.Pod { return nil }, + state: &mockState{ + assignments: state.ContainerCPUAssignments{}, + defaultCPUSet: cpuset.New(0, 1, 2, 3), + }, + lastUpdateState: state.NewMemoryState(logger), + containerMap: containermap.NewContainerMap(), + podStatusProvider: mockPodStatusProvider{}, + sourcesReady: &sourcesReadyStub{}, + } + mgr.sourcesReady = &sourcesReadyStub{} + mgr.allocatableCPUs = testCase.policy.GetAllocatableCPUs(mgr.state) + + pod := makePod("fakePod", "fakeContainer", "2", "2") + container := &pod.Spec.Containers[0] + + _ = mgr.Allocate(pod, container, lifecycle.AddOperation) + + if !mgr.GetAllocatableCPUs().Equals(testCase.expAllocatableCPUs) { + t.Errorf("Policy GetAllocatableCPUs() error (%v). 
expected cpuset %v for container %v but got %v", + testCase.description, testCase.expAllocatableCPUs, "fakeContainer", mgr.GetAllocatableCPUs()) + } + } +} + +func TestCPUManagerAddWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) { + if runtime.GOOS == "windows" { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) + } + + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, true) + logger, _ := ktesting.NewTestContext(t) + + testPolicy, _ := NewStaticPolicy( + logger, + &topology.CPUTopology{ + NumCPUs: 4, + NumSockets: 1, + NumCores: 4, + CPUDetails: map[int]topology.CPUInfo{ + 0: {CoreID: 0, SocketID: 0}, + 1: {CoreID: 1, SocketID: 0}, + 2: {CoreID: 2, SocketID: 0}, + 3: {CoreID: 3, SocketID: 0}, + }, + }, + 0, + cpuset.New(), + topologymanager.NewFakeManager(), + nil) + testCases := []struct { + description string + updateErr []error + policy Policy + expCPUSet cpuset.CPUSet + expAllocateErr error + expAddContainerErr error + }{ + { + description: "cpu manager add - no error", + updateErr: nil, + policy: testPolicy, + expCPUSet: cpuset.New(3, 4), + expAllocateErr: nil, + expAddContainerErr: nil, + }, + { + description: "cpu manager add - policy add container error", + updateErr: nil, + policy: &mockPolicy{ + err: fmt.Errorf("fake reg error"), + }, + expCPUSet: cpuset.New(1, 2, 3, 4), + expAllocateErr: fmt.Errorf("fake reg error"), + expAddContainerErr: nil, + }, + } + + for _, testCase := range testCases { + mgr := &manager{ + policy: testCase.policy, + state: &mockState{ + allocations: state.ContainerCPUAllocations{}, + defaultCPUSet: cpuset.New(1, 2, 3, 4), + }, + lastUpdateState: state.NewMemoryState(logger), + containerRuntime: &mockRuntimeService{ + err: testCase.updateErr, + }, + containerMap: containermap.NewContainerMap(), + podStatusProvider: mockPodStatusProvider{}, + sourcesReady: &sourcesReadyStub{}, + } + + pod := makePod("fakePod", "fakeContainer", "2", "2") + container := &pod.Spec.Containers[0] + mgr.activePods = func() []*v1.Pod { return []*v1.Pod{pod} } + + err := mgr.Allocate(pod, container, lifecycle.AddOperation) + if !reflect.DeepEqual(err, testCase.expAllocateErr) { + t.Errorf("CPU Manager Allocate() error (%v). expected error: %v but got: %v", + testCase.description, testCase.expAllocateErr, err) + } + + mgr.AddContainer(logger, pod, container, "fakeID") + _, _, err = mgr.containerMap.GetContainerRef("fakeID") + if !reflect.DeepEqual(err, testCase.expAddContainerErr) { + t.Errorf("CPU Manager AddContainer() error (%v). expected error: %v but got: %v", + testCase.description, testCase.expAddContainerErr, err) + } + if !testCase.expCPUSet.Equals(mgr.state.GetDefaultCPUSet()) { + t.Errorf("CPU Manager AddContainer() error (%v). 
expected cpuset: %v but got: %v", + testCase.description, testCase.expCPUSet, mgr.state.GetDefaultCPUSet()) + } + } +} + +func TestCPUManagerAddWithInitContainersWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) { + if runtime.GOOS == "windows" { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) + } + + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, true) + testCases := []struct { + description string + topo *topology.CPUTopology + numReservedCPUs int + initContainerIDs []string + containerIDs []string + stAllocations state.ContainerCPUAllocations + stDefaultCPUSet cpuset.CPUSet pod *v1.Pod expInitCSets []cpuset.CPUSet expCSets []cpuset.CPUSet }{ { - description: "No Guaranteed Init CPUs", - topo: topoSingleSocketHT, - numReservedCPUs: 0, - stAllocations: state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), - initContainerIDs: []string{"initFakeID"}, - containerIDs: []string{"appFakeID"}, - pod: makeMultiContainerPod( - []struct{ request, limit string }{{"100m", "100m"}}, - []struct{ request, limit string }{{"4000m", "4000m"}}), - expInitCSets: []cpuset.CPUSet{ - cpuset.New()}, - expCSets: []cpuset.CPUSet{ - cpuset.New(0, 4, 1, 5)}, + description: "No Guaranteed Init CPUs", + topo: topoSingleSocketHT, + numReservedCPUs: 0, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + initContainerIDs: []string{"initFakeID"}, + containerIDs: []string{"appFakeID"}, + pod: makeMultiContainerPod( + []struct{ request, limit string }{{"100m", "100m"}}, + []struct{ request, limit string }{{"4000m", "4000m"}}), + expInitCSets: []cpuset.CPUSet{ + cpuset.New()}, + expCSets: []cpuset.CPUSet{ + cpuset.New(0, 4, 1, 5)}, + }, + { + description: "Equal Number of Guaranteed CPUs", + topo: topoSingleSocketHT, + numReservedCPUs: 0, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + initContainerIDs: []string{"initFakeID"}, + containerIDs: []string{"appFakeID"}, + pod: makeMultiContainerPod( + []struct{ request, limit string }{{"4000m", "4000m"}}, + []struct{ request, limit string }{{"4000m", "4000m"}}), + expInitCSets: []cpuset.CPUSet{ + cpuset.New(0, 4, 1, 5)}, + expCSets: []cpuset.CPUSet{ + cpuset.New(0, 4, 1, 5)}, + }, + { + description: "More Init Container Guaranteed CPUs", + topo: topoSingleSocketHT, + numReservedCPUs: 0, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + initContainerIDs: []string{"initFakeID"}, + containerIDs: []string{"appFakeID"}, + pod: makeMultiContainerPod( + []struct{ request, limit string }{{"6000m", "6000m"}}, + []struct{ request, limit string }{{"4000m", "4000m"}}), + expInitCSets: []cpuset.CPUSet{ + cpuset.New(0, 4, 1, 5, 2, 6)}, + expCSets: []cpuset.CPUSet{ + cpuset.New(0, 4, 1, 5)}, + }, + { + description: "Less Init Container Guaranteed CPUs", + topo: topoSingleSocketHT, + numReservedCPUs: 0, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + initContainerIDs: []string{"initFakeID"}, + containerIDs: []string{"appFakeID"}, + pod: makeMultiContainerPod( + []struct{ request, limit string }{{"2000m", "2000m"}}, + []struct{ request, limit string }{{"4000m", "4000m"}}), + expInitCSets: []cpuset.CPUSet{ + cpuset.New(0, 4)}, + expCSets: []cpuset.CPUSet{ + cpuset.New(0, 4, 1, 
5)}, + }, + { + description: "Multi Init Container Equal CPUs", + topo: topoSingleSocketHT, + numReservedCPUs: 0, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + initContainerIDs: []string{"initFakeID-1", "initFakeID-2"}, + containerIDs: []string{"appFakeID"}, + pod: makeMultiContainerPod( + []struct{ request, limit string }{ + {"2000m", "2000m"}, + {"2000m", "2000m"}}, + []struct{ request, limit string }{ + {"2000m", "2000m"}}), + expInitCSets: []cpuset.CPUSet{ + cpuset.New(0, 4), + cpuset.New(0, 4)}, + expCSets: []cpuset.CPUSet{ + cpuset.New(0, 4)}, + }, + { + description: "Multi Init Container Less CPUs", + topo: topoSingleSocketHT, + numReservedCPUs: 0, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + initContainerIDs: []string{"initFakeID-1", "initFakeID-2"}, + containerIDs: []string{"appFakeID"}, + pod: makeMultiContainerPod( + []struct{ request, limit string }{ + {"4000m", "4000m"}, + {"4000m", "4000m"}}, + []struct{ request, limit string }{ + {"2000m", "2000m"}}), + expInitCSets: []cpuset.CPUSet{ + cpuset.New(0, 4, 1, 5), + cpuset.New(0, 4, 1, 5)}, + expCSets: []cpuset.CPUSet{ + cpuset.New(0, 4)}, + }, + { + description: "Multi Init Container More CPUs", + topo: topoSingleSocketHT, + numReservedCPUs: 0, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + initContainerIDs: []string{"initFakeID-1", "initFakeID-2"}, + containerIDs: []string{"appFakeID"}, + pod: makeMultiContainerPod( + []struct{ request, limit string }{ + {"2000m", "2000m"}, + {"2000m", "2000m"}}, + []struct{ request, limit string }{ + {"4000m", "4000m"}}), + expInitCSets: []cpuset.CPUSet{ + cpuset.New(0, 4), + cpuset.New(0, 4)}, + expCSets: []cpuset.CPUSet{ + cpuset.New(0, 4, 1, 5)}, + }, + { + description: "Multi Init Container Increasing CPUs", + topo: topoSingleSocketHT, + numReservedCPUs: 0, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + initContainerIDs: []string{"initFakeID-1", "initFakeID-2"}, + containerIDs: []string{"appFakeID"}, + pod: makeMultiContainerPod( + []struct{ request, limit string }{ + {"2000m", "2000m"}, + {"4000m", "4000m"}}, + []struct{ request, limit string }{ + {"6000m", "6000m"}}), + expInitCSets: []cpuset.CPUSet{ + cpuset.New(0, 4), + cpuset.New(0, 4, 1, 5)}, + expCSets: []cpuset.CPUSet{ + cpuset.New(0, 4, 1, 5, 2, 6)}, + }, + { + description: "Multi Init, Multi App Container Split CPUs", + topo: topoSingleSocketHT, + numReservedCPUs: 0, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + initContainerIDs: []string{"initFakeID-1", "initFakeID-2"}, + containerIDs: []string{"appFakeID-1", "appFakeID-2"}, + pod: makeMultiContainerPod( + []struct{ request, limit string }{ + {"2000m", "2000m"}, + {"4000m", "4000m"}}, + []struct{ request, limit string }{ + {"2000m", "2000m"}, + {"2000m", "2000m"}}), + expInitCSets: []cpuset.CPUSet{ + cpuset.New(0, 4), + cpuset.New(0, 4, 1, 5)}, + expCSets: []cpuset.CPUSet{ + cpuset.New(0, 4), + cpuset.New(1, 5)}, + }, + } + + for _, testCase := range testCases { + logger, _ := ktesting.NewTestContext(t) + policy, _ := NewStaticPolicy(logger, testCase.topo, testCase.numReservedCPUs, cpuset.New(), topologymanager.NewFakeManager(), nil) + + mockState := &mockState{ + allocations: testCase.stAllocations, + defaultCPUSet: testCase.stDefaultCPUSet, + } + + mgr := &manager{ + policy: 
policy, + state: mockState, + lastUpdateState: state.NewMemoryState(logger), + containerRuntime: &mockRuntimeService{}, + containerMap: containermap.NewContainerMap(), + podStatusProvider: mockPodStatusProvider{}, + sourcesReady: &sourcesReadyStub{}, + activePods: func() []*v1.Pod { + return []*v1.Pod{testCase.pod} + }, + } + + containers := append(testCase.pod.Spec.InitContainers, testCase.pod.Spec.Containers...) //nolint:gocritic + + containerIDs := append(testCase.initContainerIDs, testCase.containerIDs...) //nolint:gocritic + + expCSets := append(testCase.expInitCSets, testCase.expCSets...) //nolint:gocritic + + cumCSet := cpuset.New() + + for i := range containers { + err := mgr.Allocate(testCase.pod, &containers[i], lifecycle.AddOperation) + if err != nil { + t.Errorf("StaticPolicy Allocate() error (%v). unexpected error for container id: %v: %v", + testCase.description, containerIDs[i], err) + } + + mgr.AddContainer(logger, testCase.pod, &containers[i], containerIDs[i]) + _, _, err = mgr.containerMap.GetContainerRef(containerIDs[i]) + if err != nil { + t.Errorf("StaticPolicy AddContainer() error (%v). unexpected error for container id: %v: %v", + testCase.description, containerIDs[i], err) + } + + allocation, found := mockState.allocations[string(testCase.pod.UID)][containers[i].Name] + if !expCSets[i].IsEmpty() && !found { + t.Errorf("StaticPolicy AddContainer() error (%v). expected container %v to be present in assignments %v", + testCase.description, containers[i].Name, mockState.allocations) + } + + if found && !allocation.Original.Equals(expCSets[i]) { + t.Errorf("StaticPolicy AddContainer() error (%v). expected cpuset %v for container %v but got %v", + testCase.description, expCSets[i], containers[i].Name, allocation.Original) + } + + cumCSet = cumCSet.Union(allocation.Original) + } + + if !testCase.stDefaultCPUSet.Difference(cumCSet).Equals(mockState.defaultCPUSet) { + t.Errorf("StaticPolicy error (%v). 
expected final state for defaultCPUSet %v but got %v", + testCase.description, testCase.stDefaultCPUSet.Difference(cumCSet), mockState.defaultCPUSet) + } + } +} + +func TestCPUManagerGenerateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) { + if runtime.GOOS == "windows" { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) + } + + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, true) + testCases := []struct { + description string + cpuPolicyName string + nodeAllocatableReservation v1.ResourceList + isTopologyBroken bool + expectedPolicy string + expectedError error + }{ + { + description: "set none policy", + cpuPolicyName: "none", + nodeAllocatableReservation: nil, + expectedPolicy: "none", + }, + { + description: "invalid policy name", + cpuPolicyName: "invalid", + nodeAllocatableReservation: nil, + expectedError: fmt.Errorf("unknown policy: \"invalid\""), }, { - description: "Equal Number of Guaranteed CPUs", - topo: topoSingleSocketHT, - numReservedCPUs: 0, - stAllocations: state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), - initContainerIDs: []string{"initFakeID"}, - containerIDs: []string{"appFakeID"}, - pod: makeMultiContainerPod( - []struct{ request, limit string }{{"4000m", "4000m"}}, - []struct{ request, limit string }{{"4000m", "4000m"}}), - expInitCSets: []cpuset.CPUSet{ - cpuset.New(0, 4, 1, 5)}, - expCSets: []cpuset.CPUSet{ - cpuset.New(0, 4, 1, 5)}, + description: "static policy", + cpuPolicyName: "static", + nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(3, resource.DecimalSI)}, + expectedPolicy: "static", }, { - description: "More Init Container Guaranteed CPUs", - topo: topoSingleSocketHT, - numReservedCPUs: 0, - stAllocations: state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), - initContainerIDs: []string{"initFakeID"}, - containerIDs: []string{"appFakeID"}, - pod: makeMultiContainerPod( - []struct{ request, limit string }{{"6000m", "6000m"}}, - []struct{ request, limit string }{{"4000m", "4000m"}}), - expInitCSets: []cpuset.CPUSet{ - cpuset.New(0, 4, 1, 5, 2, 6)}, - expCSets: []cpuset.CPUSet{ - cpuset.New(0, 4, 1, 5)}, + description: "static policy - broken topology", + cpuPolicyName: "static", + nodeAllocatableReservation: v1.ResourceList{}, + isTopologyBroken: true, + expectedError: fmt.Errorf("could not detect number of cpus"), + }, + { + description: "static policy - broken reservation", + cpuPolicyName: "static", + nodeAllocatableReservation: v1.ResourceList{}, + expectedError: fmt.Errorf("unable to determine reserved CPU resources for static policy"), + }, + { + description: "static policy - no CPU resources", + cpuPolicyName: "static", + nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI)}, + expectedError: fmt.Errorf("the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero"), + }, + } + + mockedMachineInfo := cadvisorapi.MachineInfo{ + NumCores: 4, + Topology: []cadvisorapi.Node{ + { + Cores: []cadvisorapi.Core{ + { + Id: 0, + Threads: []int{0}, + UncoreCaches: []cadvisorapi.Cache{{Id: 1}}, + }, + { + Id: 1, + Threads: []int{1}, + UncoreCaches: []cadvisorapi.Cache{{Id: 1}}, + }, + { + Id: 2, + Threads: []int{2}, + UncoreCaches: []cadvisorapi.Cache{{Id: 1}}, + }, + { + Id: 3, + Threads: []int{3}, + 
UncoreCaches: []cadvisorapi.Cache{{Id: 1}}, + }, + }, + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.description, func(t *testing.T) { + machineInfo := &mockedMachineInfo + if testCase.isTopologyBroken { + machineInfo = &cadvisorapi.MachineInfo{} + } + sDir, err := os.MkdirTemp("", "cpu_manager_test") + if err != nil { + t.Errorf("cannot create state file: %s", err.Error()) + } + defer removeStateDirectory(t, sDir) + + logger, _ := ktesting.NewTestContext(t) + mgr, err := NewManager(logger, testCase.cpuPolicyName, nil, 5*time.Second, machineInfo, cpuset.New(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager()) + if testCase.expectedError != nil { + if !strings.Contains(err.Error(), testCase.expectedError.Error()) { + t.Errorf("Unexpected error message. Have: %s wants %s", err.Error(), testCase.expectedError.Error()) + } + } else { + rawMgr := mgr.(*manager) + if rawMgr.policy.Name() != testCase.expectedPolicy { + t.Errorf("Unexpected policy name. Have: %q wants %q", rawMgr.policy.Name(), testCase.expectedPolicy) + } + if rawMgr.topology == nil { + t.Errorf("Expected topology to be non-nil for policy '%v'. Have: %v", rawMgr.policy.Name(), rawMgr.topology) + } + } + }) + + } +} + +func TestCPUManagerRemoveWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) { + if runtime.GOOS == "windows" { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) + } + + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, true) + containerID := "fakeID" + containerMap := containermap.NewContainerMap() + + logger, _ := ktesting.NewTestContext(t) + mgr := &manager{ + policy: &mockPolicy{ + err: nil, + }, + state: &mockState{ + allocations: state.ContainerCPUAllocations{}, + defaultCPUSet: cpuset.New(), + }, + lastUpdateState: state.NewMemoryState(logger), + containerRuntime: &mockRuntimeService{}, + containerMap: containerMap, + activePods: func() []*v1.Pod { return nil }, + podStatusProvider: mockPodStatusProvider{}, + } + + containerMap.Add("", "", containerID) + err := mgr.RemoveContainer(logger, containerID) + if err != nil { + t.Errorf("CPU Manager RemoveContainer() error. expected error to be nil but got: %v", err) + } + + mgr = &manager{ + policy: &mockPolicy{ + err: fmt.Errorf("fake error"), + }, + state: state.NewMemoryState(logger), + containerRuntime: &mockRuntimeService{}, + containerMap: containerMap, + activePods: func() []*v1.Pod { return nil }, + podStatusProvider: mockPodStatusProvider{}, + } + + containerMap.Add("", "", containerID) + err = mgr.RemoveContainer(logger, containerID) + if !reflect.DeepEqual(err, fmt.Errorf("fake error")) { + t.Errorf("CPU Manager RemoveContainer() error. 
expected error: fake error but got: %v", err) + } +} + +func TestReconcileStateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) { + if runtime.GOOS == "windows" { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) + } + + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, true) + logger, _ := ktesting.NewTestContext(t) + + testPolicy, _ := NewStaticPolicy( + logger, + &topology.CPUTopology{ + NumCPUs: 8, + NumSockets: 2, + NumCores: 4, + CPUDetails: map[int]topology.CPUInfo{ + 0: {CoreID: 0, SocketID: 0}, + 1: {CoreID: 1, SocketID: 0}, + 2: {CoreID: 2, SocketID: 0}, + 3: {CoreID: 3, SocketID: 0}, + 4: {CoreID: 0, SocketID: 1}, + 5: {CoreID: 1, SocketID: 1}, + 6: {CoreID: 2, SocketID: 1}, + 7: {CoreID: 3, SocketID: 1}, + }, + }, + 0, + cpuset.New(), + topologymanager.NewFakeManager(), + nil) + + testCases := []struct { + description string + policy Policy + activePods []*v1.Pod + pspPS v1.PodStatus + pspFound bool + updateErr []error + containerIDsWithExclusiveCPUs []string + containerRuntimeInitialState map[string]cpuset.CPUSet + stAllocations state.ContainerCPUAllocations + stDefaultCPUSet cpuset.CPUSet + lastUpdateStAllocations state.ContainerCPUAllocations + lastUpdateStDefaultCPUSet cpuset.CPUSet + expectStAllocations state.ContainerCPUAllocations + expectStDefaultCPUSet cpuset.CPUSet + expectLastUpdateStAllocations state.ContainerCPUAllocations + expectLastUpdateStDefaultCPUSet cpuset.CPUSet + expectContainerRuntimeState map[string]cpuset.CPUSet + expectSucceededContainerName []string + expectFailedContainerName []string + }{ + { + description: "cpu manager reconcile - no error", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodName", + UID: "fakePodUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerName", + ContainerID: "docker://fakeContainerID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, + expectSucceededContainerName: []string{"fakeContainerName"}, + expectFailedContainerName: []string{}, }, { - description: "Less Init Container Guaranteed CPUs", - topo: topoSingleSocketHT, - 
numReservedCPUs: 0, - stAllocations: state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), - initContainerIDs: []string{"initFakeID"}, - containerIDs: []string{"appFakeID"}, - pod: makeMultiContainerPod( - []struct{ request, limit string }{{"2000m", "2000m"}}, - []struct{ request, limit string }{{"4000m", "4000m"}}), - expInitCSets: []cpuset.CPUSet{ - cpuset.New(0, 4)}, - expCSets: []cpuset.CPUSet{ - cpuset.New(0, 4, 1, 5)}, + description: "cpu manager reconcile init container - no error", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodName", + UID: "fakePodUID", + }, + Spec: v1.PodSpec{ + InitContainers: []v1.Container{ + { + Name: "fakeContainerName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + InitContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerName", + ContainerID: "docker://fakeContainerID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, + expectSucceededContainerName: []string{"fakeContainerName"}, + expectFailedContainerName: []string{}, }, { - description: "Multi Init Container Equal CPUs", - topo: topoSingleSocketHT, - numReservedCPUs: 0, - stAllocations: state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), - initContainerIDs: []string{"initFakeID-1", "initFakeID-2"}, - containerIDs: []string{"appFakeID"}, - pod: makeMultiContainerPod( - []struct{ request, limit string }{ - {"2000m", "2000m"}, - {"2000m", "2000m"}}, - []struct{ request, limit string }{ - {"2000m", "2000m"}}), - expInitCSets: []cpuset.CPUSet{ - cpuset.New(0, 4), - cpuset.New(0, 4)}, - expCSets: []cpuset.CPUSet{ - cpuset.New(0, 4)}, + description: "cpu manager reconcile - pod status not found", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodName", + UID: "fakePodUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{}, + pspFound: false, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(), + lastUpdateStAllocations: state.ContainerCPUAllocations{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{}, + 
expectStDefaultCPUSet: cpuset.New(), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{}, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{}, + expectSucceededContainerName: []string{}, + expectFailedContainerName: []string{}, }, { - description: "Multi Init Container Less CPUs", - topo: topoSingleSocketHT, - numReservedCPUs: 0, - stAllocations: state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), - initContainerIDs: []string{"initFakeID-1", "initFakeID-2"}, - containerIDs: []string{"appFakeID"}, - pod: makeMultiContainerPod( - []struct{ request, limit string }{ - {"4000m", "4000m"}, - {"4000m", "4000m"}}, - []struct{ request, limit string }{ - {"2000m", "2000m"}}), - expInitCSets: []cpuset.CPUSet{ - cpuset.New(0, 4, 1, 5), - cpuset.New(0, 4, 1, 5)}, - expCSets: []cpuset.CPUSet{ - cpuset.New(0, 4)}, + description: "cpu manager reconcile - container state not found", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodName", + UID: "fakePodUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerName1", + ContainerID: "docker://fakeContainerID", + }, + }, + }, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(), + lastUpdateStAllocations: state.ContainerCPUAllocations{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{}, + expectStDefaultCPUSet: cpuset.New(), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{}, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{}, + expectSucceededContainerName: []string{}, + expectFailedContainerName: []string{}, }, { - description: "Multi Init Container More CPUs", - topo: topoSingleSocketHT, - numReservedCPUs: 0, - stAllocations: state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), - initContainerIDs: []string{"initFakeID-1", "initFakeID-2"}, - containerIDs: []string{"appFakeID"}, - pod: makeMultiContainerPod( - []struct{ request, limit string }{ - {"2000m", "2000m"}, - {"2000m", "2000m"}}, - []struct{ request, limit string }{ - {"4000m", "4000m"}}), - expInitCSets: []cpuset.CPUSet{ - cpuset.New(0, 4), - cpuset.New(0, 4)}, - expCSets: []cpuset.CPUSet{ - cpuset.New(0, 4, 1, 5)}, + description: "cpu manager reconcile - cpuset is empty", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodName", + UID: "fakePodUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerName", + ContainerID: "docker://fakeContainerID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(), Resized: cpuset.New()}, + }, + }, +
stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(), Resized: cpuset.New()}, + }, + }, + expectStDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{}, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{}, + expectSucceededContainerName: []string{}, + expectFailedContainerName: []string{"fakeContainerName"}, }, { - description: "Multi Init Container Increasing CPUs", - topo: topoSingleSocketHT, - numReservedCPUs: 0, - stAllocations: state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), - initContainerIDs: []string{"initFakeID-1", "initFakeID-2"}, - containerIDs: []string{"appFakeID"}, - pod: makeMultiContainerPod( - []struct{ request, limit string }{ - {"2000m", "2000m"}, - {"4000m", "4000m"}}, - []struct{ request, limit string }{ - {"6000m", "6000m"}}), - expInitCSets: []cpuset.CPUSet{ - cpuset.New(0, 4), - cpuset.New(0, 4, 1, 5)}, - expCSets: []cpuset.CPUSet{ - cpuset.New(0, 4, 1, 5, 2, 6)}, + description: "cpu manager reconcile - container update error", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodName", + UID: "fakePodUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerName", + ContainerID: "docker://fakeContainerID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: []error{fmt.Errorf("fake container update error")}, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{}, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{}, + expectSucceededContainerName: []string{}, + expectFailedContainerName: []string{"fakeContainerName"}, }, { - description: "Multi Init, Multi App Container Split CPUs", - topo: topoSingleSocketHT, - numReservedCPUs: 0, - stAllocations: state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), - initContainerIDs: []string{"initFakeID-1", "initFakeID-2"}, - containerIDs: []string{"appFakeID-1", "appFakeID-2"}, - pod: makeMultiContainerPod( - []struct{ request, limit string }{ - {"2000m", "2000m"}, - {"4000m", "4000m"}}, - []struct{ request, limit string }{ - {"2000m", "2000m"}, - {"2000m", "2000m"}}), - expInitCSets: []cpuset.CPUSet{ - cpuset.New(0, 4), - cpuset.New(0, 4, 1, 5)}, - expCSets: 
[]cpuset.CPUSet{ - cpuset.New(0, 4), - cpuset.New(1, 5)}, - }, - } - - for _, testCase := range testCases { - logger, _ := ktesting.NewTestContext(t) - policy, _ := NewStaticPolicy(logger, testCase.topo, testCase.numReservedCPUs, cpuset.New(), topologymanager.NewFakeManager(), nil) - - mockState := &mockState{ - allocations: testCase.stAllocations, - defaultCPUSet: testCase.stDefaultCPUSet, - } - - mgr := &manager{ - policy: policy, - state: mockState, - lastUpdateState: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{}, - containerMap: containermap.NewContainerMap(), - podStatusProvider: mockPodStatusProvider{}, - sourcesReady: &sourcesReadyStub{}, - activePods: func() []*v1.Pod { - return []*v1.Pod{testCase.pod} + description: "cpu manager reconcile - state has inactive container", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodName", + UID: "fakePodUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerName", + }, + }, + }, + }, }, - } - - containers := append(testCase.pod.Spec.InitContainers, testCase.pod.Spec.Containers...) //nolint:gocritic - - containerIDs := append(testCase.initContainerIDs, testCase.containerIDs...) //nolint:gocritic - - expCSets := append(testCase.expInitCSets, testCase.expCSets...) //nolint:gocritic - - cumCSet := cpuset.New() - - for i := range containers { - err := mgr.Allocate(testCase.pod, &containers[i], lifecycle.AddOperation) - if err != nil { - t.Errorf("StaticPolicy Allocate() error (%v). unexpected error for container id: %v: %v", - testCase.description, containerIDs[i], err) - } - - mgr.AddContainer(logger, testCase.pod, &containers[i], containerIDs[i]) - _, _, err = mgr.containerMap.GetContainerRef(containerIDs[i]) - if err != nil { - t.Errorf("StaticPolicy AddContainer() error (%v). unexpected error for container id: %v: %v", - testCase.description, containerIDs[i], err) - } - - allocation, found := mockState.allocations[string(testCase.pod.UID)][containers[i].Name] - if !expCSets[i].IsEmpty() && !found { - t.Errorf("StaticPolicy AddContainer() error (%v). expected container %v to be present in assignments %v", - testCase.description, containers[i].Name, mockState.allocations) - } - - if found && !allocation.Original.Equals(expCSets[i]) { - t.Errorf("StaticPolicy AddContainer() error (%v). expected cpuset %v for container %v but got %v", - testCase.description, expCSets[i], containers[i].Name, allocation.Original) - } - - cumCSet = cumCSet.Union(allocation.Original) - } - - if !testCase.stDefaultCPUSet.Difference(cumCSet).Equals(mockState.defaultCPUSet) { - t.Errorf("StaticPolicy error (%v). 
expected final state for defaultCPUSet %v but got %v", - testCase.description, testCase.stDefaultCPUSet.Difference(cumCSet), mockState.defaultCPUSet) - } - } -} - -func TestCPUManagerGenerateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) { - if runtime.GOOS == "windows" { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) - } - - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, true) - testCases := []struct { - description string - cpuPolicyName string - nodeAllocatableReservation v1.ResourceList - isTopologyBroken bool - expectedPolicy string - expectedError error - }{ - { - description: "set none policy", - cpuPolicyName: "none", - nodeAllocatableReservation: nil, - expectedPolicy: "none", + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerName", + ContainerID: "docker://fakeContainerID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + "secondfakePodUID": map[string]state.ContainerCPUAllocation{ + "secondfakeContainerName": {Original: cpuset.New(3, 4), Resized: cpuset.New()}, + }, + }, + stDefaultCPUSet: cpuset.New(5, 6, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, + expectSucceededContainerName: []string{"fakeContainerName"}, + expectFailedContainerName: []string{}, }, { - description: "invalid policy name", - cpuPolicyName: "invalid", - nodeAllocatableReservation: nil, - expectedError: fmt.Errorf("unknown policy: \"invalid\""), + description: "cpu manager reconcile - last update state is current", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodName", + UID: "fakePodUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerName", + ContainerID: "docker://fakeContainerID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, + stAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + 
stDefaultCPUSet: cpuset.New(5, 6, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + expectStDefaultCPUSet: cpuset.New(5, 6, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, + expectSucceededContainerName: []string{"fakeContainerName"}, + expectFailedContainerName: []string{}, }, { - description: "static policy", - cpuPolicyName: "static", - nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(3, resource.DecimalSI)}, - expectedPolicy: "static", + description: "cpu manager reconcile - last update state is not current", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodName", + UID: "fakePodUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerName", + ContainerID: "docker://fakeContainerID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(3, 4), + }, + stAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(3, 4), Resized: cpuset.New()}, + }, + }, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + }, + }, + expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerName": {Original: cpuset.New(3, 4), Resized: cpuset.New(1, 2)}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerID": cpuset.New(1, 2), + }, + expectSucceededContainerName: []string{"fakeContainerName"}, + expectFailedContainerName: []string{}, }, { - description: "static policy - broken topology", - cpuPolicyName: "static", - nodeAllocatableReservation: v1.ResourceList{}, - isTopologyBroken: true, - expectedError: fmt.Errorf("could not detect number of cpus"), + description: "cpu manager reconcile - default CPU sets no error", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodAName", + UID: "fakePodAUID", + }, + 
Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{}, + stAllocations: state.ContainerCPUAllocations{}, + stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{}, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{}, + expectStDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1, 2, 3, 4, 5, 6, 7), Resized: cpuset.New()}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(1, 2, 3, 4, 5, 6, 7), Resized: cpuset.New()}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(1, 2, 3, 4, 5, 6, 7), Resized: cpuset.New()}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(1, 2, 3, 4, 5, 6, 7), + "fakeContainerBID": cpuset.New(1, 2, 3, 4, 5, 6, 7), + "fakeContainerCID": cpuset.New(1, 2, 3, 4, 5, 6, 7), + }, + expectSucceededContainerName: []string{"fakeContainerAName", "fakeContainerBName", "fakeContainerCName"}, + expectFailedContainerName: []string{}, }, { - description: "static policy - broken reservation", - cpuPolicyName: "static", - nodeAllocatableReservation: v1.ResourceList{}, - expectedError: fmt.Errorf("unable to determine reserved CPU resources for static policy"), + description: "cpu manager reconcile - exclusive cpu container scaled up", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodAName", + UID: "fakePodAUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: 
v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerBID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(1, 2, 5, 6, 7), + "fakeContainerBID": cpuset.New(3, 4), + "fakeContainerCID": cpuset.New(1, 2, 5, 6, 7), + }, + stAllocations: state.ContainerCPUAllocations{ + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3, 4), Resized: cpuset.New(3, 4, 5, 6)}, + }, + }, + stDefaultCPUSet: cpuset.New(1, 2, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1, 2, 5, 6, 7), Resized: cpuset.New()}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3, 4), Resized: cpuset.New()}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(1, 2, 5, 6, 7), Resized: cpuset.New()}, + }, + }, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{ + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3, 4), Resized: cpuset.New(3, 4, 5, 6)}, + }, + }, + expectStDefaultCPUSet: cpuset.New(1, 2, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1, 2, 5, 6, 7), Resized: cpuset.New(1, 2, 7)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3, 4), Resized: cpuset.New(3, 4, 5, 6)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(1, 2, 5, 6, 7), Resized: cpuset.New(1, 2, 7)}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(1, 2, 7), + "fakeContainerBID": cpuset.New(3, 4, 5, 6), + "fakeContainerCID": cpuset.New(1, 2, 7), + }, + expectSucceededContainerName: []string{"fakeContainerAName", "fakeContainerBName", "fakeContainerCName"}, + expectFailedContainerName: []string{}, }, { - description: "static policy - no CPU resources", - cpuPolicyName: "static", - nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI)}, - expectedError: fmt.Errorf("the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero"), - }, - } - - mockedMachineInfo := cadvisorapi.MachineInfo{ - NumCores: 4, - Topology: []cadvisorapi.Node{ - { - Cores: []cadvisorapi.Core{ + description: "cpu manager reconcile - exclusive cpu container scaled down", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodAName", + UID: "fakePodAUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: 
[]v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ { - Id: 0, - Threads: []int{0}, - UncoreCaches: []cadvisorapi.Cache{{Id: 1}}, + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, }, { - Id: 1, - Threads: []int{1}, - UncoreCaches: []cadvisorapi.Cache{{Id: 1}}, + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, }, { - Id: 2, - Threads: []int{2}, - UncoreCaches: []cadvisorapi.Cache{{Id: 1}}, + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerBID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(1, 2, 7), + "fakeContainerBID": cpuset.New(3, 4, 5, 6), + "fakeContainerCID": cpuset.New(1, 2, 7), + }, + stAllocations: state.ContainerCPUAllocations{ + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3, 4), Resized: cpuset.New(3, 4)}, + }, + }, + stDefaultCPUSet: cpuset.New(1, 2, 5, 6, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1, 2, 7), Resized: cpuset.New()}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3, 4, 5, 6), Resized: cpuset.New()}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(1, 2, 7), Resized: cpuset.New()}, + }, + }, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{ + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3, 4), Resized: cpuset.New(3, 4)}, + }, + }, + expectStDefaultCPUSet: cpuset.New(1, 2, 5, 6, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1, 2, 7), Resized: cpuset.New(1, 2, 5, 6, 7)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3, 4, 5, 6), Resized: cpuset.New(3, 4)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(1, 2, 7), Resized: cpuset.New(1, 2, 5, 6, 7)}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(1, 2, 5, 6, 7), + "fakeContainerBID": cpuset.New(3, 4), + "fakeContainerCID": cpuset.New(1, 2, 5, 6, 7), + }, + expectSucceededContainerName: []string{"fakeContainerAName", "fakeContainerBName", "fakeContainerCName"}, + expectFailedContainerName: []string{}, + }, + { + description: "cpu manager reconcile - exclusive cpu containers swap CPUs", + policy: testPolicy, + activePods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodAName", + UID: "fakePodAUID", + }, + Spec: v1.PodSpec{ + Containers: 
[]v1.Container{ + { + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + }, + pspPS: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, }, { - Id: 3, - Threads: []int{3}, - UncoreCaches: []cadvisorapi.Cache{{Id: 1}}, + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, }, }, }, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.description, func(t *testing.T) { - machineInfo := &mockedMachineInfo - if testCase.isTopologyBroken { - machineInfo = &cadvisorapi.MachineInfo{} - } - sDir, err := os.MkdirTemp("", "cpu_manager_test") - if err != nil { - t.Errorf("cannot create state file: %s", err.Error()) - } - defer removeStateDirectory(t, sDir) - - logger, _ := ktesting.NewTestContext(t) - mgr, err := NewManager(logger, testCase.cpuPolicyName, nil, 5*time.Second, machineInfo, cpuset.New(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager()) - if testCase.expectedError != nil { - if !strings.Contains(err.Error(), testCase.expectedError.Error()) { - t.Errorf("Unexpected error message. Have: %s wants %s", err.Error(), testCase.expectedError.Error()) - } - } else { - rawMgr := mgr.(*manager) - if rawMgr.policy.Name() != testCase.expectedPolicy { - t.Errorf("Unexpected policy name. Have: %q wants %q", rawMgr.policy.Name(), testCase.expectedPolicy) - } - if rawMgr.topology == nil { - t.Errorf("Expected topology to be non-nil for policy '%v'. Have: %v", rawMgr.policy.Name(), rawMgr.topology) - } - } - }) - - } -} - -func TestCPUManagerRemoveWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) { - if runtime.GOOS == "windows" { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) - } - - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, true) - containerID := "fakeID" - containerMap := containermap.NewContainerMap() - - logger, _ := ktesting.NewTestContext(t) - mgr := &manager{ - policy: &mockPolicy{ - err: nil, - }, - state: &mockState{ - allocations: state.ContainerCPUAllocations{}, - defaultCPUSet: cpuset.New(), - }, - lastUpdateState: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{}, - containerMap: containerMap, - activePods: func() []*v1.Pod { return nil }, - podStatusProvider: mockPodStatusProvider{}, - } - - containerMap.Add("", "", containerID) - err := mgr.RemoveContainer(logger, containerID) - if err != nil { - t.Errorf("CPU Manager RemoveContainer() error. expected error to be nil but got: %v", err) - } - - mgr = &manager{ - policy: &mockPolicy{ - err: fmt.Errorf("fake error"), - }, - state: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{}, - containerMap: containerMap, - activePods: func() []*v1.Pod { return nil }, - podStatusProvider: mockPodStatusProvider{}, - } - - containerMap.Add("", "", containerID) - err = mgr.RemoveContainer(logger, containerID) - if !reflect.DeepEqual(err, fmt.Errorf("fake error")) { - t.Errorf("CPU Manager RemoveContainer() error. 
expected error: fake error but got: %v", err) - } -} - -func TestReconcileStateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) { - if runtime.GOOS == "windows" { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsCPUAndMemoryAffinity, true) - } - - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingExclusiveCPUs, true) - logger, _ := ktesting.NewTestContext(t) - - testPolicy, _ := NewStaticPolicy( - logger, - &topology.CPUTopology{ - NumCPUs: 8, - NumSockets: 2, - NumCores: 4, - CPUDetails: map[int]topology.CPUInfo{ - 0: {CoreID: 0, SocketID: 0}, - 1: {CoreID: 1, SocketID: 0}, - 2: {CoreID: 2, SocketID: 0}, - 3: {CoreID: 3, SocketID: 0}, - 4: {CoreID: 0, SocketID: 1}, - 5: {CoreID: 1, SocketID: 1}, - 6: {CoreID: 2, SocketID: 1}, - 7: {CoreID: 3, SocketID: 1}, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerBID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(1, 2), + "fakeContainerBID": cpuset.New(3, 4), + }, + stAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1), Resized: cpuset.New(1, 4)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3), Resized: cpuset.New(2, 3)}, + }, + }, + stDefaultCPUSet: cpuset.New(5, 6, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1), Resized: cpuset.New(1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3), Resized: cpuset.New(3, 4)}, + }, + }, + lastUpdateStDefaultCPUSet: cpuset.New(), + expectStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1), Resized: cpuset.New(1, 4)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3), Resized: cpuset.New(2, 3)}, + }, + }, + expectStDefaultCPUSet: cpuset.New(5, 6, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1), Resized: cpuset.New(1, 4)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3), Resized: cpuset.New(2, 3)}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(1, 4), + "fakeContainerBID": cpuset.New(2, 3), }, + expectSucceededContainerName: []string{"fakeContainerAName", "fakeContainerBName"}, + expectFailedContainerName: []string{}, }, - 0, - cpuset.New(), - topologymanager.NewFakeManager(), - nil) - - testCases := []struct { - description string - policy Policy - activePods []*v1.Pod - pspPS v1.PodStatus - pspFound bool - updateErr error - stAllocations state.ContainerCPUAllocations - stDefaultCPUSet cpuset.CPUSet - lastUpdateStAllocations state.ContainerCPUAllocations - lastUpdateStDefaultCPUSet cpuset.CPUSet - expectStAllocations state.ContainerCPUAllocations - expectStDefaultCPUSet cpuset.CPUSet - expectSucceededContainerName string - expectFailedContainerName string - }{ { - description: "cpu manager 
reconcile - no error", + description: "cpu manager reconcile - exclusive cpu containers scaled down and up", policy: testPolicy, activePods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "fakePodName", - UID: "fakePodUID", + Name: "fakePodAName", + UID: "fakePodAUID", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { - Name: "fakeContainerName", + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", }, }, }, @@ -2249,161 +4108,257 @@ func TestReconcileStateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) pspPS: v1.PodStatus{ ContainerStatuses: []v1.ContainerStatus{ { - Name: "fakeContainerName", - ContainerID: "docker://fakeContainerID", + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", State: v1.ContainerState{ Running: &v1.ContainerStateRunning{}, }, }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: nil, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerBID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(1, 2), + "fakeContainerBID": cpuset.New(3, 4), + "fakeContainerCID": cpuset.New(5, 6), + }, stAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1), Resized: cpuset.New(1, 2, 5, 6)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3), Resized: cpuset.New(3)}, + }, + }, + stDefaultCPUSet: cpuset.New(4), + lastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1), Resized: cpuset.New(1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3), Resized: cpuset.New(3, 4)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(5, 6), Resized: cpuset.New()}, }, }, - stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - lastUpdateStAllocations: state.ContainerCPUAllocations{}, lastUpdateStDefaultCPUSet: cpuset.New(), expectStAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1), Resized: cpuset.New(1, 2, 5, 6)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3), Resized: cpuset.New(3)}, + }, + }, + expectStDefaultCPUSet: cpuset.New(4), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": 
map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(1), Resized: cpuset.New(1, 2, 5, 6)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(3), Resized: cpuset.New(3)}, }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(5, 6), Resized: cpuset.New(4)}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(1, 2, 5, 6), + "fakeContainerBID": cpuset.New(3), + "fakeContainerCID": cpuset.New(4), }, - expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - expectSucceededContainerName: "fakeContainerName", - expectFailedContainerName: "", + expectSucceededContainerName: []string{"fakeContainerAName", "fakeContainerBName", "fakeContainerCName"}, + expectFailedContainerName: []string{}, }, { - description: "cpu manager reconcile init container - no error", + description: "cpu manager reconcile - fail in first reconcile pass does not cause conflict", policy: testPolicy, activePods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "fakePodName", - UID: "fakePodUID", + Name: "fakePodAName", + UID: "fakePodAUID", }, Spec: v1.PodSpec{ - InitContainers: []v1.Container{ + Containers: []v1.Container{ { - Name: "fakeContainerName", + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", }, }, }, }, }, pspPS: v1.PodStatus{ - InitContainerStatuses: []v1.ContainerStatus{ + ContainerStatuses: []v1.ContainerStatus{ { - Name: "fakeContainerName", - ContainerID: "docker://fakeContainerID", + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", State: v1.ContainerState{ Running: &v1.ContainerStateRunning{}, }, }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: []error{ + fmt.Errorf("fakeContainerAID pass 1 error"), + nil, //fakeContainerCID pass 1 ok + nil, //fakeContainerBID pass 2 ok + nil, //fakeContainerCID pass 3 ok + }, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerCID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(6, 7, 8), + }, stAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 3, 6)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(5, 8)}, + }, + }, + stDefaultCPUSet: cpuset.New(4, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": 
map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(4), Resized: cpuset.New(3, 4, 5)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(6, 7, 8)}, }, }, - stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - lastUpdateStAllocations: state.ContainerCPUAllocations{}, lastUpdateStDefaultCPUSet: cpuset.New(), expectStAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 3, 6)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(5, 8)}, + }, + }, + expectStDefaultCPUSet: cpuset.New(4, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(4), Resized: cpuset.New(4, 7)}, }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(5, 8)}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(4, 7), + "fakeContainerCID": cpuset.New(5, 8), }, - expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - expectSucceededContainerName: "fakeContainerName", - expectFailedContainerName: "", + expectSucceededContainerName: []string{"fakeContainerBName", "fakeContainerCName"}, + expectFailedContainerName: []string{"fakeContainerAName"}, }, { - description: "cpu manager reconcile - pod status not found", + description: "cpu manager reconcile - fail in first reconcile pass causes conflict in second pass", policy: testPolicy, activePods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "fakePodName", - UID: "fakePodUID", + Name: "fakePodAName", + UID: "fakePodAUID", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { - Name: "fakeContainerName", + Name: "fakeContainerAName", }, }, }, }, - }, - pspPS: v1.PodStatus{}, - pspFound: false, - updateErr: nil, - stAllocations: state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(), - lastUpdateStAllocations: state.ContainerCPUAllocations{}, - lastUpdateStDefaultCPUSet: cpuset.New(), - expectStAllocations: state.ContainerCPUAllocations{}, - expectStDefaultCPUSet: cpuset.New(), - expectSucceededContainerName: "", - expectFailedContainerName: "", - }, - { - description: "cpu manager reconcile - container state not found", - policy: testPolicy, - activePods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "fakePodName", - UID: "fakePodUID", + Name: "fakePodBName", + UID: "fakePodBUID", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { - Name: "fakeContainerName", + Name: "fakeContainerBName", }, }, }, }, - }, - pspPS: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - { - Name: "fakeContainerName1", - ContainerID: "docker://fakeContainerID", - }, - }, - }, - pspFound: true, - updateErr: nil, - stAllocations: 
state.ContainerCPUAllocations{}, - stDefaultCPUSet: cpuset.New(), - lastUpdateStAllocations: state.ContainerCPUAllocations{}, - lastUpdateStDefaultCPUSet: cpuset.New(), - expectStAllocations: state.ContainerCPUAllocations{}, - expectStDefaultCPUSet: cpuset.New(), - expectSucceededContainerName: "", - expectFailedContainerName: "fakeContainerName", - }, - { - description: "cpu manager reconclie - cpuset is empty", - policy: testPolicy, - activePods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "fakePodName", - UID: "fakePodUID", + Name: "fakePodCName", + UID: "fakePodCUID", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { - Name: "fakeContainerName", + Name: "fakeContainerCName", }, }, }, @@ -2412,94 +4367,129 @@ func TestReconcileStateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) pspPS: v1.PodStatus{ ContainerStatuses: []v1.ContainerStatus{ { - Name: "fakeContainerName", - ContainerID: "docker://fakeContainerID", + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", State: v1.ContainerState{ Running: &v1.ContainerStateRunning{}, }, }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: []error{ + fmt.Errorf("fakeContainerAID pass 1 error"), + nil, //fakeContainerCID pass 1 ok + nil, //fakeContainerCID pass 3 ok + }, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerCID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(6, 7, 8), + }, stAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 3, 6)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(8)}, + }, + }, + stDefaultCPUSet: cpuset.New(1, 4, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(4), Resized: cpuset.New(3, 4, 5)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(6, 7, 8)}, }, }, - stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7), - lastUpdateStAllocations: state.ContainerCPUAllocations{}, lastUpdateStDefaultCPUSet: cpuset.New(), expectStAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 3, 6)}, }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(8)}, + }, + }, + expectStDefaultCPUSet: cpuset.New(1, 4, 7), + 
expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(4), Resized: cpuset.New(3, 4, 5)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(8)}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(8), }, - expectStDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7), - expectSucceededContainerName: "", - expectFailedContainerName: "fakeContainerName", + expectSucceededContainerName: []string{"fakeContainerCName"}, + expectFailedContainerName: []string{"fakeContainerAName", "fakeContainerBName"}, }, { - description: "cpu manager reconclie - container update error", + description: "cpu manager reconcile - fail in first reconcile pass causes conflict in second pass which causes conflict in third pass", policy: testPolicy, activePods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "fakePodName", - UID: "fakePodUID", + Name: "fakePodAName", + UID: "fakePodAUID", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { - Name: "fakeContainerName", + Name: "fakeContainerAName", }, }, }, }, - }, - pspPS: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - { - Name: "fakeContainerName", - ContainerID: "docker://fakeContainerID", - State: v1.ContainerState{ - Running: &v1.ContainerStateRunning{}, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, }, }, }, - }, - pspFound: true, - updateErr: fmt.Errorf("fake container update error"), - stAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, - }, - }, - stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - lastUpdateStAllocations: state.ContainerCPUAllocations{}, - lastUpdateStDefaultCPUSet: cpuset.New(), - expectStAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, - }, - }, - expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - expectSucceededContainerName: "", - expectFailedContainerName: "fakeContainerName", - }, - { - description: "cpu manager reconcile - state has inactive container", - policy: testPolicy, - activePods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "fakePodName", - UID: "fakePodUID", + Name: "fakePodCName", + UID: "fakePodCUID", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { - Name: "fakeContainerName", + Name: "fakeContainerCName", }, }, }, @@ -2508,49 +4498,128 @@ func TestReconcileStateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) pspPS: v1.PodStatus{ ContainerStatuses: []v1.ContainerStatus{ { - Name: "fakeContainerName", - ContainerID: "docker://fakeContainerID", + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: 
&v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", State: v1.ContainerState{ Running: &v1.ContainerStateRunning{}, }, }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: []error{ + fmt.Errorf("fakeContainerAID pass 1 error"), + nil, //fakeContainerCID pass 1 ok + }, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerCID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(6, 7, 8), + }, stAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 3, 6)}, }, - "secondfakePodUID": map[string]state.ContainerCPUAllocation{ - "secondfakeContainerName": {Original: cpuset.New(3, 4), Resized: cpuset.New()}, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(5, 8)}, + }, + }, + stDefaultCPUSet: cpuset.New(1, 4, 7), + lastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(4), Resized: cpuset.New(3, 4, 5)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(6, 7, 8)}, }, }, - stDefaultCPUSet: cpuset.New(5, 6, 7), - lastUpdateStAllocations: state.ContainerCPUAllocations{}, lastUpdateStDefaultCPUSet: cpuset.New(), expectStAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 3, 6)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(5, 8)}, + }, + }, + expectStDefaultCPUSet: cpuset.New(1, 4, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(4), Resized: cpuset.New(3, 4, 5)}, }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(8)}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(8), }, - expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - expectSucceededContainerName: "fakeContainerName", - expectFailedContainerName: "", + expectSucceededContainerName: []string{}, + expectFailedContainerName: []string{"fakeContainerAName", "fakeContainerBName", "fakeContainerCName"}, }, { - description: "cpu manager reconcile - last update state is current", + description: "cpu manager 
reconcile - fail in first reconcile pass causes conflict in third pass", policy: testPolicy, activePods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "fakePodName", - UID: "fakePodUID", + Name: "fakePodAName", + UID: "fakePodAUID", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { - Name: "fakeContainerName", + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", }, }, }, @@ -2559,50 +4628,129 @@ func TestReconcileStateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) pspPS: v1.PodStatus{ ContainerStatuses: []v1.ContainerStatus{ { - Name: "fakeContainerName", - ContainerID: "docker://fakeContainerID", + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", State: v1.ContainerState{ Running: &v1.ContainerStateRunning{}, }, }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: []error{ + fmt.Errorf("fakeContainerAID pass 1 error"), + nil, //fakeContainerCID pass 1 ok + nil, //fakeContainerBID pass 2 ok + }, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerCID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(6, 7, 8), + }, stAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 3, 6)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(2, 5, 8)}, }, }, - stDefaultCPUSet: cpuset.New(5, 6, 7), + stDefaultCPUSet: cpuset.New(4, 7), lastUpdateStAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(4), Resized: cpuset.New(3, 4, 5)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(6, 7, 8)}, }, }, - lastUpdateStDefaultCPUSet: cpuset.New(5, 6, 7), + lastUpdateStDefaultCPUSet: cpuset.New(), expectStAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 3, 6)}, + }, + "fakePodCUID": 
map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(2, 5, 8)}, + }, + }, + expectStDefaultCPUSet: cpuset.New(4, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(4), Resized: cpuset.New(4, 7)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(8)}, }, }, - expectStDefaultCPUSet: cpuset.New(5, 6, 7), - expectSucceededContainerName: "fakeContainerName", - expectFailedContainerName: "", + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(4, 7), + "fakeContainerCID": cpuset.New(8), + }, + expectSucceededContainerName: []string{"fakeContainerBName"}, + expectFailedContainerName: []string{"fakeContainerAName", "fakeContainerCName"}, }, { - description: "cpu manager reconcile - last update state is not current", + description: "cpu manager reconcile - fail in second reconcile pass causes conflict in third pass", policy: testPolicy, activePods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "fakePodName", - UID: "fakePodUID", + Name: "fakePodAName", + UID: "fakePodAUID", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { - Name: "fakeContainerName", + Name: "fakeContainerAName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodBName", + UID: "fakePodBUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerBName", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePodCName", + UID: "fakePodCUID", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fakeContainerCName", }, }, }, @@ -2611,36 +4759,90 @@ func TestReconcileStateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) pspPS: v1.PodStatus{ ContainerStatuses: []v1.ContainerStatus{ { - Name: "fakeContainerName", - ContainerID: "docker://fakeContainerID", + Name: "fakeContainerAName", + ContainerID: "docker://fakeContainerAID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerBName", + ContainerID: "docker://fakeContainerBID", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "fakeContainerCName", + ContainerID: "docker://fakeContainerCID", State: v1.ContainerState{ Running: &v1.ContainerStateRunning{}, }, }, }, }, - pspFound: true, - updateErr: nil, + pspFound: true, + updateErr: []error{ + nil, //fakeContainerAID pass 1 ok + nil, //fakeContainerCID pass 1 ok + fmt.Errorf("fakeContainerBID pass 2 error"), + nil, //fakeContainerAID pass 3 ok + }, + containerIDsWithExclusiveCPUs: []string{"fakeContainerAID", "fakeContainerCID"}, + containerRuntimeInitialState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 1, 2), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(6, 7, 8), + }, stAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 6)}, + 
}, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(2, 5, 8)}, }, }, - stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), + stDefaultCPUSet: cpuset.New(1, 4, 7), lastUpdateStAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(3, 4), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 1, 2)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(4), Resized: cpuset.New(3, 4, 5)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(6, 7, 8)}, }, }, - lastUpdateStDefaultCPUSet: cpuset.New(1, 2, 5, 6, 7), + lastUpdateStDefaultCPUSet: cpuset.New(), expectStAllocations: state.ContainerCPUAllocations{ - "fakePodUID": map[string]state.ContainerCPUAllocation{ - "fakeContainerName": {Original: cpuset.New(1, 2), Resized: cpuset.New()}, + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 6)}, }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(2, 5, 8)}, + }, + }, + expectStDefaultCPUSet: cpuset.New(1, 4, 7), + expectLastUpdateStAllocations: state.ContainerCPUAllocations{ + "fakePodAUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerAName": {Original: cpuset.New(0), Resized: cpuset.New(0, 6)}, + }, + "fakePodBUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerBName": {Original: cpuset.New(4), Resized: cpuset.New(3, 4, 5)}, + }, + "fakePodCUID": map[string]state.ContainerCPUAllocation{ + "fakeContainerCName": {Original: cpuset.New(8), Resized: cpuset.New(8)}, + }, + }, + expectLastUpdateStDefaultCPUSet: cpuset.New(), + expectContainerRuntimeState: map[string]cpuset.CPUSet{ + "fakeContainerAID": cpuset.New(0, 6), + "fakeContainerBID": cpuset.New(3, 4, 5), + "fakeContainerCID": cpuset.New(8), }, - expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7), - expectSucceededContainerName: "fakeContainerName", - expectFailedContainerName: "", + expectSucceededContainerName: []string{"fakeContainerAName"}, + expectFailedContainerName: []string{"fakeContainerBName", "fakeContainerCName"}, }, } @@ -2653,8 +4855,13 @@ func TestReconcileStateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) defaultCPUSet: testCase.stDefaultCPUSet, }, lastUpdateState: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{ - err: testCase.updateErr, + containerRuntime: &mockRuntimeService{ + err: testCase.updateErr, + containerIDsWithExclusiveCPUs: testCase.containerIDsWithExclusiveCPUs, + state: testCase.containerRuntimeInitialState, + testCPUConflicts: true, + testCaseDescription: testCase.description, + t: t, }, containerMap: containermap.NewContainerMap(), activePods: func() []*v1.Pod { @@ -2666,6 +4873,8 @@ func TestReconcileStateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) }, } mgr.sourcesReady = &sourcesReadyStub{} + mgr.lastUpdateState.SetCPUAllocations(testCase.lastUpdateStAllocations) + mgr.lastUpdateState.SetDefaultCPUSet(testCase.lastUpdateStDefaultCPUSet) success, failure := mgr.reconcileState(context.Background()) if !reflect.DeepEqual(testCase.expectStAllocations, mgr.state.GetCPUAllocations()) { @@ -2680,33 
+4889,48 @@ func TestReconcileStateWithInPlacePodVerticalScalingExclusiveCPUs(t *testing.T) } - if testCase.expectSucceededContainerName != "" { + if !reflect.DeepEqual(testCase.expectLastUpdateStAllocations, mgr.lastUpdateState.GetCPUAllocations()) { + t.Errorf("%v", testCase.description) + t.Errorf("Expected lastUpdateState container cpu allocations: %v, actual: %v", testCase.expectLastUpdateStAllocations, mgr.lastUpdateState.GetCPUAllocations()) + } + + if !reflect.DeepEqual(testCase.expectLastUpdateStDefaultCPUSet, mgr.lastUpdateState.GetDefaultCPUSet()) { + t.Errorf("%v", testCase.description) + t.Errorf("Expected lastUpdateState default cpuset: %v, actual: %v", testCase.expectLastUpdateStDefaultCPUSet, mgr.lastUpdateState.GetDefaultCPUSet()) + } + + if !reflect.DeepEqual(testCase.expectContainerRuntimeState, mgr.containerRuntime.(*mockRuntimeService).state) { + t.Errorf("%v", testCase.description) + t.Errorf("Expected containerRuntimeState: %v, actual: %v", testCase.expectContainerRuntimeState, mgr.containerRuntime.(*mockRuntimeService).state) + } + + for _, name := range testCase.expectSucceededContainerName { // Search succeeded reconciled containers for the supplied name. foundSucceededContainer := false for _, reconciled := range success { - if reconciled.containerName == testCase.expectSucceededContainerName { + if reconciled.containerName == name { foundSucceededContainer = true break } } if !foundSucceededContainer { t.Errorf("%v", testCase.description) - t.Errorf("Expected reconciliation success for container: %s", testCase.expectSucceededContainerName) + t.Errorf("Expected reconciliation success for container: %s", name) } } - if testCase.expectFailedContainerName != "" { + for _, name := range testCase.expectFailedContainerName { // Search failed reconciled containers for the supplied name. foundFailedContainer := false for _, reconciled := range failure { - if reconciled.containerName == testCase.expectFailedContainerName { + if reconciled.containerName == name { foundFailedContainer = true break } } if !foundFailedContainer { t.Errorf("%v", testCase.description) - t.Errorf("Expected reconciliation failure for container: %s", testCase.expectFailedContainerName) + t.Errorf("Expected reconciliation failure for container: %s", name) } } } @@ -2740,7 +4964,7 @@ func TestCPUManagerAddWithResvListWithInPlacePodVerticalScalingExclusiveCPUs(t * nil) testCases := []struct { description string - updateErr error + updateErr []error policy Policy expCPUSet cpuset.CPUSet expAllocateErr error @@ -2764,7 +4988,7 @@ func TestCPUManagerAddWithResvListWithInPlacePodVerticalScalingExclusiveCPUs(t * defaultCPUSet: cpuset.New(0, 1, 2, 3), }, lastUpdateState: state.NewMemoryState(logger), - containerRuntime: mockRuntimeService{ + containerRuntime: &mockRuntimeService{ err: testCase.updateErr, }, containerMap: containermap.NewContainerMap(), diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_windows_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_windows_test.go new file mode 100644 index 0000000000000..ccc1447f76394 --- /dev/null +++ b/pkg/kubelet/cm/cpumanager/cpu_manager_windows_test.go @@ -0,0 +1,42 @@ +//go:build windows + +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cpumanager
+
+import (
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	kubefeatures "k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/kubelet/winstats"
+	"k8s.io/utils/cpuset"
+)
+
+func (rt mockRuntimeService) getCPUSetFromResources(resources *runtimeapi.ContainerResources) cpuset.CPUSet {
+	if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WindowsCPUAndMemoryAffinity) {
+		return cpuset.New()
+	}
+	if resources != nil && resources.Windows != nil {
+		var cpus []int
+		for _, affinity := range resources.Windows.AffinityCpus {
+			ga := winstats.GroupAffinity{Mask: affinity.CpuMask, Group: uint16(affinity.CpuGroup)}
+			cpus = append(cpus, ga.Processors()...)
+		}
+		return cpuset.New(cpus...)
+	}
+	return cpuset.New()
+}
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_restore_test.go b/pkg/kubelet/cm/cpumanager/policy_static_restore_test.go
index 2476d44888b07..06a3f807f62b4 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_restore_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_restore_test.go
@@ -141,7 +141,7 @@ func TestCPUManagerRestoreState(t *testing.T) {
 	pod.UID = types.UID("pod1")
 
 	// Start manager to initialize state and activePods
-	err = mgr.Start(context.Background(), func() []*v1.Pod { return []*v1.Pod{pod} }, &sourcesReadyStub{}, mockPodStatusProvider{}, mockRuntimeService{}, containermap.NewContainerMap())
+	err = mgr.Start(context.Background(), func() []*v1.Pod { return []*v1.Pod{pod} }, &sourcesReadyStub{}, mockPodStatusProvider{}, &mockRuntimeService{}, containermap.NewContainerMap())
 	if err != nil {
 		t.Fatalf("could not start manager: %v", err)
 	}
@@ -205,7 +205,7 @@ func TestCPUManagerRestoreState(t *testing.T) {
 		t.Fatalf("could not create manager 2: %v", err)
 	}
 
-	err = mgr2.Start(context.Background(), func() []*v1.Pod { return []*v1.Pod{pod} }, &sourcesReadyStub{}, mockPodStatusProvider{}, mockRuntimeService{}, containermap.NewContainerMap())
+	err = mgr2.Start(context.Background(), func() []*v1.Pod { return []*v1.Pod{pod} }, &sourcesReadyStub{}, mockPodStatusProvider{}, &mockRuntimeService{}, containermap.NewContainerMap())
 	if err != nil {
 		t.Fatalf("could not start manager 2: %v", err)
 	}
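
Note (illustrative, not part of the patch): the reconcile test table above wires a *mockRuntimeService with err, state, containerIDsWithExclusiveCPUs and testCPUConflicts fields, but the mock itself is defined elsewhere in the test file. The minimal sketch below shows one plausible shape for such a mock: it consumes one queued error per UpdateContainerResources call, records the cpuset applied to each container, and flags any transient overlap between two exclusive-CPU containers. The names fakeRuntime, errs and exclusive are hypothetical; only the behaviour the test cases rely on is being illustrated, not the patch's actual implementation.

package cpumanager

import (
	"context"
	"testing"

	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
	"k8s.io/utils/cpuset"
)

// fakeRuntime is an illustrative stand-in for the mock runtime the reconcile
// tests configure (err, state, containerIDsWithExclusiveCPUs, testCPUConflicts).
type fakeRuntime struct {
	errs      []error                  // one queued error consumed per update call
	state     map[string]cpuset.CPUSet // containerID -> cpuset currently applied
	exclusive map[string]bool          // containerIDs that hold exclusive CPUs
	t         *testing.T
}

func (rt *fakeRuntime) UpdateContainerResources(_ context.Context, id string, res *runtimeapi.ContainerResources) error {
	var err error
	if len(rt.errs) > 0 {
		err, rt.errs = rt.errs[0], rt.errs[1:]
	}
	if err != nil {
		return err // simulated runtime failure, analogous to a testCase.updateErr entry
	}
	next, perr := cpuset.Parse(res.GetLinux().GetCpusetCpus())
	if perr != nil {
		return perr
	}
	// Two exclusive-CPU containers must never overlap, even transiently while
	// the reconcile passes are still in flight; this is what the conflict
	// test cases above assert on.
	for otherID, otherSet := range rt.state {
		if otherID == id || !rt.exclusive[id] || !rt.exclusive[otherID] {
			continue
		}
		if !next.Intersection(otherSet).IsEmpty() {
			rt.t.Errorf("exclusive cpuset conflict: %s -> %v overlaps %s -> %v", id, next, otherID, otherSet)
		}
	}
	rt.state[id] = next
	return nil
}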
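
Note (illustrative, not part of the patch): a hypothetical use of the new Windows-only getCPUSetFromResources helper, assuming the element type behind resources.Windows.AffinityCpus is runtimeapi.WindowsCpuGroupAffinity and that the WindowsCPUAndMemoryAffinity feature gate is enabled. A mask of 0b0110 in group 0 should decode to logical CPUs 1 and 2.

	// Sketch only; type name WindowsCpuGroupAffinity is an assumption about the CRI API.
	res := &runtimeapi.ContainerResources{
		Windows: &runtimeapi.WindowsContainerResources{
			AffinityCpus: []*runtimeapi.WindowsCpuGroupAffinity{
				{CpuMask: 0b0110, CpuGroup: 0}, // bits 1 and 2 set, processor group 0
			},
		},
	}
	got := mockRuntimeService{}.getCPUSetFromResources(res)
	// got is expected to equal cpuset.New(1, 2) when the feature gate is enabled.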