git.feebdaed.xyz Git - 0xmirror/kubernetes.git / commitdiff
Test fixes
author    ndixita <ndixita@google.com>
          Wed, 12 Nov 2025 05:53:41 +0000 (05:53 +0000)
committer ndixita <ndixita@google.com>
          Wed, 12 Nov 2025 06:21:06 +0000 (06:21 +0000)
Signed-off-by: ndixita <ndixita@google.com>
pkg/kubelet/allocation/allocation_manager_test.go
test/e2e/common/node/framework/podresize/resize.go
test/e2e/common/node/pod_level_resources_resize.go
test/e2e/common/node/pod_resize.go
test/e2e/node/pod_resize.go

diff --git a/pkg/kubelet/allocation/allocation_manager_test.go b/pkg/kubelet/allocation/allocation_manager_test.go
index d94a371318eecec2e195bfae47af0be9ee2f26b9..b6a3b5e3e978aecd809061e7b9f80f9ef77072b5 100644 (file)
--- a/pkg/kubelet/allocation/allocation_manager_test.go
+++ b/pkg/kubelet/allocation/allocation_manager_test.go
@@ -818,10 +818,8 @@ func TestRetryPendingResizes(t *testing.T) {
                                        if tt.expectedAllocatedPodReqs != nil {
                                                require.NotNil(t, updatedPod.Spec.Resources)
                                                assert.Equal(t, tt.expectedAllocatedPodReqs, updatedPod.Spec.Resources.Requests, "updated pod spec pod requests")
-                                       } else {
-                                               if updatedPod.Spec.Resources != nil {
-                                                       assert.Empty(t, updatedPod.Spec.Resources.Requests, "updated pod spec pod requests should be empty")
-                                               }
+                                       } else if updatedPod.Spec.Resources != nil {
+                                               assert.Empty(t, updatedPod.Spec.Resources.Requests, "updated pod spec pod requests should be empty")
                                        }
 
                                        alloc, found := allocationManager.(*manager).allocated.GetPodResourceInfo(newPod.UID)
@@ -1570,7 +1568,7 @@ func TestAllocationManagerAddPodWithPLR(t *testing.T) {
                        admitFunc:                      nil,
                        expectAdmit:                    true,
                        // allocated resources updated with pod1's resources
-                       expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{podResources: cpu1Mem1G, containerResources: cpu1Mem1G}},
+                       expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {podResources: cpu1Mem1G, containerResources: cpu1Mem1G}},
                        ipprPLRFeatureGate:              true,
                },
                {
@@ -1580,11 +1578,11 @@ func TestAllocationManagerAddPodWithPLR(t *testing.T) {
                        podToAdd:                        pod1SmallWithPLR,
                        admitFunc:                       nil,
                        expectAdmit:                     true,
-                       expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+                       expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
                },
                {
                        name:                           "PLR IPPR Enabled - New pod not admitted due to insufficient resources",
-                       initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+                       initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
                        currentActivePods:              []*v1.Pod{pod1Small},
                        podToAdd:                       pod2LargeWithPLR,
                        admitFunc: func(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
@@ -1602,12 +1600,12 @@ func TestAllocationManagerAddPodWithPLR(t *testing.T) {
                        admissionFailureReason:  "OutOfcpu",
                        admissionFailureMessage: "not enough CPUs available for pod ns2/pod2, requested: 2, available:1",
                        // allocated resources not modified
-                       expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+                       expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
                        ipprPLRFeatureGate:              true,
                },
                {
                        name:                           "PLR IPPR Disabled - New pod not admitted due to insufficient resources",
-                       initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+                       initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
                        currentActivePods:              []*v1.Pod{pod1Small},
                        podToAdd:                       pod2LargeWithPLR,
                        admitFunc: func(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
@@ -1625,21 +1623,21 @@ func TestAllocationManagerAddPodWithPLR(t *testing.T) {
                        admissionFailureReason:  "OutOfcpu",
                        admissionFailureMessage: "not enough CPUs available for pod ns2/pod2, requested: 2, available:1",
                        // allocated resources not modified
-                       expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+                       expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
                },
                {
                        name: "PLR IPPR Enabled - no pod resize request. Resource request same as existing allocation",
                        initialAllocatedResourcesState: map[types.UID]resourceState{
-                               pod1UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
-                               pod2UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+                               pod1UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+                               pod2UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
                        },
                        currentActivePods: []*v1.Pod{pod1SmallWithPLR},
                        podToAdd:          pod1SmallWithPLR,
                        admitFunc:         nil,
                        expectAdmit:       true,
                        expectedAllocatedResourcesState: map[types.UID]resourceState{
-                               pod1UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
-                               pod2UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+                               pod1UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+                               pod2UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
                        },
                        ipprPLRFeatureGate: true,
                },
@@ -1651,14 +1649,14 @@ func TestAllocationManagerAddPodWithPLR(t *testing.T) {
                        admitFunc:                      nil,
                        expectAdmit:                    true,
                        // pod2's resources added to allocated resources
-                       expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu2Mem2G, podResources: cpu2Mem2G}},
+                       expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu2Mem2G, podResources: cpu2Mem2G}},
                        ipprPLRFeatureGate:              true,
                },
                {
                        name: "PLR IPPR Enabled - request different from current allocation. Pod still admitted based on existing allocation, but allocated resources remains unchanges.",
                        initialAllocatedResourcesState: map[types.UID]resourceState{
-                               pod1UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
-                               pod2UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+                               pod1UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+                               pod2UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
                        },
                        currentActivePods: []*v1.Pod{pod1SmallWithPLR, pod2SmallWithPLR},
                        podToAdd:          pod1LargeWithPLR,
@@ -1677,14 +1675,14 @@ func TestAllocationManagerAddPodWithPLR(t *testing.T) {
                        expectAdmit: true,
                        //  allocated Resources state must not be updated.
                        expectedAllocatedResourcesState: map[types.UID]resourceState{
-                               pod1UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
-                               pod2UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+                               pod1UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+                               pod2UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
                        },
                        ipprPLRFeatureGate: true,
                },
                {
                        name:                           "PLR IPPR Disabled - request different from current allocation. Admission fails. Allocated resources not updated.",
-                       initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+                       initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
                        currentActivePods:              []*v1.Pod{pod1SmallWithPLR},
                        podToAdd:                       pod1LargeWithPLR,
                        admitFunc: func(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
@@ -1704,7 +1702,7 @@ func TestAllocationManagerAddPodWithPLR(t *testing.T) {
                        admissionFailureMessage: "not enough CPUs available for pod ns1/pod1, requested: 2, available:1",
                        // allocated Resources state must not be updated.
                        expectedAllocatedResourcesState: map[types.UID]resourceState{
-                               pod1UID: resourceState{containerResources: cpu1Mem1G},
+                               pod1UID: {containerResources: cpu1Mem1G},
                        },
                        ipprPLRFeatureGate: false,
                },
diff --git a/test/e2e/common/node/framework/podresize/resize.go b/test/e2e/common/node/framework/podresize/resize.go
index 5f1e30030a488a1176a0b609adb32927bb46b2ab..fc6c92452bae66a65779ded0faa3a132cd4d9a54 100644 (file)
--- a/test/e2e/common/node/framework/podresize/resize.go
+++ b/test/e2e/common/node/framework/podresize/resize.go
@@ -196,6 +196,18 @@ func VerifyPodStatusResources(gotPod *v1.Pod, wantInfo []ResizableContainerInfo)
        return utilerrors.NewAggregate(errs)
 }
 
+func VerifyPodLevelStatusResources(gotPod *v1.Pod, wantPodResources *v1.ResourceRequirements) error {
+       var errs []error
+       if err := framework.Gomega().Expect(gotPod.Status.AllocatedResources).To(gomega.BeComparableTo(wantPodResources.Requests)); err != nil {
+               errs = append(errs, fmt.Errorf("pod[%s] status allocatedResources mismatch: %w", gotPod.Name, err))
+       }
+
+       if err := framework.Gomega().Expect(gotPod.Status.Resources).To(gomega.BeComparableTo(wantPodResources)); err != nil {
+               errs = append(errs, fmt.Errorf("pod[%s] status resources mismatch: %w", gotPod.Name, err))
+       }
+       return utilerrors.NewAggregate(errs)
+}
+
 func verifyPodContainersStatusResources(gotCtrStatuses []v1.ContainerStatus, wantCtrs []v1.Container) error {
        ginkgo.GinkgoHelper()
 
diff --git a/test/e2e/common/node/pod_level_resources_resize.go b/test/e2e/common/node/pod_level_resources_resize.go
index 703a9b6f678500c6163b0ae558bc4533d40a95f8..92690114c1dfe42adb03f5f66447be2eb3b70197 100644 (file)
--- a/test/e2e/common/node/pod_level_resources_resize.go
+++ b/test/e2e/common/node/pod_level_resources_resize.go
@@ -497,15 +497,10 @@ func VerifyPodLevelStatus(gotPod *v1.Pod) error {
        // cgroup values is resulting in indeterministic rounded off values
 
        var wantAllocatedResources v1.ResourceList
-       aggrReq, aggrLim := podresize.AggregateContainerResources(gotPod.Spec)
-       wantStatusResources = &v1.ResourceRequirements{Requests: aggrReq, Limits: aggrLim}
+       aggrReq, _ := podresize.AggregateContainerResources(gotPod.Spec)
        wantAllocatedResources = aggrReq
 
        if gotPod.Spec.Resources != nil {
-               wantStatusResources = &v1.ResourceRequirements{
-                       Requests: gotPod.Spec.Resources.Requests,
-                       Limits:   gotPod.Spec.Resources.Limits,
-               }
                wantAllocatedResources = gotPod.Spec.Resources.Requests
        }
 
@@ -541,9 +536,6 @@ func doPodLevelResourcesMemoryLimitDecreaseTest(f *framework.Framework) {
 
                // 1. Decrease the limit a little bit - should succeed
                ginkgo.By("Patching pod with a slightly lowered memory limit")
-               viableLoweredLimit := []podresize.ResizableContainerInfo{{
-                       Name: "c1",
-               }}
                viableLoweredLimitPLR := &v1.ResourceRequirements{
                        Requests: v1.ResourceList{
                                v1.ResourceMemory: resource.MustParse(reducedMem),
@@ -561,8 +553,8 @@ func doPodLevelResourcesMemoryLimitDecreaseTest(f *framework.Framework) {
                podresize.VerifyPodResources(testPod, containers, viableLoweredLimitPLR)
 
                ginkgo.By("waiting for viable lowered limit to be actuated")
-               resizedPod := podresize.WaitForPodResizeActuation(ctx, f, podClient, testPod, viableLoweredLimit)
-               podresize.ExpectPodResized(ctx, f, resizedPod, viableLoweredLimit)
+               resizedPod := podresize.WaitForPodResizeActuation(ctx, f, podClient, testPod, containers)
+               podresize.ExpectPodResized(ctx, f, resizedPod, containers)
 
                // There is some latency after container startup before memory usage is scraped. On CRI-O
                // this latency is much higher, so wait enough time for cAdvisor to scrape metrics twice.
@@ -574,11 +566,16 @@ func doPodLevelResourcesMemoryLimitDecreaseTest(f *framework.Framework) {
                // 2. Decrease the limit down to a tiny amount - should fail
                const nonViableMemoryLimit = "10Ki"
                ginkgo.By("Patching pod with a greatly lowered memory limit")
-               nonViableLoweredLimit := []podresize.ResizableContainerInfo{{
-                       Name:      "c1",
-                       Resources: &cgroups.ContainerResources{MemReq: nonViableMemoryLimit, MemLim: nonViableMemoryLimit},
-               }}
-               patch = podresize.MakeResizePatch(viableLoweredLimit, nonViableLoweredLimit, nil, nil)
+               nonViableLoweredLimitPLR := &v1.ResourceRequirements{
+                       Requests: v1.ResourceList{
+                               v1.ResourceMemory: resource.MustParse(nonViableMemoryLimit),
+                       },
+                       Limits: v1.ResourceList{
+                               v1.ResourceMemory: resource.MustParse(nonViableMemoryLimit),
+                       },
+               }
+
+               patch = podresize.MakeResizePatch(containers, containers, viableLoweredLimitPLR, nonViableLoweredLimitPLR)
                testPod, pErr = f.ClientSet.CoreV1().Pods(testPod.Namespace).Patch(ctx, testPod.Name,
                        types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "resize")
                framework.ExpectNoError(pErr, "failed to patch pod for viable lowered limit")
@@ -587,8 +584,8 @@ func doPodLevelResourcesMemoryLimitDecreaseTest(f *framework.Framework) {
                        Eventually(ctx, framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(testPod.Namespace).Get, testPod.Name, metav1.GetOptions{}))).
                        WithTimeout(f.Timeouts.PodStart).
                        Should(framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
-                               // If VerifyPodStatusResources succeeds, it means the resize completed.
-                               if podresize.VerifyPodStatusResources(pod, nonViableLoweredLimit) == nil {
+                               // If VerifyPodLevelStatusResources succeeds, it means the resize completed.
+                               if podresize.VerifyPodLevelStatusResources(pod, nonViableLoweredLimitPLR) == nil {
                                        return nil, gomega.StopTrying("non-viable resize unexpectedly completed")
                                }
 
@@ -603,6 +600,7 @@ func doPodLevelResourcesMemoryLimitDecreaseTest(f *framework.Framework) {
                                                }, nil
                                        }
                                }
+
                                if inProgressCondition == nil {
                                        return func() string { return "resize is not in progress" }, nil
                                }
@@ -610,7 +608,6 @@ func doPodLevelResourcesMemoryLimitDecreaseTest(f *framework.Framework) {
                                if inProgressCondition.Reason != v1.PodReasonError {
                                        return func() string { return "in-progress reason is not error" }, nil
                                }
-
                                expectedMsg := regexp.MustCompile(`memory limit \(\d+\) below current usage`)
                                if !expectedMsg.MatchString(inProgressCondition.Message) {
                                        return func() string {
@@ -621,17 +618,17 @@ func doPodLevelResourcesMemoryLimitDecreaseTest(f *framework.Framework) {
                        })),
                )
                ginkgo.By("verifying pod status resources still match the viable resize")
-               framework.ExpectNoError(podresize.VerifyPodStatusResources(testPod, viableLoweredLimit))
+               framework.ExpectNoError(podresize.VerifyPodStatusResources(testPod, containers))
 
                // 3. Revert the limit back to the original value - should succeed
                ginkgo.By("Patching pod to revert to original state")
-               patch = podresize.MakeResizePatch(nonViableLoweredLimit, containers, nil, nil)
+               patch = podresize.MakeResizePatch(containers, containers, viableLoweredLimitPLR, originalPLR)
                testPod, pErr = f.ClientSet.CoreV1().Pods(testPod.Namespace).Patch(ctx, testPod.Name,
                        types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "resize")
                framework.ExpectNoError(pErr, "failed to patch pod back to original values")
 
                ginkgo.By("verifying pod patched for original values")
-               podresize.VerifyPodResources(testPod, containers, nil)
+               podresize.VerifyPodResources(testPod, containers, originalPLR)
 
                ginkgo.By("waiting for the original values to be actuated")
                resizedPod = podresize.WaitForPodResizeActuation(ctx, f, podClient, testPod, containers)
diff --git a/test/e2e/common/node/pod_resize.go b/test/e2e/common/node/pod_resize.go
index e4dcccd08871489a21e2fdb00a5cd6a88e3f5c19..0f02fd405dfd3020c5b950e6edbd71c6848b8ba6 100644 (file)
--- a/test/e2e/common/node/pod_resize.go
+++ b/test/e2e/common/node/pod_resize.go
@@ -807,7 +807,7 @@ func doPodResizeReadAndReplaceTests(f *framework.Framework) {
 //        b) api-server in services doesn't start with --enable-admission-plugins=ResourceQuota
 //           and is not possible to start it from TEST_ARGS
 //     Above tests in test/e2e/node/pod_resize.go
-var _ = SIGDescribe("Pod InPlace Resize", func() {
+var _ = SIGDescribe("Pod InPlace Resize Container", func() {
        f := framework.NewDefaultFramework("pod-resize-tests")
 
        ginkgo.BeforeEach(func(ctx context.Context) {
diff --git a/test/e2e/node/pod_resize.go b/test/e2e/node/pod_resize.go
index f006f51248093e5e4befa7a527e59868d002f525..07bfd4cee3d637d57349c1d12291bbcbad7555b5 100644 (file)
--- a/test/e2e/node/pod_resize.go
+++ b/test/e2e/node/pod_resize.go
@@ -77,11 +77,11 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
        })
 
        ginkgo.DescribeTable("pod-resize-resource-quota-test",
-               func(ctx context.Context, desiredContainers []podresize.ResizableContainerInfo, expectedContainers []podresize.ResizableContainerInfo, podResources *v1.ResourceRequirements, wantError string) {
+               func(ctx context.Context, desiredContainers []podresize.ResizableContainerInfo, expectedContainers []podresize.ResizableContainerInfo, wantError string) {
                        tStamp := strconv.Itoa(time.Now().Nanosecond())
-                       testPod1 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod1", tStamp, originalContainers, podResources)
+                       testPod1 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod1", tStamp, originalContainers, nil)
                        testPod1 = e2epod.MustMixinRestrictedPodSecurity(testPod1)
-                       testPod2 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod2", tStamp, originalContainers, podResources)
+                       testPod2 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod2", tStamp, originalContainers, nil)
                        testPod2 = e2epod.MustMixinRestrictedPodSecurity(testPod2)
 
                        ginkgo.By("creating pods")
@@ -140,7 +140,6 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
                                },
                        },
                        originalContainers,
-                       nil,
                        "exceeded quota: resize-resource-quota, requested: cpu=300m, used: cpu=600m, limited: cpu=800m",
                ),
 
@@ -152,7 +151,6 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
                                },
                        },
                        originalContainers,
-                       nil,
                        "exceeded quota: resize-resource-quota, requested: memory=450Mi, used: memory=600Mi, limited: memory=800Mi",
                ),
 
@@ -164,7 +162,6 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
                                },
                        },
                        originalContainers,
-                       nil,
                        "exceeded quota: resize-resource-quota, requested: cpu=300m,memory=450Mi, used: cpu=600m,memory=600Mi, limited: cpu=800m,memory=800Mi",
                ),
 
@@ -181,7 +178,6 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
                                        Resources: &cgroups.ContainerResources{CPUReq: "350m", CPULim: "350m", MemReq: "300Mi", MemLim: "300Mi"},
                                },
                        },
-                       nil,
                        "",
                ),
 
@@ -198,7 +194,6 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
                                        Resources: &cgroups.ContainerResources{CPUReq: "350m", CPULim: "350m", MemReq: "350Mi", MemLim: "350Mi"},
                                },
                        },
-                       nil,
                        "",
                ),
 
@@ -215,7 +210,6 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
                                        Resources: &cgroups.ContainerResources{CPUReq: "400m", CPULim: "400m", MemReq: "400Mi", MemLim: "400Mi"},
                                },
                        },
-                       nil,
                        "",
                ),
        )