if tt.expectedAllocatedPodReqs != nil {
require.NotNil(t, updatedPod.Spec.Resources)
assert.Equal(t, tt.expectedAllocatedPodReqs, updatedPod.Spec.Resources.Requests, "updated pod spec pod requests")
- } else {
- if updatedPod.Spec.Resources != nil {
- assert.Empty(t, updatedPod.Spec.Resources.Requests, "updated pod spec pod requests should be empty")
- }
+ } else if updatedPod.Spec.Resources != nil {
+ assert.Empty(t, updatedPod.Spec.Resources.Requests, "updated pod spec pod requests should be empty")
}
alloc, found := allocationManager.(*manager).allocated.GetPodResourceInfo(newPod.UID)
admitFunc: nil,
expectAdmit: true,
// allocated resources updated with pod1's resources
- expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{podResources: cpu1Mem1G, containerResources: cpu1Mem1G}},
+ expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {podResources: cpu1Mem1G, containerResources: cpu1Mem1G}},
ipprPLRFeatureGate: true,
},
{
podToAdd: pod1SmallWithPLR,
admitFunc: nil,
expectAdmit: true,
- expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+ expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
},
{
name: "PLR IPPR Enabled - New pod not admitted due to insufficient resources",
- initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+ initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
currentActivePods: []*v1.Pod{pod1Small},
podToAdd: pod2LargeWithPLR,
admitFunc: func(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
admissionFailureReason: "OutOfcpu",
admissionFailureMessage: "not enough CPUs available for pod ns2/pod2, requested: 2, available:1",
// allocated resources not modified
- expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+ expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
ipprPLRFeatureGate: true,
},
{
name: "PLR IPPR Disabled - New pod not admitted due to insufficient resources",
- initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+ initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
currentActivePods: []*v1.Pod{pod1Small},
podToAdd: pod2LargeWithPLR,
admitFunc: func(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
admissionFailureReason: "OutOfcpu",
admissionFailureMessage: "not enough CPUs available for pod ns2/pod2, requested: 2, available:1",
// allocated resources not modified
- expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+ expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
},
{
name: "PLR IPPR Enabled - no pod resize request. Resource request same as existing allocation",
initialAllocatedResourcesState: map[types.UID]resourceState{
- pod1UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
- pod2UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+ pod1UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+ pod2UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
},
currentActivePods: []*v1.Pod{pod1SmallWithPLR},
podToAdd: pod1SmallWithPLR,
admitFunc: nil,
expectAdmit: true,
expectedAllocatedResourcesState: map[types.UID]resourceState{
- pod1UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
- pod2UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+ pod1UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+ pod2UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
},
ipprPLRFeatureGate: true,
},
admitFunc: nil,
expectAdmit: true,
// pod2's resources added to allocated resources
- expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu2Mem2G, podResources: cpu2Mem2G}},
+ expectedAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu2Mem2G, podResources: cpu2Mem2G}},
ipprPLRFeatureGate: true,
},
{
name: "PLR IPPR Enabled - request different from current allocation. Pod still admitted based on existing allocation, but allocated resources remains unchanges.",
initialAllocatedResourcesState: map[types.UID]resourceState{
- pod1UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
- pod2UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+ pod1UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+ pod2UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
},
currentActivePods: []*v1.Pod{pod1SmallWithPLR, pod2SmallWithPLR},
podToAdd: pod1LargeWithPLR,
expectAdmit: true,
// allocated Resources state must not be updated.
expectedAllocatedResourcesState: map[types.UID]resourceState{
- pod1UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
- pod2UID: resourceState{containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+ pod1UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
+ pod2UID: {containerResources: cpu1Mem1G, podResources: cpu1Mem1G},
},
ipprPLRFeatureGate: true,
},
{
name: "PLR IPPR Disabled - request different from current allocation. Admission fails. Allocated resources not updated.",
- initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: resourceState{containerResources: cpu1Mem1G}},
+ initialAllocatedResourcesState: map[types.UID]resourceState{pod1UID: {containerResources: cpu1Mem1G}},
currentActivePods: []*v1.Pod{pod1SmallWithPLR},
podToAdd: pod1LargeWithPLR,
admitFunc: func(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
admissionFailureMessage: "not enough CPUs available for pod ns1/pod1, requested: 2, available:1",
// allocated Resources state must not be updated.
expectedAllocatedResourcesState: map[types.UID]resourceState{
- pod1UID: resourceState{containerResources: cpu1Mem1G},
+ pod1UID: {containerResources: cpu1Mem1G},
},
ipprPLRFeatureGate: false,
},
return utilerrors.NewAggregate(errs)
}
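+// VerifyPodLevelStatusResources verifies that the pod status reports the expected
+// pod-level resources: status.allocatedResources must match the expected requests,
+// and status.resources must match the expected ResourceRequirements. Any mismatches
+// are returned as an aggregate error.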
+func VerifyPodLevelStatusResources(gotPod *v1.Pod, wantPodResources *v1.ResourceRequirements) error {
+ var errs []error
+ if err := framework.Gomega().Expect(gotPod.Status.AllocatedResources).To(gomega.BeComparableTo(wantPodResources.Requests)); err != nil {
+ errs = append(errs, fmt.Errorf("pod[%s] status allocatedResources mismatch: %w", gotPod.Name, err))
+ }
+
+ if err := framework.Gomega().Expect(gotPod.Status.Resources).To(gomega.BeComparableTo(wantPodResources)); err != nil {
+ errs = append(errs, fmt.Errorf("pod[%s] status resources mismatch: %w", gotPod.Name, err))
+ }
+ return utilerrors.NewAggregate(errs)
+}
+
func verifyPodContainersStatusResources(gotCtrStatuses []v1.ContainerStatus, wantCtrs []v1.Container) error {
ginkgo.GinkgoHelper()
	// cgroup values is resulting in nondeterministic rounded-off values
var wantAllocatedResources v1.ResourceList
- aggrReq, aggrLim := podresize.AggregateContainerResources(gotPod.Spec)
- wantStatusResources = &v1.ResourceRequirements{Requests: aggrReq, Limits: aggrLim}
+ aggrReq, _ := podresize.AggregateContainerResources(gotPod.Spec)
wantAllocatedResources = aggrReq
if gotPod.Spec.Resources != nil {
- wantStatusResources = &v1.ResourceRequirements{
- Requests: gotPod.Spec.Resources.Requests,
- Limits: gotPod.Spec.Resources.Limits,
- }
wantAllocatedResources = gotPod.Spec.Resources.Requests
}
// 1. Decrease the limit a little bit - should succeed
ginkgo.By("Patching pod with a slightly lowered memory limit")
- viableLoweredLimit := []podresize.ResizableContainerInfo{{
- Name: "c1",
- }}
viableLoweredLimitPLR := &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse(reducedMem),
podresize.VerifyPodResources(testPod, containers, viableLoweredLimitPLR)
ginkgo.By("waiting for viable lowered limit to be actuated")
- resizedPod := podresize.WaitForPodResizeActuation(ctx, f, podClient, testPod, viableLoweredLimit)
- podresize.ExpectPodResized(ctx, f, resizedPod, viableLoweredLimit)
+ resizedPod := podresize.WaitForPodResizeActuation(ctx, f, podClient, testPod, containers)
+ podresize.ExpectPodResized(ctx, f, resizedPod, containers)
// There is some latency after container startup before memory usage is scraped. On CRI-O
// this latency is much higher, so wait enough time for cAdvisor to scrape metrics twice.
// 2. Decrease the limit down to a tiny amount - should fail
const nonViableMemoryLimit = "10Ki"
ginkgo.By("Patching pod with a greatly lowered memory limit")
- nonViableLoweredLimit := []podresize.ResizableContainerInfo{{
- Name: "c1",
- Resources: &cgroups.ContainerResources{MemReq: nonViableMemoryLimit, MemLim: nonViableMemoryLimit},
- }}
- patch = podresize.MakeResizePatch(viableLoweredLimit, nonViableLoweredLimit, nil, nil)
+ nonViableLoweredLimitPLR := &v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ v1.ResourceMemory: resource.MustParse(nonViableMemoryLimit),
+ },
+ Limits: v1.ResourceList{
+ v1.ResourceMemory: resource.MustParse(nonViableMemoryLimit),
+ },
+ }
+
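+	// Only the pod-level resources are resized (dropped below current memory usage);
+	// the container-level resources are left unchanged.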
+ patch = podresize.MakeResizePatch(containers, containers, viableLoweredLimitPLR, nonViableLoweredLimitPLR)
testPod, pErr = f.ClientSet.CoreV1().Pods(testPod.Namespace).Patch(ctx, testPod.Name,
types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "resize")
framework.ExpectNoError(pErr, "failed to patch pod for viable lowered limit")
Eventually(ctx, framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(testPod.Namespace).Get, testPod.Name, metav1.GetOptions{}))).
WithTimeout(f.Timeouts.PodStart).
Should(framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
- // If VerifyPodStatusResources succeeds, it means the resize completed.
- if podresize.VerifyPodStatusResources(pod, nonViableLoweredLimit) == nil {
+ // If VerifyPodLevelStatusResources succeeds, it means the resize completed.
+ if podresize.VerifyPodLevelStatusResources(pod, nonViableLoweredLimitPLR) == nil {
return nil, gomega.StopTrying("non-viable resize unexpectedly completed")
}
}, nil
}
}
+
if inProgressCondition == nil {
return func() string { return "resize is not in progress" }, nil
}
if inProgressCondition.Reason != v1.PodReasonError {
return func() string { return "in-progress reason is not error" }, nil
}
-
expectedMsg := regexp.MustCompile(`memory limit \(\d+\) below current usage`)
if !expectedMsg.MatchString(inProgressCondition.Message) {
return func() string {
})),
)
ginkgo.By("verifying pod status resources still match the viable resize")
- framework.ExpectNoError(podresize.VerifyPodStatusResources(testPod, viableLoweredLimit))
+ framework.ExpectNoError(podresize.VerifyPodStatusResources(testPod, containers))
// 3. Revert the limit back to the original value - should succeed
ginkgo.By("Patching pod to revert to original state")
- patch = podresize.MakeResizePatch(nonViableLoweredLimit, containers, nil, nil)
+ patch = podresize.MakeResizePatch(containers, containers, viableLoweredLimitPLR, originalPLR)
testPod, pErr = f.ClientSet.CoreV1().Pods(testPod.Namespace).Patch(ctx, testPod.Name,
types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "resize")
framework.ExpectNoError(pErr, "failed to patch pod back to original values")
ginkgo.By("verifying pod patched for original values")
- podresize.VerifyPodResources(testPod, containers, nil)
+ podresize.VerifyPodResources(testPod, containers, originalPLR)
ginkgo.By("waiting for the original values to be actuated")
resizedPod = podresize.WaitForPodResizeActuation(ctx, f, podClient, testPod, containers)
// b) api-server in services doesn't start with --enable-admission-plugins=ResourceQuota
// and it is not possible to start it from TEST_ARGS
// Above tests in test/e2e/node/pod_resize.go
-var _ = SIGDescribe("Pod InPlace Resize", func() {
+var _ = SIGDescribe("Pod InPlace Resize Container", func() {
f := framework.NewDefaultFramework("pod-resize-tests")
ginkgo.BeforeEach(func(ctx context.Context) {
})
ginkgo.DescribeTable("pod-resize-resource-quota-test",
- func(ctx context.Context, desiredContainers []podresize.ResizableContainerInfo, expectedContainers []podresize.ResizableContainerInfo, podResources *v1.ResourceRequirements, wantError string) {
+ func(ctx context.Context, desiredContainers []podresize.ResizableContainerInfo, expectedContainers []podresize.ResizableContainerInfo, wantError string) {
tStamp := strconv.Itoa(time.Now().Nanosecond())
- testPod1 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod1", tStamp, originalContainers, podResources)
+ testPod1 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod1", tStamp, originalContainers, nil)
testPod1 = e2epod.MustMixinRestrictedPodSecurity(testPod1)
- testPod2 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod2", tStamp, originalContainers, podResources)
+ testPod2 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod2", tStamp, originalContainers, nil)
testPod2 = e2epod.MustMixinRestrictedPodSecurity(testPod2)
ginkgo.By("creating pods")
},
},
originalContainers,
- nil,
"exceeded quota: resize-resource-quota, requested: cpu=300m, used: cpu=600m, limited: cpu=800m",
),
},
},
originalContainers,
- nil,
"exceeded quota: resize-resource-quota, requested: memory=450Mi, used: memory=600Mi, limited: memory=800Mi",
),
},
},
originalContainers,
- nil,
"exceeded quota: resize-resource-quota, requested: cpu=300m,memory=450Mi, used: cpu=600m,memory=600Mi, limited: cpu=800m,memory=800Mi",
),
Resources: &cgroups.ContainerResources{CPUReq: "350m", CPULim: "350m", MemReq: "300Mi", MemLim: "300Mi"},
},
},
- nil,
"",
),
Resources: &cgroups.ContainerResources{CPUReq: "350m", CPULim: "350m", MemReq: "350Mi", MemLim: "350Mi"},
},
},
- nil,
"",
),
Resources: &cgroups.ContainerResources{CPUReq: "400m", CPULim: "400m", MemReq: "400Mi", MemLim: "400Mi"},
},
},
- nil,
"",
),
)