git.feebdaed.xyz Git - 0xmirror/kubernetes.git/commitdiff
Scheduler changes to support in-place resize of pod-level resources
author    ndixita <ndixita@google.com>
Wed, 22 Oct 2025 04:07:15 +0000 (04:07 +0000)
committer ndixita <ndixita@google.com>
Tue, 11 Nov 2025 18:15:22 +0000 (18:15 +0000)
pkg/scheduler/framework/events.go
pkg/scheduler/framework/plugins/feature/feature.go
pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go
pkg/scheduler/framework/plugins/noderesources/fit.go
pkg/scheduler/framework/plugins/noderesources/fit_test.go
pkg/scheduler/framework/plugins/noderesources/resource_allocation.go
pkg/scheduler/framework/types.go
pkg/scheduler/framework/types_test.go
pkg/scheduler/testing/wrappers.go
staging/src/k8s.io/component-helpers/resource/helpers.go
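
The diff below threads a new feature gate, InPlacePodLevelResourcesVerticalScaling, from the scheduler's Features struct into resourcehelper.PodResourcesOptions at every call site that computes pod requests, so that pod-level requests reported in PodStatus are honored while an in-place resize is underway. The sketch that follows shows that call pattern; the helper name and its parameters are illustrative, while PodRequests, PodResourcesOptions, and the two option fields are taken from the changes below.

package example

import (
        v1 "k8s.io/api/core/v1"
        resourcehelper "k8s.io/component-helpers/resource"
)

// effectiveRequests forwards both feature gates into PodResourcesOptions, mirroring the
// call sites touched in events.go, fit.go, resource_allocation.go, and types.go below.
func effectiveRequests(pod *v1.Pod, inPlaceScaling, inPlacePodLevelScaling bool) v1.ResourceList {
        return resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
                UseStatusResources: inPlaceScaling,
                InPlacePodLevelResourcesVerticalScalingEnabled: inPlacePodLevelScaling,
        })
}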

index b0228e65e60c5064a671e02975ca15cdc7fe16f5..52264c61cb44dada5a8411bdc2360a88fcc489f4 100644 (file)
@@ -102,6 +102,7 @@ type podChangeExtractor func(newPod *v1.Pod, oldPod *v1.Pod) fwk.ActionType
 func extractPodScaleDown(newPod, oldPod *v1.Pod) fwk.ActionType {
        opt := resource.PodResourcesOptions{
                UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+               InPlacePodLevelResourcesVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling),
        }
        newPodRequests := resource.PodRequests(newPod, opt)
        oldPodRequests := resource.PodRequests(oldPod, opt)
index b0871976a70e2ee36c90e5df03d16cf864095903..f15c64edbd6f1e19d320a38db1805e368e0b126b 100644 (file)
@@ -25,58 +25,60 @@ import (
 // This struct allows us to break the dependency of the plugins on
 // the internal k8s features pkg.
 type Features struct {
-       EnableDRAExtendedResource                    bool
-       EnableDRAPrioritizedList                     bool
-       EnableDRAAdminAccess                         bool
-       EnableDRAConsumableCapacity                  bool
-       EnableDRADeviceTaints                        bool
-       EnableDRADeviceBindingConditions             bool
-       EnableDRAPartitionableDevices                bool
-       EnableDRAResourceClaimDeviceStatus           bool
-       EnableDRASchedulerFilterTimeout              bool
-       EnableDynamicResourceAllocation              bool
-       EnableVolumeAttributesClass                  bool
-       EnableCSIMigrationPortworx                   bool
-       EnableVolumeLimitScaling                     bool
-       EnableNodeInclusionPolicyInPodTopologySpread bool
-       EnableMatchLabelKeysInPodTopologySpread      bool
-       EnableInPlacePodVerticalScaling              bool
-       EnableSidecarContainers                      bool
-       EnableSchedulingQueueHint                    bool
-       EnableAsyncPreemption                        bool
-       EnablePodLevelResources                      bool
-       EnableStorageCapacityScoring                 bool
-       EnableNodeDeclaredFeatures                   bool
-       EnableGangScheduling                         bool
-       EnableTaintTolerationComparisonOperators     bool
+       EnableDRAExtendedResource                     bool
+       EnableDRAPrioritizedList                      bool
+       EnableDRAAdminAccess                          bool
+       EnableDRAConsumableCapacity                   bool
+       EnableDRADeviceTaints                         bool
+       EnableDRADeviceBindingConditions              bool
+       EnableDRAPartitionableDevices                 bool
+       EnableDRAResourceClaimDeviceStatus            bool
+       EnableDRASchedulerFilterTimeout               bool
+       EnableDynamicResourceAllocation               bool
+       EnableVolumeAttributesClass                   bool
+       EnableCSIMigrationPortworx                    bool
+       EnableVolumeLimitScaling                      bool
+       EnableNodeInclusionPolicyInPodTopologySpread  bool
+       EnableMatchLabelKeysInPodTopologySpread       bool
+       EnableInPlacePodVerticalScaling               bool
+       EnableSidecarContainers                       bool
+       EnableSchedulingQueueHint                     bool
+       EnableAsyncPreemption                         bool
+       EnablePodLevelResources                       bool
+       EnableStorageCapacityScoring                  bool
+       EnableNodeDeclaredFeatures                    bool
+       EnableGangScheduling                          bool
+       EnableTaintTolerationComparisonOperators      bool
+       EnableInPlacePodLevelResourcesVerticalScaling bool
 }
 
 // NewSchedulerFeaturesFromGates copies the current state of the feature gates into the struct.
 func NewSchedulerFeaturesFromGates(featureGate featuregate.FeatureGate) Features {
        return Features{
-               EnableDRAExtendedResource:                    featureGate.Enabled(features.DRAExtendedResource),
-               EnableDRAPrioritizedList:                     featureGate.Enabled(features.DRAPrioritizedList),
-               EnableDRAAdminAccess:                         featureGate.Enabled(features.DRAAdminAccess),
-               EnableDRAConsumableCapacity:                  featureGate.Enabled(features.DRAConsumableCapacity),
-               EnableDRADeviceTaints:                        featureGate.Enabled(features.DRADeviceTaints),
-               EnableDRASchedulerFilterTimeout:              featureGate.Enabled(features.DRASchedulerFilterTimeout),
-               EnableDRAResourceClaimDeviceStatus:           featureGate.Enabled(features.DRAResourceClaimDeviceStatus),
-               EnableDRADeviceBindingConditions:             featureGate.Enabled(features.DRADeviceBindingConditions),
-               EnableDynamicResourceAllocation:              featureGate.Enabled(features.DynamicResourceAllocation),
-               EnableVolumeAttributesClass:                  featureGate.Enabled(features.VolumeAttributesClass),
-               EnableCSIMigrationPortworx:                   featureGate.Enabled(features.CSIMigrationPortworx),
-               EnableVolumeLimitScaling:                     featureGate.Enabled(features.VolumeLimitScaling),
-               EnableNodeInclusionPolicyInPodTopologySpread: featureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread),
-               EnableMatchLabelKeysInPodTopologySpread:      featureGate.Enabled(features.MatchLabelKeysInPodTopologySpread),
-               EnableInPlacePodVerticalScaling:              featureGate.Enabled(features.InPlacePodVerticalScaling),
-               EnableSidecarContainers:                      featureGate.Enabled(features.SidecarContainers),
-               EnableSchedulingQueueHint:                    featureGate.Enabled(features.SchedulerQueueingHints),
-               EnableAsyncPreemption:                        featureGate.Enabled(features.SchedulerAsyncPreemption),
-               EnablePodLevelResources:                      featureGate.Enabled(features.PodLevelResources),
-               EnableDRAPartitionableDevices:                featureGate.Enabled(features.DRAPartitionableDevices),
-               EnableStorageCapacityScoring:                 featureGate.Enabled(features.StorageCapacityScoring),
-               EnableNodeDeclaredFeatures:                   featureGate.Enabled(features.NodeDeclaredFeatures),
-               EnableGangScheduling:                         featureGate.Enabled(features.GangScheduling),
-               EnableTaintTolerationComparisonOperators:     featureGate.Enabled(features.TaintTolerationComparisonOperators),
+               EnableDRAExtendedResource:                     featureGate.Enabled(features.DRAExtendedResource),
+               EnableDRAPrioritizedList:                      featureGate.Enabled(features.DRAPrioritizedList),
+               EnableDRAAdminAccess:                          featureGate.Enabled(features.DRAAdminAccess),
+               EnableDRAConsumableCapacity:                   featureGate.Enabled(features.DRAConsumableCapacity),
+               EnableDRADeviceTaints:                         featureGate.Enabled(features.DRADeviceTaints),
+               EnableDRASchedulerFilterTimeout:               featureGate.Enabled(features.DRASchedulerFilterTimeout),
+               EnableDRAResourceClaimDeviceStatus:            featureGate.Enabled(features.DRAResourceClaimDeviceStatus),
+               EnableDRADeviceBindingConditions:              featureGate.Enabled(features.DRADeviceBindingConditions),
+               EnableDynamicResourceAllocation:               featureGate.Enabled(features.DynamicResourceAllocation),
+               EnableVolumeAttributesClass:                   featureGate.Enabled(features.VolumeAttributesClass),
+               EnableCSIMigrationPortworx:                    featureGate.Enabled(features.CSIMigrationPortworx),
+               EnableVolumeLimitScaling:                      featureGate.Enabled(features.VolumeLimitScaling),
+               EnableNodeInclusionPolicyInPodTopologySpread:  featureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread),
+               EnableMatchLabelKeysInPodTopologySpread:       featureGate.Enabled(features.MatchLabelKeysInPodTopologySpread),
+               EnableInPlacePodVerticalScaling:               featureGate.Enabled(features.InPlacePodVerticalScaling),
+               EnableSidecarContainers:                       featureGate.Enabled(features.SidecarContainers),
+               EnableSchedulingQueueHint:                     featureGate.Enabled(features.SchedulerQueueingHints),
+               EnableAsyncPreemption:                         featureGate.Enabled(features.SchedulerAsyncPreemption),
+               EnablePodLevelResources:                       featureGate.Enabled(features.PodLevelResources),
+               EnableDRAPartitionableDevices:                 featureGate.Enabled(features.DRAPartitionableDevices),
+               EnableStorageCapacityScoring:                  featureGate.Enabled(features.StorageCapacityScoring),
+               EnableNodeDeclaredFeatures:                    featureGate.Enabled(features.NodeDeclaredFeatures),
+               EnableGangScheduling:                          featureGate.Enabled(features.GangScheduling),
+               EnableTaintTolerationComparisonOperators:      featureGate.Enabled(features.TaintTolerationComparisonOperators),
+               EnableInPlacePodLevelResourcesVerticalScaling: featureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling),
        }
 }
index d2b2829e3086b8cef31e59d1386aeaf5fc372449..07fbc01e6d609fd141780c62232945f50df68451 100644 (file)
@@ -169,6 +169,7 @@ func NewBalancedAllocation(_ context.Context, baArgs runtime.Object, h fwk.Handl
                        scorer:                          balancedResourceScorer,
                        useRequested:                    true,
                        resources:                       args.Resources,
+                       enableInPlacePodLevelResourcesVerticalScaling: fts.EnableInPlacePodLevelResourcesVerticalScaling,
                },
        }, nil
 }
index be2ede7cd545ce8d10434a9205b3e41ecd25798a..2fc3eb19227a2a07dcb68c72b9912265f8787de4 100644 (file)
@@ -89,14 +89,15 @@ var nodeResourceStrategyTypeMap = map[config.ScoringStrategyType]scorer{
 
 // Fit is a plugin that checks if a node has sufficient resources.
 type Fit struct {
-       ignoredResources                sets.Set[string]
-       ignoredResourceGroups           sets.Set[string]
-       enableInPlacePodVerticalScaling bool
-       enableSidecarContainers         bool
-       enableSchedulingQueueHint       bool
-       enablePodLevelResources         bool
-       enableDRAExtendedResource       bool
-       handle                          fwk.Handle
+       ignoredResources                              sets.Set[string]
+       ignoredResourceGroups                         sets.Set[string]
+       enableInPlacePodVerticalScaling               bool
+       enableSidecarContainers                       bool
+       enableSchedulingQueueHint                     bool
+       enablePodLevelResources                       bool
+       enableDRAExtendedResource                     bool
+       enableInPlacePodLevelResourcesVerticalScaling bool
+       handle                                        fwk.Handle
        *resourceAllocationScorer
 }
 
@@ -199,15 +200,16 @@ func NewFit(_ context.Context, plArgs runtime.Object, h fwk.Handle, fts feature.
        }
 
        return &Fit{
-               ignoredResources:                sets.New(args.IgnoredResources...),
-               ignoredResourceGroups:           sets.New(args.IgnoredResourceGroups...),
-               enableInPlacePodVerticalScaling: fts.EnableInPlacePodVerticalScaling,
-               enableSidecarContainers:         fts.EnableSidecarContainers,
-               enableSchedulingQueueHint:       fts.EnableSchedulingQueueHint,
-               handle:                          h,
-               enablePodLevelResources:         fts.EnablePodLevelResources,
-               enableDRAExtendedResource:       fts.EnableDRAExtendedResource,
-               resourceAllocationScorer:        scorer,
+               ignoredResources:                              sets.New(args.IgnoredResources...),
+               ignoredResourceGroups:                         sets.New(args.IgnoredResourceGroups...),
+               enableInPlacePodVerticalScaling:               fts.EnableInPlacePodVerticalScaling,
+               enableSidecarContainers:                       fts.EnableSidecarContainers,
+               enableSchedulingQueueHint:                     fts.EnableSchedulingQueueHint,
+               handle:                                        h,
+               enablePodLevelResources:                       fts.EnablePodLevelResources,
+               enableDRAExtendedResource:                     fts.EnableDRAExtendedResource,
+               enableInPlacePodLevelResourcesVerticalScaling: fts.EnableInPlacePodLevelResourcesVerticalScaling,
+               resourceAllocationScorer:                      scorer,
        }, nil
 }
 
@@ -273,8 +275,6 @@ func shouldDelegateResourceToDRA(rName v1.ResourceName, nodeInfo fwk.NodeInfo, d
 //         Memory: 1G
 //
 // Result: CPU: 3, Memory: 3G
-// TODO(ndixita): modify computePodResourceRequest to accept opts of type
-// ResourceRequestOptions as the second parameter.
 func computePodResourceRequest(pod *v1.Pod, opts ResourceRequestsOptions) *preFilterState {
        // pod hasn't scheduled yet so we don't need to worry about InPlacePodVerticalScalingEnabled
        reqs := resource.PodRequests(pod, resource.PodResourcesOptions{
@@ -409,11 +409,12 @@ func (f *Fit) isSchedulableAfterPodScaleDown(targetPod, originalPod, modifiedPod
 
        // the other pod was scheduled, so modification or deletion may free up some resources.
        originalMaxResourceReq, modifiedMaxResourceReq := &framework.Resource{}, &framework.Resource{}
-       originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
-       modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
+       opts := resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling, InPlacePodLevelResourcesVerticalScalingEnabled: f.enableInPlacePodLevelResourcesVerticalScaling}
+       originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, opts))
+       modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, opts))
 
        // check whether the resource request of the modified pod is less than the original pod.
-       podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling})
+       podRequests := resource.PodRequests(targetPod, opts)
        for rName, rValue := range podRequests {
                if rValue.IsZero() {
                        // We only care about the resources requested by the pod we are trying to schedule.
index 8088591815b961dd4a9aaa2999e7a5bda85f23df..2953b71876a6ee75fdf85241a20c20cb3a9dbc7e 100644 (file)
@@ -1537,6 +1537,13 @@ func Test_isSchedulableAfterPodChange(t *testing.T) {
                        enableInPlacePodVerticalScaling: true,
                        expectedHint:                    fwk.QueueSkip,
                },
+               "skip-queue-on-other-pod-unrelated-pod-level-resource-scaled-down": {
+                       pod:                             st.MakePod().Name("pod1").UID("pod1").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+                       oldObj:                          st.MakePod().Name("pod2").UID("pod2").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceMemory: "2"}).Node("fake").Obj(),
+                       newObj:                          st.MakePod().Name("pod2").UID("pod2").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceMemory: "1"}).Node("fake").Obj(),
+                       enableInPlacePodVerticalScaling: true,
+                       expectedHint:                    fwk.QueueSkip,
+               },
                "queue-on-other-pod-some-resource-scale-down": {
                        pod:                             st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
                        oldObj:                          st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
@@ -1544,6 +1551,13 @@ func Test_isSchedulableAfterPodChange(t *testing.T) {
                        enableInPlacePodVerticalScaling: true,
                        expectedHint:                    fwk.Queue,
                },
+               "queue-on-other-pod-some-pod-level-resource-scale-down": {
+                       pod:                             st.MakePod().Name("pod1").UID("pod1").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+                       oldObj:                          st.MakePod().Name("pod2").UID("pod2").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
+                       newObj:                          st.MakePod().Name("pod2").UID("pod2").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
+                       enableInPlacePodVerticalScaling: true,
+                       expectedHint:                    fwk.Queue,
+               },
                "queue-on-target-pod-some-resource-scale-down": {
                        pod:                             st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
                        oldObj:                          st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
@@ -1551,6 +1565,13 @@ func Test_isSchedulableAfterPodChange(t *testing.T) {
                        enableInPlacePodVerticalScaling: true,
                        expectedHint:                    fwk.Queue,
                },
+               "queue-on-target-pod-some-pod-level-resource-scale-down": {
+                       pod:                             st.MakePod().Name("pod1").UID("pod1").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+                       oldObj:                          st.MakePod().Name("pod1").UID("pod1").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
+                       newObj:                          st.MakePod().Name("pod1").UID("pod1").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+                       enableInPlacePodVerticalScaling: true,
+                       expectedHint:                    fwk.Queue,
+               },
        }
 
        for name, tc := range testcases {
index 69d6337f7140668912f7a10eb263da03046e1aa3..b4f6f16b4bf9be0496f3159fbf167105977c9dac 100644 (file)
@@ -50,10 +50,11 @@ type DRACaches struct {
 
 // resourceAllocationScorer contains information to calculate resource allocation score.
 type resourceAllocationScorer struct {
-       Name                            string
-       enableInPlacePodVerticalScaling bool
-       enablePodLevelResources         bool
-       enableDRAExtendedResource       bool
+       Name                                          string
+       enableInPlacePodVerticalScaling               bool
+       enablePodLevelResources                       bool
+       enableDRAExtendedResource                     bool
+       enableInPlacePodLevelResourcesVerticalScaling bool
        // used to decide whether to use Requested or NonZeroRequested for
        // cpu and memory.
        useRequested bool
@@ -225,6 +226,7 @@ func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, reso
 
        opts := resourcehelper.PodResourcesOptions{
                UseStatusResources: r.enableInPlacePodVerticalScaling,
+               InPlacePodLevelResourcesVerticalScalingEnabled: r.enableInPlacePodLevelResourcesVerticalScaling,
                // SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
                SkipPodLevelResources: !r.enablePodLevelResources,
        }
index daa9768cc458c4e4a2b3148c1d3ac9b62ecacfaf..9e92527be172b9480939dc3f3c6625193e4770f0 100644 (file)
@@ -720,8 +720,10 @@ func (pi *PodInfo) CalculateResource() fwk.PodResource {
        }
        inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling)
        podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources)
+       inPlacePodLevelResourcesVerticalScalingEnabled := utilfeature.DefaultMutableFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling)
        requests := resourcehelper.PodRequests(pi.Pod, resourcehelper.PodResourcesOptions{
                UseStatusResources: inPlacePodVerticalScalingEnabled,
+               InPlacePodLevelResourcesVerticalScalingEnabled: inPlacePodLevelResourcesVerticalScalingEnabled,
                // SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
                SkipPodLevelResources: !podLevelResourcesEnabled,
        })
@@ -731,6 +733,7 @@ func (pi *PodInfo) CalculateResource() fwk.PodResource {
        if len(nonMissingContainerRequests) > 0 {
                non0Requests = resourcehelper.PodRequests(pi.Pod, resourcehelper.PodResourcesOptions{
                        UseStatusResources: inPlacePodVerticalScalingEnabled,
+                       InPlacePodLevelResourcesVerticalScalingEnabled: inPlacePodLevelResourcesVerticalScalingEnabled,
                        // SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
                        SkipPodLevelResources:       !podLevelResourcesEnabled,
                        NonMissingContainerRequests: nonMissingContainerRequests,
index 3a928ba8eec7cc6ba52f66d57776c3e1f24b603c..2da11eeb4b58ddf4ee30f7b244170d9b5588601f 100644 (file)
@@ -1704,6 +1704,8 @@ func TestPodInfoCalculateResources(t *testing.T) {
 
 func TestCalculatePodResourcesWithResize(t *testing.T) {
        featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
+       featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodLevelResourcesVerticalScaling, true)
+
        testpod := v1.Pod{
                ObjectMeta: metav1.ObjectMeta{
                        Namespace: "pod_resize_test",
@@ -1718,11 +1720,24 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
        restartAlways := v1.ContainerRestartPolicyAlways
 
        preparePodInfo := func(pod v1.Pod,
+               podRequests, podStatusResources,
                requests, statusResources,
                initRequests, initStatusResources,
                sidecarRequests, sidecarStatusResources *v1.ResourceList,
                resizeStatus []*v1.PodCondition) PodInfo {
 
+               if podRequests != nil {
+                       pod.Spec.Resources = &v1.ResourceRequirements{
+                               Requests: *podRequests,
+                       }
+               }
+
+               if podStatusResources != nil {
+                       pod.Status.Resources = &v1.ResourceRequirements{
+                               Requests: *podStatusResources,
+                       }
+               }
+
                if requests != nil {
                        pod.Spec.Containers = append(pod.Spec.Containers,
                                v1.Container{
@@ -1785,15 +1800,17 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
        }
 
        tests := []struct {
-               name                   string
-               requests               v1.ResourceList
-               statusResources        v1.ResourceList
-               initRequests           *v1.ResourceList
-               initStatusResources    *v1.ResourceList
-               resizeStatus           []*v1.PodCondition
-               sidecarRequests        *v1.ResourceList
-               sidecarStatusResources *v1.ResourceList
-               expectedResource       fwk.PodResource
+               name                    string
+               podLevelRequests        *v1.ResourceList
+               requests                v1.ResourceList
+               statusResources         v1.ResourceList
+               podLevelStatusResources *v1.ResourceList
+               initRequests            *v1.ResourceList
+               initStatusResources     *v1.ResourceList
+               resizeStatus            []*v1.PodCondition
+               sidecarRequests         *v1.ResourceList
+               sidecarStatusResources  *v1.ResourceList
+               expectedResource        fwk.PodResource
        }{
                {
                        name:            "Pod with no pending resize",
@@ -1808,6 +1825,21 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
                                Non0Mem: mem500M.Value(),
                        },
                },
+               {
+                       name:                    "Pod with pod-level resources no pending resize",
+                       podLevelRequests:        &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem500M},
+                       requests:                v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+                       podLevelStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+                       statusResources:         v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem500M},
+                       expectedResource: fwk.PodResource{
+                               Resource: &Resource{
+                                       MilliCPU: cpu700m.MilliValue(),
+                                       Memory:   mem500M.Value(),
+                               },
+                               Non0CPU: cpu700m.MilliValue(),
+                               Non0Mem: mem500M.Value(),
+                       },
+               },
                {
                        name:            "Pod with resize in progress",
                        requests:        v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
@@ -1827,6 +1859,27 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
                                Non0Mem: mem500M.Value(),
                        },
                },
+               {
+                       name:                    "Pod with pod-level resources and resize in progress",
+                       podLevelRequests:        &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem500M},
+                       requests:                v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+                       podLevelStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+                       statusResources:         v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem500M},
+                       resizeStatus: []*v1.PodCondition{
+                               {
+                                       Type:   v1.PodResizeInProgress,
+                                       Status: v1.ConditionTrue,
+                               },
+                       },
+                       expectedResource: fwk.PodResource{
+                               Resource: &Resource{
+                                       MilliCPU: cpu700m.MilliValue(),
+                                       Memory:   mem500M.Value(),
+                               },
+                               Non0CPU: cpu700m.MilliValue(),
+                               Non0Mem: mem500M.Value(),
+                       },
+               },
                {
                        name:            "Pod with deferred resize",
                        requests:        v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
@@ -1847,6 +1900,28 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
                                Non0Mem: mem800M.Value(),
                        },
                },
+               {
+                       name:                    "Pod with pod-level resources and with deferred resize",
+                       podLevelRequests:        &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+                       requests:                v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+                       podLevelStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+                       statusResources:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+                       resizeStatus: []*v1.PodCondition{
+                               {
+                                       Type:   v1.PodResizePending,
+                                       Status: v1.ConditionTrue,
+                                       Reason: v1.PodReasonDeferred,
+                               },
+                       },
+                       expectedResource: fwk.PodResource{
+                               Resource: &Resource{
+                                       MilliCPU: cpu700m.MilliValue(),
+                                       Memory:   mem800M.Value(),
+                               },
+                               Non0CPU: cpu700m.MilliValue(),
+                               Non0Mem: mem800M.Value(),
+                       },
+               },
                {
                        name:            "Pod with infeasible resize",
                        requests:        v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
@@ -1904,6 +1979,7 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
        for _, tt := range tests {
                t.Run(tt.name, func(t *testing.T) {
                        podInfo := preparePodInfo(*testpod.DeepCopy(),
+                               tt.podLevelRequests, tt.podLevelStatusResources,
                                &tt.requests, &tt.statusResources,
                                tt.initRequests, tt.initStatusResources,
                                tt.sidecarRequests, tt.sidecarStatusResources,
index bfe161c971211c9a1fae8ee4a80e2f7a9d6bc420..224e6c1fe18a2c6591f1d7bf1a4094909b5107c0 100644 (file)
@@ -794,6 +794,22 @@ func (p *PodWrapper) Res(resMap map[v1.ResourceName]string) *PodWrapper {
        return p
 }
 
+// PodLevelResourceRequests sets the given resource requests at the pod level.
+func (p *PodWrapper) PodLevelResourceRequests(reqMap map[v1.ResourceName]string) *PodWrapper {
+       if len(reqMap) == 0 {
+               return p
+       }
+
+       res := v1.ResourceList{}
+       for k, v := range reqMap {
+               res[k] = resource.MustParse(v)
+       }
+       p.Spec.Resources = &v1.ResourceRequirements{
+               Requests: res,
+       }
+       return p
+}
+
 // Req adds a new container to the inner pod with given resource map of requests.
 func (p *PodWrapper) Req(reqMap map[v1.ResourceName]string) *PodWrapper {
        if len(reqMap) == 0 {
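
For reference, the wrapper added above is exercised by the fit_test.go cases earlier in this diff roughly as in the sketch below (values match those test cases; the function name here is illustrative, and st aliases k8s.io/kubernetes/pkg/scheduler/testing as in the tests).

package example

import (
        v1 "k8s.io/api/core/v1"
        st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// podWithPodLevelCPU builds a pod whose CPU request is declared at the pod level
// rather than on an individual container.
func podWithPodLevelCPU() *v1.Pod {
        return st.MakePod().Name("pod1").UID("pod1").
                PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).
                Obj()
}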
index c37945f4b1e64e7fbdff45ce6de60d70249ba1e9..c8b3c7ece6f38cb733243688ae90fd0380252216 100644 (file)
@@ -42,6 +42,11 @@ type PodResourcesOptions struct {
        // when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling
        // feature is not enabled.
        UseStatusResources bool
+       // InPlacePodLevelResourcesVerticalScalingEnabled indicates whether pod-level resources
+       // reported by the PodStatus should be considered when evaluating the pod resources.
+       // This MUST be false if the InPlacePodLevelResourcesVerticalScaling
+       // feature is not enabled.
+       InPlacePodLevelResourcesVerticalScalingEnabled bool
        // ExcludeOverhead controls if pod overhead is excluded from the calculation.
        ExcludeOverhead bool
        // ContainerFn is called with the effective resources required for each container within the pod.
@@ -148,9 +153,25 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
        }
 
        if !opts.SkipPodLevelResources && IsPodLevelRequestsSet(pod) {
+
+               var effectiveReqs v1.ResourceList
+               if opts.InPlacePodLevelResourcesVerticalScalingEnabled && opts.UseStatusResources {
+                       if pod.Status.Resources != nil {
+                               effectiveReqs = determineEffectiveRequests(pod, &ResourceState{
+                                       Spec:      pod.Spec.Resources.Requests,
+                                       Actuated:  pod.Status.Resources.Requests,
+                                       Allocated: pod.Status.AllocatedResources,
+                               })
+                       }
+               }
+
                for resourceName, quantity := range pod.Spec.Resources.Requests {
                        if IsSupportedPodLevelResource(resourceName) {
                                reqs[resourceName] = quantity
+                               if effectiveReqs != nil {
+                                       reqs[resourceName] = effectiveReqs[resourceName]
+                               }
+
                        }
                }
        }
@@ -186,7 +207,11 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour
                if opts.UseStatusResources {
                        cs, found := containerStatuses[container.Name]
                        if found && cs.Resources != nil {
-                               containerReqs = determineContainerReqs(pod, &container, cs)
+                               containerReqs = determineEffectiveRequests(pod, &ResourceState{
+                                       Spec:      container.Resources.Requests,
+                                       Actuated:  cs.Resources.Requests,
+                                       Allocated: cs.AllocatedResources,
+                               })
                        }
                }
 
@@ -216,7 +241,11 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour
                        if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
                                cs, found := containerStatuses[container.Name]
                                if found && cs.Resources != nil {
-                                       containerReqs = determineContainerReqs(pod, &container, cs)
+                                       containerReqs = determineEffectiveRequests(pod, &ResourceState{
+                                               Spec:      container.Resources.Requests,
+                                               Actuated:  cs.Resources.Requests,
+                                               Allocated: cs.AllocatedResources,
+                                       })
                                }
                        }
                }
@@ -249,20 +278,24 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour
        return reqs
 }
 
-// determineContainerReqs will return a copy of the container requests based on if resizing is feasible or not.
-func determineContainerReqs(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
+type ResourceState struct {
+       Spec      v1.ResourceList
+       Actuated  v1.ResourceList
+       Allocated v1.ResourceList
+}
+
+func determineEffectiveRequests(pod *v1.Pod, rs *ResourceState) v1.ResourceList {
        if IsPodResizeInfeasible(pod) {
-               return max(cs.Resources.Requests, cs.AllocatedResources)
+               return max(rs.Actuated, rs.Allocated)
        }
-       return max(container.Resources.Requests, cs.Resources.Requests, cs.AllocatedResources)
+       return max(rs.Spec, rs.Actuated, rs.Allocated)
 }
 
-// determineContainerLimits will return a copy of the container limits based on if resizing is feasible or not.
-func determineContainerLimits(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
+func determineEffectiveLimits(pod *v1.Pod, rs *ResourceState) v1.ResourceList {
        if IsPodResizeInfeasible(pod) {
-               return cs.Resources.Limits.DeepCopy()
+               return rs.Actuated.DeepCopy()
        }
-       return max(container.Resources.Limits, cs.Resources.Limits)
+       return max(rs.Spec, rs.Actuated)
 }
 
 // IsPodResizeInfeasible returns true if the pod condition PodResizePending is set to infeasible.
@@ -309,9 +342,22 @@ func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
        // attempt to reuse the maps if passed, or allocate otherwise
        limits := AggregateContainerLimits(pod, opts)
        if !opts.SkipPodLevelResources && IsPodLevelResourcesSet(pod) {
+
+               var effectiveLims v1.ResourceList
+               if opts.InPlacePodLevelResourcesVerticalScalingEnabled && opts.UseStatusResources {
+                       if pod.Status.Resources != nil {
+                               effectiveLims = determineEffectiveLimits(pod, &ResourceState{
+                                       Spec:     pod.Spec.Resources.Limits,
+                                       Actuated: pod.Status.Resources.Limits,
+                               })
+                       }
+               }
                for resourceName, quantity := range pod.Spec.Resources.Limits {
                        if IsSupportedPodLevelResource(resourceName) {
                                limits[resourceName] = quantity
+                               if effectiveLims != nil {
+                                       limits[resourceName] = effectiveLims[resourceName]
+                               }
                        }
                }
        }
@@ -352,7 +398,10 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.Resource
                if opts.UseStatusResources {
                        cs, found := containerStatuses[container.Name]
                        if found && cs.Resources != nil {
-                               containerLimits = determineContainerLimits(pod, &container, cs)
+                               containerLimits = determineEffectiveLimits(pod, &ResourceState{
+                                       Spec:     containerLimits,
+                                       Actuated: cs.Resources.Limits,
+                               })
                        }
                }
 
@@ -377,7 +426,10 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.Resource
                        if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
                                cs, found := containerStatuses[container.Name]
                                if found && cs.Resources != nil {
-                                       containerLimits = determineContainerLimits(pod, &container, cs)
+                                       containerLimits = determineEffectiveLimits(pod, &ResourceState{
+                                               Spec:     containerLimits,
+                                               Actuated: cs.Resources.Limits,
+                                       })
                                }
                        }
                }
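
Taken together, the helpers.go changes mean that when both UseStatusResources and the new option are set, pod-level requests and limits resolve through the same max(spec, actuated, allocated) rule that container-level values already used, falling back to the status values alone when the resize is infeasible. A minimal sketch, assuming the pod-level status fields this commit reads (pod.Spec.Resources, pod.Status.Resources), reproduces the "resize in progress" test case added above:

package main

import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
        resourcehelper "k8s.io/component-helpers/resource"
)

func main() {
        pod := &v1.Pod{
                Spec: v1.PodSpec{
                        // Desired pod-level request after the resize.
                        Resources: &v1.ResourceRequirements{
                                Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("700m")},
                        },
                },
                Status: v1.PodStatus{
                        // Pod-level request actually in effect while the resize is in progress.
                        Resources: &v1.ResourceRequirements{
                                Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")},
                        },
                },
        }
        reqs := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
                UseStatusResources: true,
                InPlacePodLevelResourcesVerticalScalingEnabled: true,
        })
        cpu := reqs[v1.ResourceCPU]
        fmt.Println(cpu.String()) // 700m: the scheduler reserves the larger of spec and status
}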