func extractPodScaleDown(newPod, oldPod *v1.Pod) fwk.ActionType {
opt := resource.PodResourcesOptions{
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+ InPlacePodLevelResourcesVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling),
}
newPodRequests := resource.PodRequests(newPod, opt)
oldPodRequests := resource.PodRequests(oldPod, opt)
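// Hypothetical example of what the comparison below detects: if
// oldPodRequests is {cpu: 2} and newPodRequests is {cpu: 1}, the CPU request
// was scaled down in place, so the update may free capacity on the node and
// is classified as a scale-down action.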
// This struct allows us to break the dependency of the plugins on
// the internal k8s features pkg.
type Features struct {
- EnableDRAExtendedResource bool
- EnableDRAPrioritizedList bool
- EnableDRAAdminAccess bool
- EnableDRAConsumableCapacity bool
- EnableDRADeviceTaints bool
- EnableDRADeviceBindingConditions bool
- EnableDRAPartitionableDevices bool
- EnableDRAResourceClaimDeviceStatus bool
- EnableDRASchedulerFilterTimeout bool
- EnableDynamicResourceAllocation bool
- EnableVolumeAttributesClass bool
- EnableCSIMigrationPortworx bool
- EnableVolumeLimitScaling bool
- EnableNodeInclusionPolicyInPodTopologySpread bool
- EnableMatchLabelKeysInPodTopologySpread bool
- EnableInPlacePodVerticalScaling bool
- EnableSidecarContainers bool
- EnableSchedulingQueueHint bool
- EnableAsyncPreemption bool
- EnablePodLevelResources bool
- EnableStorageCapacityScoring bool
- EnableNodeDeclaredFeatures bool
- EnableGangScheduling bool
- EnableTaintTolerationComparisonOperators bool
+ EnableDRAExtendedResource bool
+ EnableDRAPrioritizedList bool
+ EnableDRAAdminAccess bool
+ EnableDRAConsumableCapacity bool
+ EnableDRADeviceTaints bool
+ EnableDRADeviceBindingConditions bool
+ EnableDRAPartitionableDevices bool
+ EnableDRAResourceClaimDeviceStatus bool
+ EnableDRASchedulerFilterTimeout bool
+ EnableDynamicResourceAllocation bool
+ EnableVolumeAttributesClass bool
+ EnableCSIMigrationPortworx bool
+ EnableVolumeLimitScaling bool
+ EnableNodeInclusionPolicyInPodTopologySpread bool
+ EnableMatchLabelKeysInPodTopologySpread bool
+ EnableInPlacePodVerticalScaling bool
+ EnableSidecarContainers bool
+ EnableSchedulingQueueHint bool
+ EnableAsyncPreemption bool
+ EnablePodLevelResources bool
+ EnableStorageCapacityScoring bool
+ EnableNodeDeclaredFeatures bool
+ EnableGangScheduling bool
+ EnableTaintTolerationComparisonOperators bool
+ EnableInPlacePodLevelResourcesVerticalScaling bool
}
// NewSchedulerFeaturesFromGates copies the current state of the feature gates into the struct.
func NewSchedulerFeaturesFromGates(featureGate featuregate.FeatureGate) Features {
return Features{
- EnableDRAExtendedResource: featureGate.Enabled(features.DRAExtendedResource),
- EnableDRAPrioritizedList: featureGate.Enabled(features.DRAPrioritizedList),
- EnableDRAAdminAccess: featureGate.Enabled(features.DRAAdminAccess),
- EnableDRAConsumableCapacity: featureGate.Enabled(features.DRAConsumableCapacity),
- EnableDRADeviceTaints: featureGate.Enabled(features.DRADeviceTaints),
- EnableDRASchedulerFilterTimeout: featureGate.Enabled(features.DRASchedulerFilterTimeout),
- EnableDRAResourceClaimDeviceStatus: featureGate.Enabled(features.DRAResourceClaimDeviceStatus),
- EnableDRADeviceBindingConditions: featureGate.Enabled(features.DRADeviceBindingConditions),
- EnableDynamicResourceAllocation: featureGate.Enabled(features.DynamicResourceAllocation),
- EnableVolumeAttributesClass: featureGate.Enabled(features.VolumeAttributesClass),
- EnableCSIMigrationPortworx: featureGate.Enabled(features.CSIMigrationPortworx),
- EnableVolumeLimitScaling: featureGate.Enabled(features.VolumeLimitScaling),
- EnableNodeInclusionPolicyInPodTopologySpread: featureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread),
- EnableMatchLabelKeysInPodTopologySpread: featureGate.Enabled(features.MatchLabelKeysInPodTopologySpread),
- EnableInPlacePodVerticalScaling: featureGate.Enabled(features.InPlacePodVerticalScaling),
- EnableSidecarContainers: featureGate.Enabled(features.SidecarContainers),
- EnableSchedulingQueueHint: featureGate.Enabled(features.SchedulerQueueingHints),
- EnableAsyncPreemption: featureGate.Enabled(features.SchedulerAsyncPreemption),
- EnablePodLevelResources: featureGate.Enabled(features.PodLevelResources),
- EnableDRAPartitionableDevices: featureGate.Enabled(features.DRAPartitionableDevices),
- EnableStorageCapacityScoring: featureGate.Enabled(features.StorageCapacityScoring),
- EnableNodeDeclaredFeatures: featureGate.Enabled(features.NodeDeclaredFeatures),
- EnableGangScheduling: featureGate.Enabled(features.GangScheduling),
- EnableTaintTolerationComparisonOperators: featureGate.Enabled(features.TaintTolerationComparisonOperators),
+ EnableDRAExtendedResource: featureGate.Enabled(features.DRAExtendedResource),
+ EnableDRAPrioritizedList: featureGate.Enabled(features.DRAPrioritizedList),
+ EnableDRAAdminAccess: featureGate.Enabled(features.DRAAdminAccess),
+ EnableDRAConsumableCapacity: featureGate.Enabled(features.DRAConsumableCapacity),
+ EnableDRADeviceTaints: featureGate.Enabled(features.DRADeviceTaints),
+ EnableDRASchedulerFilterTimeout: featureGate.Enabled(features.DRASchedulerFilterTimeout),
+ EnableDRAResourceClaimDeviceStatus: featureGate.Enabled(features.DRAResourceClaimDeviceStatus),
+ EnableDRADeviceBindingConditions: featureGate.Enabled(features.DRADeviceBindingConditions),
+ EnableDynamicResourceAllocation: featureGate.Enabled(features.DynamicResourceAllocation),
+ EnableVolumeAttributesClass: featureGate.Enabled(features.VolumeAttributesClass),
+ EnableCSIMigrationPortworx: featureGate.Enabled(features.CSIMigrationPortworx),
+ EnableVolumeLimitScaling: featureGate.Enabled(features.VolumeLimitScaling),
+ EnableNodeInclusionPolicyInPodTopologySpread: featureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread),
+ EnableMatchLabelKeysInPodTopologySpread: featureGate.Enabled(features.MatchLabelKeysInPodTopologySpread),
+ EnableInPlacePodVerticalScaling: featureGate.Enabled(features.InPlacePodVerticalScaling),
+ EnableSidecarContainers: featureGate.Enabled(features.SidecarContainers),
+ EnableSchedulingQueueHint: featureGate.Enabled(features.SchedulerQueueingHints),
+ EnableAsyncPreemption: featureGate.Enabled(features.SchedulerAsyncPreemption),
+ EnablePodLevelResources: featureGate.Enabled(features.PodLevelResources),
+ EnableDRAPartitionableDevices: featureGate.Enabled(features.DRAPartitionableDevices),
+ EnableStorageCapacityScoring: featureGate.Enabled(features.StorageCapacityScoring),
+ EnableNodeDeclaredFeatures: featureGate.Enabled(features.NodeDeclaredFeatures),
+ EnableGangScheduling: featureGate.Enabled(features.GangScheduling),
+ EnableTaintTolerationComparisonOperators: featureGate.Enabled(features.TaintTolerationComparisonOperators),
+ EnableInPlacePodLevelResourcesVerticalScaling: featureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling),
}
}
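// A minimal sketch of how the snapshot is consumed (the wiring below is
// illustrative, not an exact call site in this change):
//
//	fts := feature.NewSchedulerFeaturesFromGates(utilfeature.DefaultFeatureGate)
//	if fts.EnableInPlacePodLevelResourcesVerticalScaling {
//		// propagate the gate so resource helpers honor pod-level status resources
//	}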
scorer: balancedResourceScorer,
useRequested: true,
resources: args.Resources,
+ enableInPlacePodLevelResourcesVerticalScaling: fts.EnableInPlacePodLevelResourcesVerticalScaling,
},
}, nil
}
// Fit is a plugin that checks if a node has sufficient resources.
type Fit struct {
- ignoredResources sets.Set[string]
- ignoredResourceGroups sets.Set[string]
- enableInPlacePodVerticalScaling bool
- enableSidecarContainers bool
- enableSchedulingQueueHint bool
- enablePodLevelResources bool
- enableDRAExtendedResource bool
- handle fwk.Handle
+ ignoredResources sets.Set[string]
+ ignoredResourceGroups sets.Set[string]
+ enableInPlacePodVerticalScaling bool
+ enableSidecarContainers bool
+ enableSchedulingQueueHint bool
+ enablePodLevelResources bool
+ enableDRAExtendedResource bool
+ enableInPlacePodLevelResourcesVerticalScaling bool
+ handle fwk.Handle
*resourceAllocationScorer
}
}
return &Fit{
- ignoredResources: sets.New(args.IgnoredResources...),
- ignoredResourceGroups: sets.New(args.IgnoredResourceGroups...),
- enableInPlacePodVerticalScaling: fts.EnableInPlacePodVerticalScaling,
- enableSidecarContainers: fts.EnableSidecarContainers,
- enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
- handle: h,
- enablePodLevelResources: fts.EnablePodLevelResources,
- enableDRAExtendedResource: fts.EnableDRAExtendedResource,
- resourceAllocationScorer: scorer,
+ ignoredResources: sets.New(args.IgnoredResources...),
+ ignoredResourceGroups: sets.New(args.IgnoredResourceGroups...),
+ enableInPlacePodVerticalScaling: fts.EnableInPlacePodVerticalScaling,
+ enableSidecarContainers: fts.EnableSidecarContainers,
+ enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
+ handle: h,
+ enablePodLevelResources: fts.EnablePodLevelResources,
+ enableDRAExtendedResource: fts.EnableDRAExtendedResource,
+ enableInPlacePodLevelResourcesVerticalScaling: fts.EnableInPlacePodLevelResourcesVerticalScaling,
+ resourceAllocationScorer: scorer,
}, nil
}
// Memory: 1G
//
// Result: CPU: 3, Memory: 3G
-// TODO(ndixita): modify computePodResourceRequest to accept opts of type
-// ResourceRequestOptions as the second parameter.
func computePodResourceRequest(pod *v1.Pod, opts ResourceRequestsOptions) *preFilterState {
// pod hasn't scheduled yet so we don't need to worry about InPlacePodVerticalScalingEnabled
reqs := resource.PodRequests(pod, resource.PodResourcesOptions{
// the other pod was scheduled, so modification or deletion may free up some resources.
originalMaxResourceReq, modifiedMaxResourceReq := &framework.Resource{}, &framework.Resource{}
- originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
- modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
+ opts := resource.PodResourcesOptions{
+ UseStatusResources: f.enableInPlacePodVerticalScaling,
+ InPlacePodLevelResourcesVerticalScalingEnabled: f.enableInPlacePodLevelResourcesVerticalScaling,
+ }
+ originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, opts))
+ modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, opts))
// check whether the resource request of the modified pod is less than the original pod.
- podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling})
+ podRequests := resource.PodRequests(targetPod, opts)
for rName, rValue := range podRequests {
if rValue.IsZero() {
// We only care about the resources requested by the pod we are trying to schedule.
enableInPlacePodVerticalScaling: true,
expectedHint: fwk.QueueSkip,
},
+ "skip-queue-on-other-pod-unrelated-pod-level-resource-scaled-down": {
+ pod: st.MakePod().Name("pod1").UID("pod1").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+ oldObj: st.MakePod().Name("pod2").UID("pod2").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceMemory: "2"}).Node("fake").Obj(),
+ newObj: st.MakePod().Name("pod2").UID("pod2").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceMemory: "1"}).Node("fake").Obj(),
+ enableInPlacePodVerticalScaling: true,
+ expectedHint: fwk.QueueSkip,
+ },
"queue-on-other-pod-some-resource-scale-down": {
pod: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
oldObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
enableInPlacePodVerticalScaling: true,
expectedHint: fwk.Queue,
},
+ "queue-on-other-pod-some-pod-level-resource-scale-down": {
+ pod: st.MakePod().Name("pod1").UID("pod1").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+ oldObj: st.MakePod().Name("pod2").UID("pod2").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
+ newObj: st.MakePod().Name("pod2").UID("pod2").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
+ enableInPlacePodVerticalScaling: true,
+ expectedHint: fwk.Queue,
+ },
"queue-on-target-pod-some-resource-scale-down": {
pod: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
oldObj: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
enableInPlacePodVerticalScaling: true,
expectedHint: fwk.Queue,
},
+ "queue-on-target-pod-some-pod-level-resource-scale-down": {
+ pod: st.MakePod().Name("pod1").UID("pod1").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+ oldObj: st.MakePod().Name("pod1").UID("pod1").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
+ newObj: st.MakePod().Name("pod1").UID("pod1").PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+ enableInPlacePodVerticalScaling: true,
+ expectedHint: fwk.Queue,
+ },
}
for name, tc := range testcases {
// resourceAllocationScorer contains information to calculate resource allocation score.
type resourceAllocationScorer struct {
- Name string
- enableInPlacePodVerticalScaling bool
- enablePodLevelResources bool
- enableDRAExtendedResource bool
+ Name string
+ enableInPlacePodVerticalScaling bool
+ enablePodLevelResources bool
+ enableDRAExtendedResource bool
+ enableInPlacePodLevelResourcesVerticalScaling bool
// used to decide whether to use Requested or NonZeroRequested for
// cpu and memory.
useRequested bool
opts := resourcehelper.PodResourcesOptions{
UseStatusResources: r.enableInPlacePodVerticalScaling,
+ InPlacePodLevelResourcesVerticalScalingEnabled: r.enableInPlacePodLevelResourcesVerticalScaling,
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !r.enablePodLevelResources,
}
}
inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling)
podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources)
+ inPlacePodLevelResourcesVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling)
requests := resourcehelper.PodRequests(pi.Pod, resourcehelper.PodResourcesOptions{
UseStatusResources: inPlacePodVerticalScalingEnabled,
+ InPlacePodLevelResourcesVerticalScalingEnabled: inPlacePodLevelResourcesVerticalScalingEnabled,
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !podLevelResourcesEnabled,
})
if len(nonMissingContainerRequests) > 0 {
non0Requests = resourcehelper.PodRequests(pi.Pod, resourcehelper.PodResourcesOptions{
UseStatusResources: inPlacePodVerticalScalingEnabled,
+ InPlacePodLevelResourcesVerticalScalingEnabled: inPlacePodLevelResourcesVerticalScalingEnabled,
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !podLevelResourcesEnabled,
NonMissingContainerRequests: nonMissingContainerRequests,
func TestCalculatePodResourcesWithResize(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
+ featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodLevelResourcesVerticalScaling, true)
+
testpod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: "pod_resize_test",
restartAlways := v1.ContainerRestartPolicyAlways
preparePodInfo := func(pod v1.Pod,
+ podRequests, podStatusResources,
requests, statusResources,
initRequests, initStatusResources,
sidecarRequests, sidecarStatusResources *v1.ResourceList,
resizeStatus []*v1.PodCondition) PodInfo {
+ if podRequests != nil {
+ pod.Spec.Resources = &v1.ResourceRequirements{
+ Requests: *podRequests,
+ }
+ }
+
+ if podStatusResources != nil {
+ pod.Status.Resources = &v1.ResourceRequirements{
+ Requests: *podStatusResources,
+ }
+ }
+
if requests != nil {
pod.Spec.Containers = append(pod.Spec.Containers,
v1.Container{
}
tests := []struct {
- name string
- requests v1.ResourceList
- statusResources v1.ResourceList
- initRequests *v1.ResourceList
- initStatusResources *v1.ResourceList
- resizeStatus []*v1.PodCondition
- sidecarRequests *v1.ResourceList
- sidecarStatusResources *v1.ResourceList
- expectedResource fwk.PodResource
+ name string
+ podLevelRequests *v1.ResourceList
+ requests v1.ResourceList
+ statusResources v1.ResourceList
+ podLevelStatusResources *v1.ResourceList
+ initRequests *v1.ResourceList
+ initStatusResources *v1.ResourceList
+ resizeStatus []*v1.PodCondition
+ sidecarRequests *v1.ResourceList
+ sidecarStatusResources *v1.ResourceList
+ expectedResource fwk.PodResource
}{
{
name: "Pod with no pending resize",
Non0Mem: mem500M.Value(),
},
},
+ {
+ name: "Pod with pod-level resources no pending resize",
+ podLevelRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem500M},
+ requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+ podLevelStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+ statusResources: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem500M},
+ expectedResource: fwk.PodResource{
+ Resource: &Resource{
+ MilliCPU: cpu700m.MilliValue(),
+ Memory: mem500M.Value(),
+ },
+ Non0CPU: cpu700m.MilliValue(),
+ Non0Mem: mem500M.Value(),
+ },
+ },
{
name: "Pod with resize in progress",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
Non0Mem: mem500M.Value(),
},
},
+ {
+ name: "Pod with pod-level resources and resize in progress",
+ podLevelRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem500M},
+ requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+ podLevelStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+ statusResources: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem500M},
+ resizeStatus: []*v1.PodCondition{
+ {
+ Type: v1.PodResizeInProgress,
+ Status: v1.ConditionTrue,
+ },
+ },
+ expectedResource: fwk.PodResource{
+ Resource: &Resource{
+ MilliCPU: cpu700m.MilliValue(),
+ Memory: mem500M.Value(),
+ },
+ Non0CPU: cpu700m.MilliValue(),
+ Non0Mem: mem500M.Value(),
+ },
+ },
{
name: "Pod with deferred resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
Non0Mem: mem800M.Value(),
},
},
+ {
+ name: "Pod with pod-level resources and with deferred resize",
+ podLevelRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+ requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+ podLevelStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+ statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+ resizeStatus: []*v1.PodCondition{
+ {
+ Type: v1.PodResizePending,
+ Status: v1.ConditionTrue,
+ Reason: v1.PodReasonDeferred,
+ },
+ },
+ expectedResource: fwk.PodResource{
+ Resource: &Resource{
+ MilliCPU: cpu700m.MilliValue(),
+ Memory: mem800M.Value(),
+ },
+ Non0CPU: cpu700m.MilliValue(),
+ Non0Mem: mem800M.Value(),
+ },
+ },
{
name: "Pod with infeasible resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
podInfo := preparePodInfo(*testpod.DeepCopy(),
+ tt.podLevelRequests, tt.podLevelStatusResources,
&tt.requests, &tt.statusResources,
tt.initRequests, tt.initStatusResources,
tt.sidecarRequests, tt.sidecarStatusResources,
return p
}
+// PodLevelResourceRequests sets the given resource requests at the pod level of the inner pod.
+func (p *PodWrapper) PodLevelResourceRequests(reqMap map[v1.ResourceName]string) *PodWrapper {
+ if len(reqMap) == 0 {
+ return p
+ }
+
+ res := v1.ResourceList{}
+ for k, v := range reqMap {
+ res[k] = resource.MustParse(v)
+ }
+ p.Spec.Resources = &v1.ResourceRequirements{
+ Requests: res,
+ }
+ return p
+}
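// Example usage (mirroring the queueing-hint tests above): build a pod with a
// 2-CPU pod-level request:
//
//	pod := MakePod().Name("pod2").UID("pod2").
//		PodLevelResourceRequests(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).
//		Node("fake").Obj()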
+
// Req adds a new container to the inner pod with given resource map of requests.
func (p *PodWrapper) Req(reqMap map[v1.ResourceName]string) *PodWrapper {
if len(reqMap) == 0 {
// when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling
// feature is not enabled.
UseStatusResources bool
+ // InPlacePodLevelResourcesVerticalScalingEnabled indicates whether pod-level
+ // resources reported by the PodStatus should be considered when evaluating
+ // the pod resources. This MUST be false if the
+ // InPlacePodLevelResourcesVerticalScaling feature is not enabled.
+ InPlacePodLevelResourcesVerticalScalingEnabled bool
// ExcludeOverhead controls if pod overhead is excluded from the calculation.
ExcludeOverhead bool
// ContainerFn is called with the effective resources required for each container within the pod.
}
if !opts.SkipPodLevelResources && IsPodLevelRequestsSet(pod) {
+ var effectiveReqs v1.ResourceList
+ if opts.InPlacePodLevelResourcesVerticalScalingEnabled && opts.UseStatusResources && pod.Status.Resources != nil {
+ effectiveReqs = determineEffectiveRequests(pod, &ResourceState{
+ Spec: pod.Spec.Resources.Requests,
+ Actuated: pod.Status.Resources.Requests,
+ Allocated: pod.Status.AllocatedResources,
+ })
+ }
+
for resourceName, quantity := range pod.Spec.Resources.Requests {
if IsSupportedPodLevelResource(resourceName) {
reqs[resourceName] = quantity
+ if effectiveReqs != nil {
+ reqs[resourceName] = effectiveReqs[resourceName]
+ }
}
}
}
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
- containerReqs = determineContainerReqs(pod, &container, cs)
+ containerReqs = determineEffectiveRequests(pod, &ResourceState{
+ Spec: container.Resources.Requests,
+ Actuated: cs.Resources.Requests,
+ Allocated: cs.AllocatedResources,
+ })
}
}
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
- containerReqs = determineContainerReqs(pod, &container, cs)
+ containerReqs = determineEffectiveRequests(pod, &ResourceState{
+ Spec: container.Resources.Requests,
+ Actuated: cs.Resources.Requests,
+ Allocated: cs.AllocatedResources,
+ })
}
}
}
return reqs
}
-// determineContainerReqs will return a copy of the container requests based on if resizing is feasible or not.
-func determineContainerReqs(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
+// ResourceState groups the spec, actuated, and allocated values of a resource
+// list, used to compute the effective requests or limits during an in-place
+// resize.
+type ResourceState struct {
+ Spec v1.ResourceList
+ Actuated v1.ResourceList
+ Allocated v1.ResourceList
+}
+
+// determineEffectiveRequests returns a copy of the effective requests derived
+// from the given resource state, based on whether the pending resize is
+// feasible or not.
+func determineEffectiveRequests(pod *v1.Pod, rs *ResourceState) v1.ResourceList {
if IsPodResizeInfeasible(pod) {
- return max(cs.Resources.Requests, cs.AllocatedResources)
+ return max(rs.Actuated, rs.Allocated)
}
- return max(container.Resources.Requests, cs.Resources.Requests, cs.AllocatedResources)
+ return max(rs.Spec, rs.Actuated, rs.Allocated)
}
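// Worked example (hypothetical quantities): with Spec=700m, Actuated=500m, and
// Allocated=600m, a feasible (or in-progress) resize yields
// max(700m, 500m, 600m) = 700m, while an infeasible resize ignores the spec
// and yields max(500m, 600m) = 600m.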
-// determineContainerLimits will return a copy of the container limits based on if resizing is feasible or not.
-func determineContainerLimits(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
+// determineEffectiveLimits returns a copy of the effective limits derived from
+// the given resource state, based on whether the pending resize is feasible or
+// not.
+func determineEffectiveLimits(pod *v1.Pod, rs *ResourceState) v1.ResourceList {
if IsPodResizeInfeasible(pod) {
- return cs.Resources.Limits.DeepCopy()
+ return rs.Actuated.DeepCopy()
}
- return max(container.Resources.Limits, cs.Resources.Limits)
+ return max(rs.Spec, rs.Actuated)
}
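// Worked example (hypothetical quantities): with Spec=1Gi and Actuated=800Mi,
// a feasible resize yields max(1Gi, 800Mi) = 1Gi, while an infeasible resize
// keeps the actuated 800Mi.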
// IsPodResizeInfeasible returns true if the pod condition PodResizePending is set to infeasible.
// attempt to reuse the maps if passed, or allocate otherwise
limits := AggregateContainerLimits(pod, opts)
if !opts.SkipPodLevelResources && IsPodLevelResourcesSet(pod) {
+ var effectiveLims v1.ResourceList
+ if opts.InPlacePodLevelResourcesVerticalScalingEnabled && opts.UseStatusResources && pod.Status.Resources != nil {
+ effectiveLims = determineEffectiveLimits(pod, &ResourceState{
+ Spec: pod.Spec.Resources.Limits,
+ Actuated: pod.Status.Resources.Limits,
+ })
+ }
for resourceName, quantity := range pod.Spec.Resources.Limits {
if IsSupportedPodLevelResource(resourceName) {
limits[resourceName] = quantity
+ if effectiveLims != nil {
+ limits[resourceName] = effectiveLims[resourceName]
+ }
}
}
}
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
- containerLimits = determineContainerLimits(pod, &container, cs)
+ containerLimits = determineEffectiveLimits(pod, &ResourceState{
+ Spec: containerLimits,
+ Actuated: cs.Resources.Limits,
+ })
}
}
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
- containerLimits = determineContainerLimits(pod, &container, cs)
+ containerLimits = determineEffectiveLimits(pod, &ResourceState{
+ Spec: containerLimits,
+ Actuated: cs.Resources.Limits,
+ })
}
}
}