Remove filterOutSchedulableSimple

Łukasz Osipiuk 2020-01-15 19:01:27 +01:00
parent 2c81caabb1
commit dd9fe48f46
5 changed files with 97 additions and 333 deletions

View File

@@ -133,10 +133,6 @@ type AutoscalingOptions struct {
MaxBulkSoftTaintCount int
// MaxBulkSoftTaintTime sets the maximum duration of single run of PreferNoSchedule tainting.
MaxBulkSoftTaintTime time.Duration
// Filtering out schedulable pods before CA scale up by trying to pack the schedulable pods on free capacity on existing nodes.
// Setting it to false employs a more lenient filtering approach that does not try to pack the pods on the nodes.
// Pods with nominatedNodeName set are always filtered out.
FilterOutSchedulablePodsUsesPacking bool
// IgnoredTaints is a list of taints to ignore when considering a node template for scheduling.
IgnoredTaints []string
// AWSUseStaticInstanceList tells if AWS cloud provider use static instance type list or dynamically fetch from remote APIs.
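The field is deleted outright rather than deprecated, so any code still setting it stops compiling. A minimal sketch of constructing the trimmed options, using only field names visible in this diff (the values, and the taint key, are illustrative):

opts := config.AutoscalingOptions{
	// Neighbouring fields kept unchanged by this commit.
	MaxBulkSoftTaintCount: 10,
	MaxBulkSoftTaintTime:  3 * time.Second,
	IgnoredTaints:         []string{"example.com/some-taint"}, // hypothetical taint key
	// FilterOutSchedulablePodsUsesPacking: true, // removed: setting it is now a compile error
}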

View File

@@ -26,7 +26,6 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/metrics"
"k8s.io/autoscaler/cluster-autoscaler/processors/pods"
"k8s.io/autoscaler/cluster-autoscaler/simulator"
"k8s.io/autoscaler/cluster-autoscaler/utils/glogx"
schedulerutil "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
apiv1 "k8s.io/api/core/v1"
@@ -74,13 +73,8 @@ func (filterOutSchedulablePodListProcessor) Process(
unschedulablePodsToHelp = unschedulablePods
}
if context.FilterOutSchedulablePodsUsesPacking {
unschedulablePodsToHelp = filterOutSchedulableByPacking(unschedulablePodsToHelp, readyNodes, allScheduledPods,
context.PredicateChecker, context.ExpendablePodsPriorityCutoff, true)
} else {
unschedulablePodsToHelp = filterOutSchedulableSimple(unschedulablePodsToHelp, readyNodes, allScheduledPods,
context.PredicateChecker, context.ExpendablePodsPriorityCutoff)
}
unschedulablePodsToHelp = filterOutSchedulableByPacking(unschedulablePodsToHelp, readyNodes, allScheduledPods,
context.PredicateChecker, context.ExpendablePodsPriorityCutoff, true)
metrics.UpdateDurationFromStart(metrics.FilterOutSchedulable, filterOutSchedulableStart)
@@ -140,44 +134,3 @@ func moreImportantPod(pod1, pod2 *apiv1.Pod) bool {
p2 := pod.GetPodPriority(pod2)
return p1 > p2
}
// filterOutSchedulableSimple checks whether pods from <unschedulableCandidates> marked as unschedulable
// by Scheduler actually can't be scheduled on any node and filter out the ones that can.
// It takes into account pods that are bound to node and will be scheduled after lower priority pod preemption.
func filterOutSchedulableSimple(unschedulableCandidates []*apiv1.Pod, nodes []*apiv1.Node, allScheduled []*apiv1.Pod,
predicateChecker *simulator.PredicateChecker, expendablePodsPriorityCutoff int) []*apiv1.Pod {
var unschedulablePods []*apiv1.Pod
nonExpendableScheduled := utils.FilterOutExpendablePods(allScheduled, expendablePodsPriorityCutoff)
nodeNameToNodeInfo := schedulerutil.CreateNodeNameToInfoMap(nonExpendableScheduled, nodes)
podSchedulable := make(utils.PodSchedulableMap)
loggingQuota := glogx.PodsLoggingQuota()
for _, pod := range unschedulableCandidates {
cachedError, found := podSchedulable.Get(pod)
// Try to get result from cache.
if found {
if cachedError != nil {
unschedulablePods = append(unschedulablePods, pod)
} else {
glogx.V(4).UpTo(loggingQuota).Infof("Pod %s marked as unschedulable can be scheduled (based on simulation run for other pod owned by the same controller). Ignoring in scale up.", pod.Name)
}
continue
}
// Not found in cache, have to run the predicates.
nodeName, err := predicateChecker.FitsAny(pod, nodeNameToNodeInfo)
// err returned from FitsAny isn't a PredicateError.
// Hello, ugly hack. I wish you weren't here.
var predicateError *simulator.PredicateError
if err != nil {
predicateError = simulator.NewPredicateError("FitsAny", err, nil, nil)
unschedulablePods = append(unschedulablePods, pod)
} else {
glogx.V(4).UpTo(loggingQuota).Infof("Pod %s marked as unschedulable can be scheduled on %s. Ignoring in scale up.", pod.Name, nodeName)
}
podSchedulable.Set(pod, predicateError)
}
glogx.V(4).Over(loggingQuota).Infof("%v other pods marked as unschedulable can be scheduled.", -loggingQuota.Left())
return unschedulablePods
}
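The practical difference between the two strategies is whether a pod that fits also consumes the capacity it fits into. filterOutSchedulableSimple checked each pod against the nodes' original free capacity via FitsAny, so several pods could all be judged schedulable into the same gap; filterOutSchedulableByPacking reserves the space as it goes. A standalone sketch of that distinction, using hypothetical stand-in types rather than the simulator's real node infos:

package main

import "fmt"

type pod struct {
	name string
	cpu  int64
}

type node struct {
	name    string
	freeCPU int64
}

// simpleFilter mirrors the removed behaviour: every pod is checked against
// the nodes' original free capacity, so fits are counted independently.
func simpleFilter(pods []pod, nodes []node) (unschedulable []pod) {
	for _, p := range pods {
		fits := false
		for _, n := range nodes {
			if p.cpu <= n.freeCPU {
				fits = true
				break
			}
		}
		if !fits {
			unschedulable = append(unschedulable, p)
		}
	}
	return
}

// packingFilter mirrors the surviving behaviour: when a pod fits, it is
// packed onto the node, shrinking the capacity seen by later pods.
func packingFilter(pods []pod, nodes []node) (unschedulable []pod) {
	free := make([]int64, len(nodes))
	for i, n := range nodes {
		free[i] = n.freeCPU
	}
	for _, p := range pods {
		fits := false
		for i := range free {
			if p.cpu <= free[i] {
				free[i] -= p.cpu // reserve the capacity this pod occupies
				fits = true
				break
			}
		}
		if !fits {
			unschedulable = append(unschedulable, p)
		}
	}
	return
}

func main() {
	pods := []pod{{"a", 600}, {"b", 600}}
	nodes := []node{{"n1", 1000}}
	fmt.Println(len(simpleFilter(pods, nodes)))  // 0: both pods fit the same gap
	fmt.Println(len(packingFilter(pods, nodes))) // 1: only one pod can be packed
}

The packing result is what scale-up actually needs: the pods that would still be pending after existing free capacity is used up.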

View File

@@ -103,65 +103,3 @@ func TestFilterOutSchedulableByPacking(t *testing.T) {
assert.Equal(t, p3_1, res4[3])
assert.Equal(t, p3_2, res4[4])
}
func TestFilterOutSchedulableSimple(t *testing.T) {
rc1 := apiv1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "rc1",
Namespace: "default",
SelfLink: testapi.Default.SelfLink("replicationcontrollers", "rc"),
UID: "12345678-1234-1234-1234-123456789012",
},
}
rc2 := apiv1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "rc2",
Namespace: "default",
SelfLink: testapi.Default.SelfLink("replicationcontrollers", "rc"),
UID: "12345678-1234-1234-1234-12345678901a",
},
}
p1 := BuildTestPod("p1", 1500, 200000)
p2_1 := BuildTestPod("p2_2", 3000, 200000)
p2_1.OwnerReferences = GenerateOwnerReferences(rc1.Name, "ReplicationController", "extensions/v1beta1", rc1.UID)
p2_2 := BuildTestPod("p2_2", 3000, 200000)
p2_2.OwnerReferences = GenerateOwnerReferences(rc1.Name, "ReplicationController", "extensions/v1beta1", rc1.UID)
p3_1 := BuildTestPod("p3", 100, 200000)
p3_1.OwnerReferences = GenerateOwnerReferences(rc2.Name, "ReplicationController", "extensions/v1beta1", rc2.UID)
p3_2 := BuildTestPod("p3", 100, 200000)
p3_2.OwnerReferences = GenerateOwnerReferences(rc2.Name, "ReplicationController", "extensions/v1beta1", rc2.UID)
unschedulablePods := []*apiv1.Pod{p1, p2_1, p2_2, p3_1, p3_2}
scheduledPod1 := BuildTestPod("s1", 100, 200000)
scheduledPod2 := BuildTestPod("s2", 1500, 200000)
scheduledPod3 := BuildTestPod("s3", 4000, 200000)
var priority1 int32 = 1
scheduledPod3.Spec.Priority = &priority1
scheduledPod1.Spec.NodeName = "node1"
scheduledPod2.Spec.NodeName = "node1"
scheduledPod2.Spec.NodeName = "node1"
podWaitingForPreemption := BuildTestPod("w1", 1500, 200000)
var priority100 int32 = 100
podWaitingForPreemption.Spec.Priority = &priority100
podWaitingForPreemption.Status.NominatedNodeName = "node1"
node := BuildTestNode("node1", 2000, 2000000)
SetNodeReadyState(node, true, time.Time{})
predicateChecker := simulator.NewTestPredicateChecker()
res := filterOutSchedulableSimple(unschedulablePods, []*apiv1.Node{node}, []*apiv1.Pod{scheduledPod1, scheduledPod3}, predicateChecker, 10)
assert.Equal(t, 2, len(res))
assert.Equal(t, p2_1, res[0])
assert.Equal(t, p2_2, res[1])
res2 := filterOutSchedulableSimple(unschedulablePods, []*apiv1.Node{node}, []*apiv1.Pod{scheduledPod1, scheduledPod2, scheduledPod3}, predicateChecker, 10)
assert.Equal(t, 3, len(res2))
assert.Equal(t, p1, res2[0])
assert.Equal(t, p2_1, res2[1])
assert.Equal(t, p2_2, res2[2])
}
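With the simple variant and its test gone, coverage rests on TestFilterOutSchedulableByPacking above. A hedged sketch of exercising the surviving function with the six-argument signature visible earlier in this diff; unschedulablePods and scheduledPods stand for fixtures like the ones the removed test built:

node := BuildTestNode("node1", 2000, 2000000)
SetNodeReadyState(node, true, time.Time{})
predicateChecker := simulator.NewTestPredicateChecker()
// The final bool mirrors the call in the pod list processor above.
res := filterOutSchedulableByPacking(unschedulablePods, []*apiv1.Node{node},
	scheduledPods, predicateChecker, 10, true)
assert.True(t, len(res) <= len(unschedulablePods)) // filtering only ever removes pods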

View File

@@ -172,15 +172,14 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
// Create context with mocked lister registry.
options := config.AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 1,
MaxCoresTotal: 10,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
FilterOutSchedulablePodsUsesPacking: true,
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 1,
MaxCoresTotal: 10,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
}
processorCallbacks := newStaticAutoscalerProcessorCallbacks()
@@ -359,17 +358,16 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
// Create context with mocked lister registry.
options := config.AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 100,
MaxCoresTotal: 100,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
NodeAutoprovisioningEnabled: true,
MaxAutoprovisionedNodeGroupCount: 10, // Pods with null priority are always non expendable. Test if it works.
FilterOutSchedulablePodsUsesPacking: true,
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 100,
MaxCoresTotal: 100,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
NodeAutoprovisioningEnabled: true,
MaxAutoprovisionedNodeGroupCount: 10, // Pods with null priority are always non expendable. Test if it works.
}
processorCallbacks := newStaticAutoscalerProcessorCallbacks()
@@ -494,16 +492,15 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) {
// Create context with mocked lister registry.
options := config.AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 10,
MaxCoresTotal: 10,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
MaxNodeProvisionTime: 10 * time.Second,
FilterOutSchedulablePodsUsesPacking: true,
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 10,
MaxCoresTotal: 10,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
MaxNodeProvisionTime: 10 * time.Second,
}
processorCallbacks := newStaticAutoscalerProcessorCallbacks()
@@ -571,119 +568,6 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) {
podDisruptionBudgetListerMock, daemonSetListerMock, onScaleUpMock, onScaleDownMock)
}
func TestStaticAutoscalerRunOncePodsWithFilterOutSchedulablePodsUsesPackingFalse(t *testing.T) {
readyNodeLister := kubernetes.NewTestNodeLister(nil)
allNodeLister := kubernetes.NewTestNodeLister(nil)
scheduledPodMock := &podListerMock{}
unschedulablePodMock := &podListerMock{}
podDisruptionBudgetListerMock := &podDisruptionBudgetListerMock{}
daemonSetListerMock := &daemonSetListerMock{}
onScaleUpMock := &onScaleUpMock{}
onScaleDownMock := &onScaleDownMock{}
n1 := BuildTestNode("n1", 100, 1000)
SetNodeReadyState(n1, true, time.Now())
n2 := BuildTestNode("n2", 1000, 1000)
SetNodeReadyState(n2, true, time.Now())
n3 := BuildTestNode("n3", 1000, 1000)
SetNodeReadyState(n3, true, time.Now())
// shared owner reference
ownerRef := GenerateOwnerReferences("rs", "ReplicaSet", "extensions/v1beta1", "")
p1 := BuildTestPod("p1", 40, 0)
p1.OwnerReferences = ownerRef
p1.Spec.NodeName = "n1"
p2 := BuildTestPod("p2", 400, 0)
p2.OwnerReferences = ownerRef
p2.Spec.NodeName = "n2"
p3 := BuildTestPod("p3", 400, 0)
p3.OwnerReferences = ownerRef
p3.Spec.NodeName = "n2"
p4 := BuildTestPod("p4", 500, 0)
p4.OwnerReferences = ownerRef
p5 := BuildTestPod("p5", 800, 0)
p5.OwnerReferences = ownerRef
p5.Status.NominatedNodeName = "n3"
p6 := BuildTestPod("p6", 1000, 0)
p6.OwnerReferences = ownerRef
provider := testprovider.NewTestCloudProvider(
func(id string, delta int) error {
return onScaleUpMock.ScaleUp(id, delta)
}, func(id string, name string) error {
return onScaleDownMock.ScaleDown(id, name)
})
provider.AddNodeGroup("ng1", 0, 10, 1)
provider.AddNodeGroup("ng2", 0, 10, 2)
provider.AddNode("ng1", n1)
provider.AddNode("ng2", n2)
provider.AddNode("ng2", n3)
assert.NotNil(t, provider)
ng2 := reflect.ValueOf(provider.GetNodeGroup("ng2")).Interface().(*testprovider.TestNodeGroup)
assert.NotNil(t, ng2)
// Create context with mocked lister registry.
options := config.AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 10,
MaxCoresTotal: 10,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
ExpendablePodsPriorityCutoff: 10,
//Turn off filtering schedulables using packing
FilterOutSchedulablePodsUsesPacking: false,
}
processorCallbacks := newStaticAutoscalerProcessorCallbacks()
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, processorCallbacks)
assert.NoError(t, err)
listerRegistry := kube_util.NewListerRegistry(allNodeLister, readyNodeLister, scheduledPodMock,
unschedulablePodMock, podDisruptionBudgetListerMock, daemonSetListerMock,
nil, nil, nil, nil)
context.ListerRegistry = listerRegistry
clusterStateConfig := clusterstate.ClusterStateRegistryConfig{
OkTotalUnreadyCount: 1,
MaxNodeProvisionTime: 10 * time.Second,
}
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, newBackoff())
sd := NewScaleDown(&context, clusterState)
autoscaler := &StaticAutoscaler{
AutoscalingContext: &context,
clusterStateRegistry: clusterState,
lastScaleUpTime: time.Now(),
lastScaleDownFailTime: time.Now(),
scaleDown: sd,
processors: NewTestProcessors(),
processorCallbacks: processorCallbacks,
}
// Scale up
readyNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3})
allNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3})
scheduledPodMock.On("List").Return([]*apiv1.Pod{p1, p2, p3}, nil).Times(2) // 1 to get pods + 1 when building nodeInfo map
unschedulablePodMock.On("List").Return([]*apiv1.Pod{p4, p5, p6}, nil).Once()
daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
onScaleUpMock.On("ScaleUp", "ng2", 2).Return(nil).Once()
err = autoscaler.RunOnce(time.Now())
assert.NoError(t, err)
mock.AssertExpectationsForObjects(t, scheduledPodMock, unschedulablePodMock,
podDisruptionBudgetListerMock, daemonSetListerMock, onScaleUpMock, onScaleDownMock)
}
func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) {
readyNodeLister := kubernetes.NewTestNodeLister(nil)
allNodeLister := kubernetes.NewTestNodeLister(nil)
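The scenario the deleted test singled out, a pod whose Status.NominatedNodeName is already set, does not lose coverage: per the flag documentation removed above, such pods are always filtered out under either strategy. The relevant fixture pattern, as it appeared in the removed test:

p5 := BuildTestPod("p5", 800, 0)
p5.Status.NominatedNodeName = "n3" // scheduler already nominated a node for this pod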
@@ -751,16 +635,15 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) {
// Create context with mocked lister registry.
options := config.AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 10,
MaxCoresTotal: 10,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
ExpendablePodsPriorityCutoff: 10,
FilterOutSchedulablePodsUsesPacking: true,
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 10,
MaxCoresTotal: 10,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
ExpendablePodsPriorityCutoff: 10,
}
processorCallbacks := newStaticAutoscalerProcessorCallbacks()
@@ -1029,16 +912,15 @@ func TestStaticAutoscalerInstaceCreationErrors(t *testing.T) {
// Create context with mocked lister registry.
options := config.AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 10,
MaxCoresTotal: 10,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
ExpendablePodsPriorityCutoff: 10,
FilterOutSchedulablePodsUsesPacking: true,
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
ScaleDownUtilizationThreshold: 0.5,
MaxNodesTotal: 10,
MaxCoresTotal: 10,
MaxMemoryTotal: 100000,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUnneededTime: time.Minute,
ExpendablePodsPriorityCutoff: 10,
}
processorCallbacks := newStaticAutoscalerProcessorCallbacks()

View File

@@ -163,14 +163,10 @@ var (
nodeAutoprovisioningEnabled = flag.Bool("node-autoprovisioning-enabled", false, "Should CA autoprovision node groups when needed")
maxAutoprovisionedNodeGroupCount = flag.Int("max-autoprovisioned-node-group-count", 15, "The maximum number of autoprovisioned groups in the cluster.")
unremovableNodeRecheckTimeout = flag.Duration("unremovable-node-recheck-timeout", 5*time.Minute, "The timeout before we check again a node that couldn't be removed before")
expendablePodsPriorityCutoff = flag.Int("expendable-pods-priority-cutoff", -10, "Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.")
regional = flag.Bool("regional", false, "Cluster is regional.")
newPodScaleUpDelay = flag.Duration("new-pod-scale-up-delay", 0*time.Second, "Pods less than this old will not be considered for scale-up.")
filterOutSchedulablePodsUsesPacking = flag.Bool("filter-out-schedulable-pods-uses-packing", true,
"Filtering out schedulable pods before CA scale up by trying to pack the schedulable pods on free capacity on existing nodes."+
"Setting it to false employs a more lenient filtering approach that does not try to pack the pods on the nodes."+
"Pods with nominatedNodeName set are always filtered out.")
unremovableNodeRecheckTimeout = flag.Duration("unremovable-node-recheck-timeout", 5*time.Minute, "The timeout before we check again a node that couldn't be removed before")
expendablePodsPriorityCutoff = flag.Int("expendable-pods-priority-cutoff", -10, "Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.")
regional = flag.Bool("regional", false, "Cluster is regional.")
newPodScaleUpDelay = flag.Duration("new-pod-scale-up-delay", 0*time.Second, "Pods less than this old will not be considered for scale-up.")
ignoreTaintsFlag = multiStringFlag("ignore-taint", "Specifies a taint to ignore in node templates when considering to scale a node group")
awsUseStaticInstanceList = flag.Bool("aws-use-static-instance-list", false, "Should CA fetch instance types in runtime or use a static list. AWS only")
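Because --filter-out-schedulable-pods-uses-packing is deleted from the flag set rather than deprecated, deployments that still pass it will now fail when the command line is parsed. A standalone illustration of that failure mode using Go's standard flag package, not the autoscaler's actual startup path:

package main

import (
	"flag"
	"fmt"
)

func main() {
	fs := flag.NewFlagSet("cluster-autoscaler", flag.ContinueOnError)
	fs.Bool("regional", false, "Cluster is regional.") // a flag that still exists
	// --filter-out-schedulable-pods-uses-packing is intentionally not defined.
	err := fs.Parse([]string{"--filter-out-schedulable-pods-uses-packing=false"})
	fmt.Println(err) // flag provided but not defined: -filter-out-schedulable-pods-uses-packing
}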
@@ -195,53 +191,52 @@ func createAutoscalingOptions() config.AutoscalingOptions {
klog.Fatalf("Failed to parse flags: %v", err)
}
return config.AutoscalingOptions{
CloudConfig: *cloudConfig,
CloudProviderName: *cloudProviderFlag,
NodeGroupAutoDiscovery: *nodeGroupAutoDiscoveryFlag,
MaxTotalUnreadyPercentage: *maxTotalUnreadyPercentage,
OkTotalUnreadyCount: *okTotalUnreadyCount,
ScaleUpFromZero: *scaleUpFromZero,
EstimatorName: *estimatorFlag,
ExpanderName: *expanderFlag,
IgnoreDaemonSetsUtilization: *ignoreDaemonSetsUtilization,
IgnoreMirrorPodsUtilization: *ignoreMirrorPodsUtilization,
MaxBulkSoftTaintCount: *maxBulkSoftTaintCount,
MaxBulkSoftTaintTime: *maxBulkSoftTaintTime,
MaxEmptyBulkDelete: *maxEmptyBulkDeleteFlag,
MaxGracefulTerminationSec: *maxGracefulTerminationFlag,
MaxNodeProvisionTime: *maxNodeProvisionTime,
MaxNodesTotal: *maxNodesTotal,
MaxCoresTotal: maxCoresTotal,
MinCoresTotal: minCoresTotal,
MaxMemoryTotal: maxMemoryTotal,
MinMemoryTotal: minMemoryTotal,
GpuTotal: parsedGpuTotal,
NodeGroups: *nodeGroupsFlag,
ScaleDownDelayAfterAdd: *scaleDownDelayAfterAdd,
ScaleDownDelayAfterDelete: *scaleDownDelayAfterDelete,
ScaleDownDelayAfterFailure: *scaleDownDelayAfterFailure,
ScaleDownEnabled: *scaleDownEnabled,
ScaleDownUnneededTime: *scaleDownUnneededTime,
ScaleDownUnreadyTime: *scaleDownUnreadyTime,
ScaleDownUtilizationThreshold: *scaleDownUtilizationThreshold,
ScaleDownGpuUtilizationThreshold: *scaleDownGpuUtilizationThreshold,
ScaleDownNonEmptyCandidatesCount: *scaleDownNonEmptyCandidatesCount,
ScaleDownCandidatesPoolRatio: *scaleDownCandidatesPoolRatio,
ScaleDownCandidatesPoolMinCount: *scaleDownCandidatesPoolMinCount,
WriteStatusConfigMap: *writeStatusConfigMapFlag,
BalanceSimilarNodeGroups: *balanceSimilarNodeGroupsFlag,
ConfigNamespace: *namespace,
ClusterName: *clusterName,
NodeAutoprovisioningEnabled: *nodeAutoprovisioningEnabled,
MaxAutoprovisionedNodeGroupCount: *maxAutoprovisionedNodeGroupCount,
UnremovableNodeRecheckTimeout: *unremovableNodeRecheckTimeout,
ExpendablePodsPriorityCutoff: *expendablePodsPriorityCutoff,
Regional: *regional,
NewPodScaleUpDelay: *newPodScaleUpDelay,
FilterOutSchedulablePodsUsesPacking: *filterOutSchedulablePodsUsesPacking,
IgnoredTaints: *ignoreTaintsFlag,
NodeDeletionDelayTimeout: *nodeDeletionDelayTimeout,
AWSUseStaticInstanceList: *awsUseStaticInstanceList,
CloudConfig: *cloudConfig,
CloudProviderName: *cloudProviderFlag,
NodeGroupAutoDiscovery: *nodeGroupAutoDiscoveryFlag,
MaxTotalUnreadyPercentage: *maxTotalUnreadyPercentage,
OkTotalUnreadyCount: *okTotalUnreadyCount,
ScaleUpFromZero: *scaleUpFromZero,
EstimatorName: *estimatorFlag,
ExpanderName: *expanderFlag,
IgnoreDaemonSetsUtilization: *ignoreDaemonSetsUtilization,
IgnoreMirrorPodsUtilization: *ignoreMirrorPodsUtilization,
MaxBulkSoftTaintCount: *maxBulkSoftTaintCount,
MaxBulkSoftTaintTime: *maxBulkSoftTaintTime,
MaxEmptyBulkDelete: *maxEmptyBulkDeleteFlag,
MaxGracefulTerminationSec: *maxGracefulTerminationFlag,
MaxNodeProvisionTime: *maxNodeProvisionTime,
MaxNodesTotal: *maxNodesTotal,
MaxCoresTotal: maxCoresTotal,
MinCoresTotal: minCoresTotal,
MaxMemoryTotal: maxMemoryTotal,
MinMemoryTotal: minMemoryTotal,
GpuTotal: parsedGpuTotal,
NodeGroups: *nodeGroupsFlag,
ScaleDownDelayAfterAdd: *scaleDownDelayAfterAdd,
ScaleDownDelayAfterDelete: *scaleDownDelayAfterDelete,
ScaleDownDelayAfterFailure: *scaleDownDelayAfterFailure,
ScaleDownEnabled: *scaleDownEnabled,
ScaleDownUnneededTime: *scaleDownUnneededTime,
ScaleDownUnreadyTime: *scaleDownUnreadyTime,
ScaleDownUtilizationThreshold: *scaleDownUtilizationThreshold,
ScaleDownGpuUtilizationThreshold: *scaleDownGpuUtilizationThreshold,
ScaleDownNonEmptyCandidatesCount: *scaleDownNonEmptyCandidatesCount,
ScaleDownCandidatesPoolRatio: *scaleDownCandidatesPoolRatio,
ScaleDownCandidatesPoolMinCount: *scaleDownCandidatesPoolMinCount,
WriteStatusConfigMap: *writeStatusConfigMapFlag,
BalanceSimilarNodeGroups: *balanceSimilarNodeGroupsFlag,
ConfigNamespace: *namespace,
ClusterName: *clusterName,
NodeAutoprovisioningEnabled: *nodeAutoprovisioningEnabled,
MaxAutoprovisionedNodeGroupCount: *maxAutoprovisionedNodeGroupCount,
UnremovableNodeRecheckTimeout: *unremovableNodeRecheckTimeout,
ExpendablePodsPriorityCutoff: *expendablePodsPriorityCutoff,
Regional: *regional,
NewPodScaleUpDelay: *newPodScaleUpDelay,
IgnoredTaints: *ignoreTaintsFlag,
NodeDeletionDelayTimeout: *nodeDeletionDelayTimeout,
AWSUseStaticInstanceList: *awsUseStaticInstanceList,
}
}