Remove --verify-unschedulable-pods flag
This flag defaulted to true in the default setups for every platform, we haven't heard of any user changing it to false, and after removing the check on the PodScheduled condition, setting it to false would basically break CA.
This commit is contained in:
parent
ef1241b3c6
commit
95b5b4be94
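In practice, the change makes the schedulability re-check unconditional. Below is a minimal sketch of the control-flow difference, with toy types standing in for the CA pod/node structures; it illustrates the hunks that follow and is not the actual cluster-autoscaler source.

package main

import "fmt"

// pod is a toy stand-in for *apiv1.Pod; fits marks whether some existing
// node could still hold the pod (what PredicateChecker decides in CA).
type pod struct {
	name string
	fits bool
}

// filterOutSchedulable keeps only pods that fit nowhere, mirroring the
// role of FilterOutSchedulable in the hunks below.
func filterOutSchedulable(pods []pod) []pod {
	var out []pod
	for _, p := range pods {
		if !p.fits {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	all := []pod{{"fits-existing-node", true}, {"needs-new-node", false}}

	// Old behavior: the filter ran only when --verify-unschedulable-pods
	// was true, which was the default everywhere.
	verify := true
	toHelp := all
	if verify {
		toHelp = filterOutSchedulable(all)
	}
	fmt.Println("old path, verify=true:", len(toHelp)) // 1

	// New behavior: the filter always runs. With verify=false the old
	// path would have requested a new node even for the pod that an
	// existing node could hold.
	toHelp = filterOutSchedulable(all)
	fmt.Println("new path, always on: ", len(toHelp)) // 1
}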
@@ -90,9 +90,6 @@ type AutoscalingOptions struct {
 	CloudProviderName string
 	// NodeGroups is the list of node groups a.k.a autoscaling targets
 	NodeGroups []string
-	// VerifyUnschedulable is used to enable verification to ensure that each pod marked by Scheduler as unschedulable actually can't be scheduled on any node.
-	// This prevents from adding unnecessary nodes in situation when CA and Scheduler have different configuration.
-	VerifyUnschedulablePods bool
 	// ScaleDownEnabled is used to allow CA to scale down the cluster
 	ScaleDownEnabled bool
 	// ScaleDownDelay sets the duration from the last scale up to the time when CA starts to check scale down options
@@ -202,22 +202,18 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
 	//
 	// With the check enabled the last point won't happen because CA will ignore a pod
 	// which is supposed to schedule on an existing node.
-	unschedulablePodsToHelp := allUnschedulablePods
 	schedulablePodsPresent := false
-	if a.VerifyUnschedulablePods {
-		glog.V(4).Infof("Filtering out schedulables")
-		newUnschedulablePodsToHelp := FilterOutSchedulable(unschedulablePodsToHelp, readyNodes, allScheduled,
-			a.PredicateChecker)
-
-		if len(newUnschedulablePodsToHelp) != len(unschedulablePodsToHelp) {
-			glog.V(2).Info("Schedulable pods present")
-			schedulablePodsPresent = true
-		} else {
-			glog.V(4).Info("No schedulable pods")
-		}
-		unschedulablePodsToHelp = newUnschedulablePodsToHelp
-	}
+	glog.V(4).Infof("Filtering out schedulables")
+	unschedulablePodsToHelp := FilterOutSchedulable(allUnschedulablePods, readyNodes, allScheduled,
+		a.PredicateChecker)
+
+	if len(unschedulablePodsToHelp) != len(allUnschedulablePods) {
+		glog.V(2).Info("Schedulable pods present")
+		schedulablePodsPresent = true
+	} else {
+		glog.V(4).Info("No schedulable pods")
+	}
 
 	if len(unschedulablePodsToHelp) == 0 {
 		glog.V(1).Info("No unschedulable pods")
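The rewritten block above also derives schedulablePodsPresent directly from the filter's result instead of setting it inside the removed branch. A hedged sketch of that detection pattern with toy data (not CA source):

package main

import "fmt"

func main() {
	// Pretend the filter dropped pod-c because an existing node fits it,
	// as FilterOutSchedulable does with real pods and a PredicateChecker.
	all := []string{"pod-a", "pod-b", "pod-c"}
	unschedulablePodsToHelp := all[:2]

	// "Schedulable pods present" now just means the filter removed something.
	schedulablePodsPresent := len(unschedulablePodsToHelp) != len(all)
	fmt.Println("schedulable pods present:", schedulablePodsPresent) // true

	if len(unschedulablePodsToHelp) == 0 {
		fmt.Println("No unschedulable pods")
	}
}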
@@ -52,7 +52,7 @@ func FilterOutSchedulable(unschedulableCandidates []*apiv1.Pod, nodes []*apiv1.N
 	for _, pod := range unschedulableCandidates {
 		if nodeName, err := predicateChecker.FitsAny(pod, nodeNameToNodeInfo); err == nil {
-			glog.Warningf("Pod %s marked as unschedulable can be scheduled on %s. Ignoring in scale up.", pod.Name, nodeName)
+			glog.V(4).Infof("Pod %s marked as unschedulable can be scheduled on %s. Ignoring in scale up.", pod.Name, nodeName)
 		} else {
 			unschedulablePods = append(unschedulablePods, pod)
 		}
 	}
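Only the log level changes in this hunk: now that the filter runs on every loop iteration, a pod that fits an existing node is expected rather than suspicious, so the warning becomes a V(4) debug message. For context, a compact sketch of the surrounding loop, with fitsAny standing in for predicateChecker.FitsAny (assumed toy signature, not the real API):

package main

import (
	"errors"
	"fmt"
)

// fitsAny stands in for predicateChecker.FitsAny: it returns the name of
// a node the pod fits on, or an error when no node fits. The predicate
// here is a placeholder.
func fitsAny(pod string, nodes []string) (string, error) {
	for _, n := range nodes {
		if pod == "small-pod" { // placeholder predicate
			return n, nil
		}
	}
	return "", errors.New("pod does not fit on any node")
}

func main() {
	candidates := []string{"small-pod", "huge-pod"}
	nodes := []string{"node-1"}

	var unschedulablePods []string
	for _, pod := range candidates {
		if nodeName, err := fitsAny(pod, nodes); err == nil {
			// Was glog.Warningf; now logged at V(4) since it is routine.
			fmt.Printf("Pod %s marked as unschedulable can be scheduled on %s. Ignoring in scale up.\n", pod, nodeName)
		} else {
			unschedulablePods = append(unschedulablePods, pod)
		}
	}
	fmt.Println("still unschedulable:", unschedulablePods)
}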
@@ -69,9 +69,6 @@ var (
 	configMapName = flag.String("configmap", "", "The name of the ConfigMap containing settings used for dynamic reconfiguration. Empty string for no ConfigMap.")
 	namespace = flag.String("namespace", "kube-system", "Namespace in which cluster-autoscaler run. If a --configmap flag is also provided, ensure that the configmap exists in this namespace before CA runs.")
 	nodeGroupAutoDiscovery = flag.String("node-group-auto-discovery", "", "One or more definition(s) of node group auto-discovery. A definition is expressed `<name of discoverer per cloud provider>:[<key>[=<value>]]`. Only the `aws` cloud provider is currently supported. The only valid discoverer for it is `asg` and the valid key is `tag`. For example, specifying `--cloud-provider aws` and `--node-group-auto-discovery asg:tag=cluster-autoscaler/auto-discovery/enabled,kubernetes.io/cluster/<YOUR CLUSTER NAME>` results in ASGs tagged with `cluster-autoscaler/auto-discovery/enabled` and `kubernetes.io/cluster/<YOUR CLUSTER NAME>` to be considered as target node groups")
-	verifyUnschedulablePods = flag.Bool("verify-unschedulable-pods", true,
-		"If enabled CA will ensure that each pod marked by Scheduler as unschedulable actually can't be scheduled on any node."+
-			"This prevents from adding unnecessary nodes in situation when CA and Scheduler have different configuration.")
 	scaleDownEnabled = flag.Bool("scale-down-enabled", true, "Should CA scale down the cluster")
 	scaleDownDelay = flag.Duration("scale-down-delay", 10*time.Minute,
 		"Duration from the last scale up to the time when CA starts to check scale down options")
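The deleted flag followed the standard library's flag pattern, and its value was copied into AutoscalingOptions in the hunk below. A generic sketch of that declare-and-wire pattern, using a hypothetical flag name purely for illustration (the real flag is gone after this commit):

package main

import (
	"flag"
	"fmt"
)

// A boolean flag declared the way verify-unschedulable-pods was:
// name, default value, usage string.
var exampleFlag = flag.Bool("example-verify-pods", true,
	"Hypothetical flag shown only to illustrate the pattern being removed.")

// options mirrors how a flag value is copied into a config struct,
// as createAutoscalerOptions does for the remaining flags.
type options struct {
	ExampleVerifyPods bool
}

func main() {
	flag.Parse()
	opts := options{ExampleVerifyPods: *exampleFlag}
	fmt.Printf("%+v\n", opts)
}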
@@ -126,7 +123,6 @@ func createAutoscalerOptions() core.AutoscalerOptions {
 		ScaleDownUnneededTime:         *scaleDownUnneededTime,
 		ScaleDownUnreadyTime:          *scaleDownUnreadyTime,
 		ScaleDownUtilizationThreshold: *scaleDownUtilizationThreshold,
-		VerifyUnschedulablePods:       *verifyUnschedulablePods,
 		WriteStatusConfigMap:          *writeStatusConfigMapFlag,
 		BalanceSimilarNodeGroups:      *balanceSimilarNodeGroupsFlag,
 		ConfigNamespace:               *namespace,