Merge pull request #469 from aleksandra-malinowska/single-unregistered-flag
Remove --unregistered-node-removal-time flag
commit 2589c43a61
@@ -77,8 +77,6 @@ type AutoscalingOptions struct {
     MinMemoryTotal int64
     // NodeGroupAutoDiscovery represents one or more definition(s) of node group auto-discovery
     NodeGroupAutoDiscovery string
-    // UnregisteredNodeRemovalTime represents how long CA waits before removing nodes that are not registered in Kubernetes")
-    UnregisteredNodeRemovalTime time.Duration
     // EstimatorName is the estimator used to estimate the number of needed nodes in scale up.
     EstimatorName string
     // ExpanderName sets the type of node group expander to be used in scale up
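With the dedicated option gone, the existing MaxNodeProvisionTime option takes over for unregistered-node cleanup (its use is visible in the renamed references and test hunks below). For orientation only, a minimal sketch of the surviving slice of the options struct; the package name is assumed and all other fields are elided:

package core // package name assumed for this sketch

import "time"

// AutoscalingOptions (trimmed sketch): only the field relevant to this change
// is shown. After this commit, MaxNodeProvisionTime also bounds how long CA
// tolerates nodes that never register in Kubernetes.
type AutoscalingOptions struct {
    MaxNodeProvisionTime time.Duration
}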
@@ -329,7 +329,7 @@ func removeOldUnregisteredNodes(unregisteredNodes []clusterstate.UnregisteredNod
     currentTime time.Time, logRecorder *utils.LogEventRecorder) (bool, error) {
     removedAny := false
     for _, unregisteredNode := range unregisteredNodes {
-        if unregisteredNode.UnregisteredSince.Add(context.UnregisteredNodeRemovalTime).Before(currentTime) {
+        if unregisteredNode.UnregisteredSince.Add(context.MaxNodeProvisionTime).Before(currentTime) {
             glog.V(0).Infof("Removing unregistered node %v", unregisteredNode.Node.Name)
             nodeGroup, err := context.CloudProvider.NodeGroupForNode(unregisteredNode.Node)
             if err != nil {
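For illustration only (not part of this diff), a standalone sketch of the timeout check the hunk above rewires; the helper name is hypothetical, but the comparison mirrors the changed line:

package main

import (
    "fmt"
    "time"
)

// isRemovableUnregistered is a hypothetical helper mirroring the check above:
// an unregistered node becomes removable once it has stayed unregistered
// for longer than maxNodeProvisionTime.
func isRemovableUnregistered(unregisteredSince, now time.Time, maxNodeProvisionTime time.Duration) bool {
    return unregisteredSince.Add(maxNodeProvisionTime).Before(now)
}

func main() {
    now := time.Now()
    fmt.Println(isRemovableUnregistered(now.Add(-20*time.Minute), now, 15*time.Minute)) // true
    fmt.Println(isRemovableUnregistered(now.Add(-10*time.Minute), now, 15*time.Minute)) // false
}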
@@ -372,7 +372,7 @@ func fixNodeGroupSize(context *AutoscalingContext, currentTime time.Time) (bool,
         if incorrectSize == nil {
             continue
         }
-        if incorrectSize.FirstObserved.Add(context.UnregisteredNodeRemovalTime).Before(currentTime) {
+        if incorrectSize.FirstObserved.Add(context.MaxNodeProvisionTime).Before(currentTime) {
             delta := incorrectSize.CurrentSize - incorrectSize.ExpectedSize
             if delta < 0 {
                 glog.V(0).Infof("Decreasing size of %s, expected=%d current=%d delta=%d", nodeGroup.Id(),
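Likewise for illustration (hypothetical numbers, not the autoscaler's real wiring), the arithmetic behind the delta branch above: once the size mismatch has been observed for longer than MaxNodeProvisionTime, a negative delta means fewer nodes registered than requested and the group's target size is decreased accordingly:

package main

import "fmt"

func main() {
    // Hypothetical sizes: the group's target was 5 but only 3 nodes registered.
    currentSize, expectedSize := 3, 5
    delta := currentSize - expectedSize // -2
    if delta < 0 {
        // The hunk above logs this case and shrinks the target by |delta|,
        // so the group stops waiting for nodes that never appeared.
        fmt.Printf("expected=%d current=%d delta=%d\n", expectedSize, currentSize, delta)
    }
}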
@@ -332,7 +332,7 @@ func TestRemoveOldUnregisteredNodes(t *testing.T) {

     context := &AutoscalingContext{
         AutoscalingOptions: AutoscalingOptions{
-            UnregisteredNodeRemovalTime: 45 * time.Minute,
+            MaxNodeProvisionTime: 45 * time.Minute,
         },
         CloudProvider: provider,
         ClusterStateRegistry: clusterState,
@@ -430,7 +430,7 @@ func TestRemoveFixNodeTargetSize(t *testing.T) {

     context := &AutoscalingContext{
         AutoscalingOptions: AutoscalingOptions{
-            UnregisteredNodeRemovalTime: 45 * time.Minute,
+            MaxNodeProvisionTime: 45 * time.Minute,
         },
         CloudProvider: provider,
         ClusterStateRegistry: clusterState,
@@ -103,17 +103,16 @@ var (
         "for scale down when some candidates from previous iteration are no longer valid."+
         "When calculating the pool size for additional candidates we take"+
         "max(#nodes * scale-down-candidates-pool-ratio, scale-down-candidates-pool-min-count).")
-    scanInterval                = flag.Duration("scan-interval", 10*time.Second, "How often cluster is reevaluated for scale up or down")
-    maxNodesTotal               = flag.Int("max-nodes-total", 0, "Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number.")
-    coresTotal                  = flag.String("cores-total", minMaxFlagString(0, config.DefaultMaxClusterCores), "Minimum and maximum number of cores in cluster, in the format <min>:<max>. Cluster autoscaler will not scale the cluster beyond these numbers.")
-    memoryTotal                 = flag.String("memory-total", minMaxFlagString(0, config.DefaultMaxClusterMemory), "Minimum and maximum number of gigabytes of memory in cluster, in the format <min>:<max>. Cluster autoscaler will not scale the cluster beyond these numbers.")
-    cloudProviderFlag           = flag.String("cloud-provider", "gce", "Cloud provider type. Allowed values: gce, aws, kubemark")
-    maxEmptyBulkDeleteFlag      = flag.Int("max-empty-bulk-delete", 10, "Maximum number of empty nodes that can be deleted at the same time.")
-    maxGracefulTerminationFlag  = flag.Int("max-graceful-termination-sec", 10*60, "Maximum number of seconds CA waits for pod termination when trying to scale down a node.")
-    maxTotalUnreadyPercentage   = flag.Float64("max-total-unready-percentage", 33, "Maximum percentage of unready nodes after which CA halts operations")
-    okTotalUnreadyCount         = flag.Int("ok-total-unready-count", 3, "Number of allowed unready nodes, irrespective of max-total-unready-percentage")
-    maxNodeProvisionTime        = flag.Duration("max-node-provision-time", 15*time.Minute, "Maximum time CA waits for node to be provisioned")
-    unregisteredNodeRemovalTime = flag.Duration("unregistered-node-removal-time", 15*time.Minute, "Time that CA waits before removing nodes that are not registered in Kubernetes")
+    scanInterval               = flag.Duration("scan-interval", 10*time.Second, "How often cluster is reevaluated for scale up or down")
+    maxNodesTotal              = flag.Int("max-nodes-total", 0, "Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number.")
+    coresTotal                 = flag.String("cores-total", minMaxFlagString(0, config.DefaultMaxClusterCores), "Minimum and maximum number of cores in cluster, in the format <min>:<max>. Cluster autoscaler will not scale the cluster beyond these numbers.")
+    memoryTotal                = flag.String("memory-total", minMaxFlagString(0, config.DefaultMaxClusterMemory), "Minimum and maximum number of gigabytes of memory in cluster, in the format <min>:<max>. Cluster autoscaler will not scale the cluster beyond these numbers.")
+    cloudProviderFlag          = flag.String("cloud-provider", "gce", "Cloud provider type. Allowed values: gce, aws, kubemark")
+    maxEmptyBulkDeleteFlag     = flag.Int("max-empty-bulk-delete", 10, "Maximum number of empty nodes that can be deleted at the same time.")
+    maxGracefulTerminationFlag = flag.Int("max-graceful-termination-sec", 10*60, "Maximum number of seconds CA waits for pod termination when trying to scale down a node.")
+    maxTotalUnreadyPercentage  = flag.Float64("max-total-unready-percentage", 33, "Maximum percentage of unready nodes after which CA halts operations")
+    okTotalUnreadyCount        = flag.Int("ok-total-unready-count", 3, "Number of allowed unready nodes, irrespective of max-total-unready-percentage")
+    maxNodeProvisionTime       = flag.Duration("max-node-provision-time", 15*time.Minute, "Maximum time CA waits for node to be provisioned")

     estimatorFlag = flag.String("estimator", estimator.BinpackingEstimatorName,
         "Type of resource estimator to be used in scale up. Available values: ["+strings.Join(estimator.AvailableEstimators, ",")+"]")
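After this hunk, --max-node-provision-time is the single timeout covering both node provisioning and unregistered-node cleanup. For reference, a minimal standalone sketch of defining and reading a flag.Duration the same way as the surviving flag above (this is not the autoscaler's actual main):

package main

import (
    "flag"
    "fmt"
    "time"
)

func main() {
    // Same pattern as the flag kept above; try: go run . --max-node-provision-time=20m
    maxNodeProvisionTime := flag.Duration("max-node-provision-time", 15*time.Minute,
        "Maximum time CA waits for node to be provisioned")
    flag.Parse()
    fmt.Println("provision timeout:", *maxNodeProvisionTime)
}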
@@ -161,7 +160,6 @@ func createAutoscalerOptions() core.AutoscalerOptions {
         MaxMemoryTotal: maxMemoryTotal,
         MinMemoryTotal: minMemoryTotal,
         NodeGroups: nodeGroupsFlag,
-        UnregisteredNodeRemovalTime: *unregisteredNodeRemovalTime,
         ScaleDownDelayAfterAdd: *scaleDownDelayAfterAdd,
         ScaleDownDelayAfterDelete: *scaleDownDelayAfterDelete,
         ScaleDownDelayAfterFailure: *scaleDownDelayAfterFailure,