Remove --unregistered-node-removal-time flag

Aleksandra Malinowska 2017-11-14 16:35:46 +01:00
parent 362b0319de
commit 2ff962e53e
4 changed files with 14 additions and 18 deletions

View File

@@ -77,8 +77,6 @@ type AutoscalingOptions struct {
MinMemoryTotal int64
// NodeGroupAutoDiscovery represents one or more definition(s) of node group auto-discovery
NodeGroupAutoDiscovery string
-// UnregisteredNodeRemovalTime represents how long CA waits before removing nodes that are not registered in Kubernetes")
-UnregisteredNodeRemovalTime time.Duration
// EstimatorName is the estimator used to estimate the number of needed nodes in scale up.
EstimatorName string
// ExpanderName sets the type of node group expander to be used in scale up
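
For orientation, a minimal, illustrative sketch of what the trimmed options struct looks like after this change. Only the fields visible in the hunk above plus MaxNodeProvisionTime are shown; the surrounding program is hypothetical, not the autoscaler's actual file.

package main

import (
	"fmt"
	"time"
)

// Abridged, illustrative view of the options after this commit: the
// UnregisteredNodeRemovalTime field is gone, and MaxNodeProvisionTime now
// also bounds how long an unregistered node may linger before removal.
type AutoscalingOptions struct {
	MinMemoryTotal         int64
	NodeGroupAutoDiscovery string
	MaxNodeProvisionTime   time.Duration // replaces the removed field
	EstimatorName          string
	ExpanderName           string
}

func main() {
	opts := AutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}
	fmt.Println("unregistered-node removal deadline:", opts.MaxNodeProvisionTime)
}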

View File

@@ -329,7 +329,7 @@ func removeOldUnregisteredNodes(unregisteredNodes []clusterstate.UnregisteredNod
currentTime time.Time, logRecorder *utils.LogEventRecorder) (bool, error) {
removedAny := false
for _, unregisteredNode := range unregisteredNodes {
-if unregisteredNode.UnregisteredSince.Add(context.UnregisteredNodeRemovalTime).Before(currentTime) {
+if unregisteredNode.UnregisteredSince.Add(context.MaxNodeProvisionTime).Before(currentTime) {
glog.V(0).Infof("Removing unregistered node %v", unregisteredNode.Node.Name)
nodeGroup, err := context.CloudProvider.NodeGroupForNode(unregisteredNode.Node)
if err != nil {
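
The change above only swaps which deadline is consulted; the comparison itself is unchanged. A standalone sketch of that check, using only the standard library (the helper name is made up for illustration):

package main

import (
	"fmt"
	"time"
)

// shouldRemoveUnregistered mirrors the check in the hunk above: a node that
// has stayed unregistered for longer than maxNodeProvisionTime becomes
// eligible for removal. Not the autoscaler's actual code.
func shouldRemoveUnregistered(unregisteredSince, now time.Time, maxNodeProvisionTime time.Duration) bool {
	return unregisteredSince.Add(maxNodeProvisionTime).Before(now)
}

func main() {
	now := time.Now()
	fmt.Println(shouldRemoveUnregistered(now.Add(-20*time.Minute), now, 15*time.Minute)) // true: past the deadline
	fmt.Println(shouldRemoveUnregistered(now.Add(-10*time.Minute), now, 15*time.Minute)) // false: still within it
}
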
@@ -372,7 +372,7 @@ func fixNodeGroupSize(context *AutoscalingContext, currentTime time.Time) (bool,
if incorrectSize == nil {
continue
}
-if incorrectSize.FirstObserved.Add(context.UnregisteredNodeRemovalTime).Before(currentTime) {
+if incorrectSize.FirstObserved.Add(context.MaxNodeProvisionTime).Before(currentTime) {
delta := incorrectSize.CurrentSize - incorrectSize.ExpectedSize
if delta < 0 {
glog.V(0).Infof("Decreasing size of %s, expected=%d current=%d delta=%d", nodeGroup.Id(),

View File

@@ -332,7 +332,7 @@ func TestRemoveOldUnregisteredNodes(t *testing.T) {
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
-UnregisteredNodeRemovalTime: 45 * time.Minute,
+MaxNodeProvisionTime: 45 * time.Minute,
},
CloudProvider: provider,
ClusterStateRegistry: clusterState,
@@ -430,7 +430,7 @@ func TestRemoveFixNodeTargetSize(t *testing.T) {
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
-UnregisteredNodeRemovalTime: 45 * time.Minute,
+MaxNodeProvisionTime: 45 * time.Minute,
},
CloudProvider: provider,
ClusterStateRegistry: clusterState,
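
The test fixtures above only rename the option; the 45-minute window behaves the same. A hypothetical standalone test (not the autoscaler's test file) exercising that window:

package main

import (
	"testing"
	"time"
)

// TestUnregisteredNodeDeadline checks both sides of the 45-minute
// MaxNodeProvisionTime window used in the fixtures above.
func TestUnregisteredNodeDeadline(t *testing.T) {
	maxNodeProvisionTime := 45 * time.Minute
	now := time.Now()

	// Unregistered for an hour: past the window, should be removable.
	if !now.Add(-time.Hour).Add(maxNodeProvisionTime).Before(now) {
		t.Errorf("node unregistered for 1h should be past the %v window", maxNodeProvisionTime)
	}

	// Unregistered for ten minutes: still within the window, should be kept.
	if now.Add(-10 * time.Minute).Add(maxNodeProvisionTime).Before(now) {
		t.Errorf("node unregistered for 10m should still be within the %v window", maxNodeProvisionTime)
	}
}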

View File

@@ -113,7 +113,6 @@ var (
maxTotalUnreadyPercentage = flag.Float64("max-total-unready-percentage", 33, "Maximum percentage of unready nodes after which CA halts operations")
okTotalUnreadyCount = flag.Int("ok-total-unready-count", 3, "Number of allowed unready nodes, irrespective of max-total-unready-percentage")
maxNodeProvisionTime = flag.Duration("max-node-provision-time", 15*time.Minute, "Maximum time CA waits for node to be provisioned")
-unregisteredNodeRemovalTime = flag.Duration("unregistered-node-removal-time", 15*time.Minute, "Time that CA waits before removing nodes that are not registered in Kubernetes")
estimatorFlag = flag.String("estimator", estimator.BinpackingEstimatorName,
"Type of resource estimator to be used in scale up. Available values: ["+strings.Join(estimator.AvailableEstimators, ",")+"]")
@@ -161,7 +160,6 @@ func createAutoscalerOptions() core.AutoscalerOptions {
MaxMemoryTotal: maxMemoryTotal,
MinMemoryTotal: minMemoryTotal,
NodeGroups: nodeGroupsFlag,
-UnregisteredNodeRemovalTime: *unregisteredNodeRemovalTime,
ScaleDownDelayAfterAdd: *scaleDownDelayAfterAdd,
ScaleDownDelayAfterDelete: *scaleDownDelayAfterDelete,
ScaleDownDelayAfterFailure: *scaleDownDelayAfterFailure,
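
After the removal, --max-node-provision-time is the only timeout flag left for this behaviour. A minimal sketch (not the real main.go) of registering that flag and reading the value that now drives unregistered-node cleanup as well:

package main

import (
	"flag"
	"fmt"
	"time"
)

// Same flag name, default, and description as in the hunk above; everything
// else here is illustrative.
var maxNodeProvisionTime = flag.Duration("max-node-provision-time", 15*time.Minute,
	"Maximum time CA waits for node to be provisioned")

func main() {
	flag.Parse()
	fmt.Println("MaxNodeProvisionTime =", *maxNodeProvisionTime)
}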