Fix typos related to max-graceful-termination-sec

Maciej Pytel 2017-06-14 14:14:21 +02:00
parent 060b6c2284
commit 767367c866
4 changed files with 14 additions and 14 deletions


@ -77,9 +77,9 @@ type AutoscalingOptions struct {
EstimatorName string
// ExpanderName sets the type of node group expander to be used in scale up
ExpanderName string
- // MaxGratefulTerminationSec is maximum number of seconds scale down waits for pods to terminante before
+ // MaxGracefulTerminationSec is maximum number of seconds scale down waits for pods to terminante before
// removing the node from cloud provider.
- MaxGratefulTerminationSec int
+ MaxGracefulTerminationSec int
// Maximum time CA waits for node to be provisioned
MaxNodeProvisionTime time.Duration
// MaxTotalUnreadyPercentage is the maximum percentage of unready nodes after which CA halts operations


@ -375,7 +375,7 @@ func getEmptyNodes(candidates []*apiv1.Node, pods []*apiv1.Pod, maxEmptyBulkDele
}
func deleteNode(context *AutoscalingContext, node *apiv1.Node, pods []*apiv1.Pod) errors.AutoscalerError {
- if err := drainNode(node, pods, context.ClientSet, context.Recorder, context.MaxGratefulTerminationSec,
+ if err := drainNode(node, pods, context.ClientSet, context.Recorder, context.MaxGracefulTerminationSec,
MaxPodEvictionTime, EvictionRetryTime); err != nil {
return err
}
@ -383,9 +383,9 @@ func deleteNode(context *AutoscalingContext, node *apiv1.Node, pods []*apiv1.Pod
}
func evictPod(podToEvict *apiv1.Pod, client kube_client.Interface, recorder kube_record.EventRecorder,
- maxGratefulTerminationSec int, retryUntil time.Time, waitBetweenRetries time.Duration) error {
+ maxGracefulTerminationSec int, retryUntil time.Time, waitBetweenRetries time.Duration) error {
recorder.Eventf(podToEvict, apiv1.EventTypeNormal, "ScaleDown", "deleting pod for node scale down")
- maxGraceful64 := int64(maxGratefulTerminationSec)
+ maxGraceful64 := int64(maxGracefulTerminationSec)
var lastError error
for first := true; first || time.Now().Before(retryUntil); time.Sleep(waitBetweenRetries) {
first = false
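
For orientation: the retry-loop body elided by this hunk submits an eviction for the pod, and the int64 conversion above exists because the API expresses a pod's termination grace period as a *int64 inside DeleteOptions. A minimal sketch of that shape, assuming the policy/v1beta1 Eviction type (import alias assumed) and the metav1 types this file already uses; it is illustrative, not the elided code itself:

	// Sketch only: hand the (renamed) grace period to the API server as part
	// of an eviction request; GracePeriodSeconds is *int64, hence maxGraceful64.
	eviction := &policyv1beta1.Eviction{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: podToEvict.Namespace,
			Name:      podToEvict.Name,
		},
		DeleteOptions: &metav1.DeleteOptions{
			GracePeriodSeconds: &maxGraceful64,
		},
	}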
@ -411,7 +411,7 @@ func evictPod(podToEvict *apiv1.Pod, client kube_client.Interface, recorder kube
// Performs drain logic on the node. Marks the node as unschedulable and later removes all pods, giving
// them up to MaxGracefulTerminationTime to finish.
func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface, recorder kube_record.EventRecorder,
- maxGratefulTerminationSec int, maxPodEvictionTime time.Duration, waitBetweenRetries time.Duration) errors.AutoscalerError {
+ maxGracefulTerminationSec int, maxPodEvictionTime time.Duration, waitBetweenRetries time.Duration) errors.AutoscalerError {
drainSuccessful := false
toEvict := len(pods)
@ -434,7 +434,7 @@ func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface
confirmations := make(chan error, toEvict)
for _, pod := range pods {
go func(podToEvict *apiv1.Pod) {
- confirmations <- evictPod(podToEvict, client, recorder, maxGratefulTerminationSec, retryUntil, waitBetweenRetries)
+ confirmations <- evictPod(podToEvict, client, recorder, maxGracefulTerminationSec, retryUntil, waitBetweenRetries)
}(pod)
}
@ -458,9 +458,9 @@ func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface
errors.ApiCallError, "Failed to drain node %s/%s, due to following errors: %v", node.Namespace, node.Name, evictionErrs)
}
- // Evictions created successfully, wait maxGratefulTerminationSec + PodEvictionHeadroom to see if pods really disappeared.
+ // Evictions created successfully, wait maxGracefulTerminationSec + PodEvictionHeadroom to see if pods really disappeared.
allGone := true
- for start := time.Now(); time.Now().Sub(start) < time.Duration(maxGratefulTerminationSec)*time.Second+PodEvictionHeadroom; time.Sleep(5 * time.Second) {
+ for start := time.Now(); time.Now().Sub(start) < time.Duration(maxGracefulTerminationSec)*time.Second+PodEvictionHeadroom; time.Sleep(5 * time.Second) {
allGone = true
for _, pod := range pods {
podreturned, err := client.Core().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
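
The loop above gives the evicted pods up to maxGracefulTerminationSec seconds plus PodEvictionHeadroom to actually disappear, re-polling every 5 seconds. The per-pod check that follows the Get call is cut off by the diff; the pattern is roughly the sketch below, where only a NotFound error counts as the pod being gone (kube_errors standing in for the apimachinery api/errors package is an assumed alias):

	// Sketch of the gone-check: a pod still returned by the API server means
	// the drain has not finished; any error other than NotFound is treated
	// conservatively as "still present".
	if err == nil && podreturned != nil {
		allGone = false
		break
	}
	if err != nil && !kube_errors.IsNotFound(err) {
		allGone = false
		break
	}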


@ -278,7 +278,7 @@ func TestScaleDown(t *testing.T) {
AutoscalingOptions: AutoscalingOptions{
ScaleDownUtilizationThreshold: 0.5,
ScaleDownUnneededTime: time.Minute,
- MaxGratefulTerminationSec: 60,
+ MaxGracefulTerminationSec: 60,
},
PredicateChecker: simulator.NewTestPredicateChecker(),
CloudProvider: provider,
@ -337,7 +337,7 @@ func TestNoScaleDownUnready(t *testing.T) {
ScaleDownUtilizationThreshold: 0.5,
ScaleDownUnneededTime: time.Minute,
ScaleDownUnreadyTime: time.Hour,
- MaxGratefulTerminationSec: 60,
+ MaxGracefulTerminationSec: 60,
},
PredicateChecker: simulator.NewTestPredicateChecker(),
CloudProvider: provider,
@ -441,7 +441,7 @@ func TestScaleDownNoMove(t *testing.T) {
ScaleDownUtilizationThreshold: 0.5,
ScaleDownUnneededTime: time.Minute,
ScaleDownUnreadyTime: time.Hour,
- MaxGratefulTerminationSec: 60,
+ MaxGracefulTerminationSec: 60,
},
PredicateChecker: simulator.NewTestPredicateChecker(),
CloudProvider: provider,


@ -86,7 +86,7 @@ var (
maxNodesTotal = flag.Int("max-nodes-total", 0, "Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number.")
cloudProviderFlag = flag.String("cloud-provider", "gce", "Cloud provider type. Allowed values: gce, aws, azure")
maxEmptyBulkDeleteFlag = flag.Int("max-empty-bulk-delete", 10, "Maximum number of empty nodes that can be deleted at the same time.")
- maxGratefulTerminationFlag = flag.Int("max-grateful-termination-sec", 60, "Maximum number of seconds CA waints for pod termination when trying to scale down a node.")
+ maxGracefulTerminationFlag = flag.Int("max-graceful-termination-sec", 60, "Maximum number of seconds CA waints for pod termination when trying to scale down a node.")
maxTotalUnreadyPercentage = flag.Float64("max-total-unready-percentage", 33, "Maximum percentage of unready nodes after which CA halts operations")
okTotalUnreadyCount = flag.Int("ok-total-unready-count", 3, "Number of allowed unready nodes, irrespective of max-total-unready-percentage")
maxNodeProvisionTime = flag.Duration("max-node-provision-time", 15*time.Minute, "Maximum time CA waits for node to be provisioned")
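
Note that this rename is user-facing: the command-line flag itself changes from --max-grateful-termination-sec to --max-graceful-termination-sec, so existing invocations passing the old spelling need updating. A plausible example invocation (the binary name and the accompanying flags are shown only for illustration):

	cluster-autoscaler --cloud-provider=gce --max-graceful-termination-sec=60 --max-empty-bulk-delete=10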
@ -114,7 +114,7 @@ func createAutoscalerOptions() core.AutoscalerOptions {
EstimatorName: *estimatorFlag,
ExpanderName: *expanderFlag,
MaxEmptyBulkDelete: *maxEmptyBulkDeleteFlag,
- MaxGratefulTerminationSec: *maxGratefulTerminationFlag,
+ MaxGracefulTerminationSec: *maxGracefulTerminationFlag,
MaxNodeProvisionTime: *maxNodeProvisionTime,
MaxNodesTotal: *maxNodesTotal,
NodeGroups: nodeGroupsFlag,