Merge pull request https://github.com/kubernetes/contrib/pull/2502 from mwielgus/ca-starting-fix

Automatic merge from submit-queue

Cluster-autoscaler: Fix isNodeStarting

Fix for: https://github.com/kubernetes/kubernetes/issues/43709

cc: @MaciekPytel @fgrzadkowski
Kubernetes Submit Queue 2017-03-28 01:14:45 -07:00 committed by GitHub
commit 6c991c8465
4 changed files with 22 additions and 3 deletions


@@ -21,7 +21,7 @@ ifndef REGISTRY
$(ERR)
endif
docker build --pull -t ${REGISTRY}/cluster-autoscaler:${TAG} .
-gcloud docker push ${REGISTRY}/cluster-autoscaler:${TAG}
+gcloud docker -- push ${REGISTRY}/cluster-autoscaler:${TAG}
clean:
rm -f cluster-autoscaler


@@ -40,7 +40,7 @@ const (
// MaxStatusSettingDelayAfterCreation is the maximum time for node to set its initial status after the
// node is registered.
-MaxStatusSettingDelayAfterCreation = time.Minute
+MaxStatusSettingDelayAfterCreation = 2 * time.Minute
)
// ScaleUpRequest contains information about the requested node group scale up.
@@ -615,6 +615,16 @@ func isNodeNotStarted(node *apiv1.Node) bool {
condition.LastTransitionTime.Time.Sub(node.CreationTimestamp.Time) < MaxStatusSettingDelayAfterCreation {
return true
}
+if condition.Type == apiv1.NodeOutOfDisk &&
+condition.Status == apiv1.ConditionTrue &&
+condition.LastTransitionTime.Time.Sub(node.CreationTimestamp.Time) < MaxStatusSettingDelayAfterCreation {
+return true
+}
+if condition.Type == apiv1.NodeNetworkUnavailable &&
+condition.Status == apiv1.ConditionTrue &&
+condition.LastTransitionTime.Time.Sub(node.CreationTimestamp.Time) < MaxStatusSettingDelayAfterCreation {
+return true
+}
}
return false
}
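To make the new behavior concrete, here is a minimal, hypothetical test sketch (not part of this commit). It assumes it sits in the same package as isNodeNotStarted and reuses imports that package would already have available (testing, time, metav1 for object metadata, apiv1 for the core types shown in the diff). A node whose NodeNetworkUnavailable condition flipped to True shortly after creation, well within the 2-minute MaxStatusSettingDelayAfterCreation window, should now be reported as not started:

// Hypothetical test sketch, not part of this commit.
func TestIsNodeNotStartedNetworkUnavailable(t *testing.T) {
	created := time.Now()
	node := &apiv1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:              "node-1",
			CreationTimestamp: metav1.NewTime(created),
		},
		Status: apiv1.NodeStatus{
			Conditions: []apiv1.NodeCondition{
				{
					Type:   apiv1.NodeNetworkUnavailable,
					Status: apiv1.ConditionTrue,
					// 30s after creation, inside the 2-minute window.
					LastTransitionTime: metav1.NewTime(created.Add(30 * time.Second)),
				},
			},
		},
	}
	if !isNodeNotStarted(node) {
		t.Error("a node with NetworkUnavailable=True right after creation should still count as starting")
	}
}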


@@ -59,6 +59,7 @@ func ScaleUp(context *AutoscalingContext, unschedulablePods []*apiv1.Pod, nodes
upcomingNodes = append(upcomingNodes, nodeTemplate)
}
}
glog.V(4).Infof("Upcoming %d nodes", len(upcomingNodes))
podsRemainUnschedulable := make(map[*apiv1.Pod]bool)
expansionOptions := make([]expander.Option, 0)
@@ -119,12 +120,16 @@ func ScaleUp(context *AutoscalingContext, unschedulablePods []*apiv1.Pod, nodes
}
if option.NodeCount > 0 {
expansionOptions = append(expansionOptions, option)
+} else {
+glog.V(2).Infof("No need for any nodes in %s", nodeGroup.Id())
}
+} else {
+glog.V(4).Info("No pod can fit to %s", nodeGroup.Id())
}
}
if len(expansionOptions) == 0 {
glog.V(1).Info("No node group can help with pending pods.")
glog.V(1).Info("No expansion options")
for pod, unschedulable := range podsRemainUnschedulable {
if unschedulable {
context.Recorder.Event(pod, apiv1.EventTypeNormal, "NotTriggerScaleUp",
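The messages added above are gated by glog verbosity. As a small sketch of that gating (standard github.com/golang/glog behavior, not code from this PR; the package and function names below are made up for illustration): a message logged through glog.V(n) is only emitted when the binary runs with -v=n or higher, and Infof, rather than Info, is the variant that substitutes format verbs such as %s.

package logsketch

import "github.com/golang/glog"

// logNoFit mirrors the logging pattern used in the hunk above: the message
// is only visible when the process is started with -v=4 or higher.
func logNoFit(nodeGroupID string) {
	glog.V(4).Infof("No pod can fit to %s", nodeGroupID)
}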


@@ -187,12 +187,16 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) {
// in the describe situation.
schedulablePodsPresent := false
if a.VerifyUnschedulablePods {
glog.V(4).Infof("Filtering out schedulables")
newUnschedulablePodsToHelp := FilterOutSchedulable(unschedulablePodsToHelp, readyNodes, allScheduled,
a.PredicateChecker)
if len(newUnschedulablePodsToHelp) != len(unschedulablePodsToHelp) {
glog.V(2).Info("Schedulable pods present")
schedulablePodsPresent = true
+} else {
+glog.V(4).Info("No schedulable pods")
}
unschedulablePodsToHelp = newUnschedulablePodsToHelp
}
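For context, the "schedulable pods present" detection in the hunk above reduces to checking whether FilterOutSchedulable dropped anything from the list. A minimal sketch of that check (assumption, not code from this commit; apiv1 refers to the Kubernetes core v1 types used throughout the diff, and the helper name is hypothetical):

// schedulablePodsWerePresent reports whether filtering removed any pods,
// i.e. some pods marked unschedulable would in fact fit on the ready nodes.
func schedulablePodsWerePresent(before, after []*apiv1.Pod) bool {
	return len(after) != len(before)
}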