mirror of https://github.com/kubernetes/kops.git
Extract drainTerminateAndWait()
This commit is contained in:
parent
adaf903b90
commit
91f4920537
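In outline: this change moves the per-instance drain, kubernetes node deletion, cloud instance termination, and the post-terminate sleep out of the RollingUpdate loop body and into a new drainTerminateAndWait method on RollingUpdateInstanceGroup. The loop body shrinks to a single call plus the existing cloudonly/validation handling (first hunk), the interactive prompt now derives nodeName locally because the top-of-loop computation moved with the extracted code (second hunk), and the helper itself is added after patchTaint (third hunk). Error behaviour is carried over unchanged: drain failures are only fatal when FailOnDrainError is set, and a DeleteInstance failure is logged and then returned.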
@@ -149,57 +149,11 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
 	}
 
 	for _, u := range update {
-		instanceId := u.ID
-
-		nodeName := ""
-		if u.Node != nil {
-			nodeName = u.Node.Name
-		}
-
-		if isBastion {
-			// We don't want to validate for bastions - they aren't part of the cluster
-		} else if rollingUpdateData.CloudOnly {
-
-			klog.Warning("Not draining cluster nodes as 'cloudonly' flag is set.")
-
-		} else {
-
-			if u.Node != nil {
-				klog.Infof("Draining the node: %q.", nodeName)
-
-				if err = r.DrainNode(u, rollingUpdateData); err != nil {
-					if rollingUpdateData.FailOnDrainError {
-						return fmt.Errorf("failed to drain node %q: %v", nodeName, err)
-					}
-					klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
-				}
-			} else {
-				klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId)
-			}
-		}
-
-		// We unregister the node before deleting it; if the replacement comes up with the same name it would otherwise still be cordoned
-		// (It often seems like GCE tries to re-use names)
-		if !isBastion && !rollingUpdateData.CloudOnly {
-			if u.Node == nil {
-				klog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceId)
-			} else {
-				klog.Infof("deleting node %q from kubernetes", nodeName)
-				if err := r.deleteNode(u.Node, rollingUpdateData); err != nil {
-					return fmt.Errorf("error deleting node %q: %v", nodeName, err)
-				}
-			}
-		}
-
-		if err = r.DeleteInstance(u); err != nil {
-			klog.Errorf("error deleting instance %q, node %q: %v", instanceId, nodeName, err)
+		err = r.drainTerminateAndWait(u, rollingUpdateData, isBastion, sleepAfterTerminate)
+		if err != nil {
 			return err
 		}
 
-		// Wait for the minimum interval
-		klog.Infof("waiting for %v after terminating instance", sleepAfterTerminate)
-		time.Sleep(sleepAfterTerminate)
-
 		if rollingUpdateData.CloudOnly {
 			klog.Warningf("Not validating cluster as cloudonly flag is set.")
@@ -218,6 +172,11 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
 		}
 
 		if rollingUpdateData.Interactive {
+			nodeName := ""
+			if u.Node != nil {
+				nodeName = u.Node.Name
+			}
+
 			stopPrompting, err := promptInteractive(u.ID, nodeName)
 			if err != nil {
 				return err
@@ -290,6 +249,61 @@ func (r *RollingUpdateInstanceGroup) patchTaint(rollingUpdateData *RollingUpdate
 	return err
 }
 
+func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.CloudInstanceGroupMember, rollingUpdateData *RollingUpdateCluster, isBastion bool, sleepAfterTerminate time.Duration) error {
+	instanceId := u.ID
+
+	nodeName := ""
+	if u.Node != nil {
+		nodeName = u.Node.Name
+	}
+
+	if isBastion {
+		// We don't want to validate for bastions - they aren't part of the cluster
+	} else if rollingUpdateData.CloudOnly {
+
+		klog.Warning("Not draining cluster nodes as 'cloudonly' flag is set.")
+
+	} else {
+
+		if u.Node != nil {
+			klog.Infof("Draining the node: %q.", nodeName)
+
+			if err := r.DrainNode(u, rollingUpdateData); err != nil {
+				if rollingUpdateData.FailOnDrainError {
+					return fmt.Errorf("failed to drain node %q: %v", nodeName, err)
+				}
+				klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
+			}
+		} else {
+			klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId)
+		}
+	}
+
+	// We unregister the node before deleting it; if the replacement comes up with the same name it would otherwise still be cordoned
+	// (It often seems like GCE tries to re-use names)
+	if !isBastion && !rollingUpdateData.CloudOnly {
+		if u.Node == nil {
+			klog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceId)
+		} else {
+			klog.Infof("deleting node %q from kubernetes", nodeName)
+			if err := r.deleteNode(u.Node, rollingUpdateData); err != nil {
+				return fmt.Errorf("error deleting node %q: %v", nodeName, err)
+			}
+		}
+	}
+
+	if err := r.DeleteInstance(u); err != nil {
+		klog.Errorf("error deleting instance %q, node %q: %v", instanceId, nodeName, err)
+		return err
+	}
+
+	// Wait for the minimum interval
+	klog.Infof("waiting for %v after terminating instance", sleepAfterTerminate)
+	time.Sleep(sleepAfterTerminate)
+
+	return nil
+}
+
 // validateClusterWithDuration runs validation.ValidateCluster until either we get positive result or the timeout expires
 func (r *RollingUpdateInstanceGroup) validateClusterWithDuration(rollingUpdateData *RollingUpdateCluster, duration time.Duration) error {
 	// Try to validate cluster at least once, this will handle durations that are lower
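As a standalone illustration of the same extract-method shape (hypothetical names throughout; this is not the kops code, just a compilable miniature), the per-item work is gathered into one helper so the calling loop only decides what to do with its error:

// Illustrative only; all identifiers below are made up for the example.
package main

import (
	"fmt"
	"time"
)

type instance struct {
	id   string
	node string // empty when the instance never registered with the cluster
}

// drainTerminateAndWait mirrors the shape of the extracted helper: drain when a node
// is registered, terminate the instance, then pause before the caller validates.
func drainTerminateAndWait(u instance, sleepAfterTerminate time.Duration) error {
	if u.node == "" {
		fmt.Printf("skipping drain of %s: not registered in kubernetes\n", u.id)
	} else {
		fmt.Printf("draining node %s\n", u.node)
	}
	fmt.Printf("terminating instance %s\n", u.id)
	time.Sleep(sleepAfterTerminate)
	return nil
}

func main() {
	update := []instance{{id: "i-1", node: "node-1"}, {id: "i-2"}}
	for _, u := range update {
		if err := drainTerminateAndWait(u, 10*time.Millisecond); err != nil {
			fmt.Println("rolling update failed:", err)
			return
		}
		// cluster validation would follow here, as it does in RollingUpdate
	}
}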