mirror of https://github.com/kubernetes/kops.git
Merge pull request #8239 from johngmyers/simplify-rolling
Simplify code for rolling updates of nodes
commit a22af4fa80
@@ -447,12 +447,3 @@ func (r *RollingUpdateInstanceGroup) deleteNode(node *corev1.Node, rollingUpdate
 	return nil
 }
 
-// Delete a CloudInstanceGroups
-func (r *RollingUpdateInstanceGroup) Delete() error {
-	if r.CloudGroup == nil {
-		return fmt.Errorf("group has to be set")
-	}
-
-	// TODO: Leaving func in place in order to cordon and drain nodes
-	return r.Cloud.DeleteGroup(r.CloudGroup)
-}
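For reference, the removed Delete wrapper only nil-checked the group and then delegated to the cloud implementation, so the same effect is available by calling the cloud API directly. A minimal sketch under that assumption; the deleteCloudGroup helper name and the package line are hypothetical, not kops call sites, while the fi.Cloud and cloudinstances.CloudInstanceGroup types mirror r.Cloud and r.CloudGroup from the diff:

package instancegroups // assumption: same package as the rolling-update code

import (
	"fmt"

	"k8s.io/kops/pkg/cloudinstances"
	"k8s.io/kops/upup/pkg/fi"
)

// deleteCloudGroup is a hypothetical helper mirroring the removed
// RollingUpdateInstanceGroup.Delete: validate the group, then delegate
// straight to the cloud's DeleteGroup.
func deleteCloudGroup(cloud fi.Cloud, group *cloudinstances.CloudInstanceGroup) error {
	if group == nil {
		return fmt.Errorf("group has to be set")
	}
	return cloud.DeleteGroup(group)
}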
@@ -148,42 +148,28 @@ func (c *RollingUpdateCluster) RollingUpdate(groups map[string]*cloudinstances.C
 		}
 	}
 
-	// Upgrade nodes, with greater parallelism
+	// Upgrade nodes
 	{
-		var wg sync.WaitGroup
-
 		// We run nodes in series, even if they are in separate instance groups
 		// typically they will not being separate instance groups. If you roll the nodes in parallel
 		// you can get into a scenario where you can evict multiple statefulset pods from the same
 		// statefulset at the same time. Further improvements needs to be made to protect from this as
 		// well.
 
-		wg.Add(1)
-
-		go func() {
-			for k := range nodeGroups {
-				resultsMutex.Lock()
-				results[k] = fmt.Errorf("function panic nodes")
-				resultsMutex.Unlock()
-			}
-
-			defer wg.Done()
-
-			for k, group := range nodeGroups {
-				g, err := NewRollingUpdateInstanceGroup(c.Cloud, group)
-				if err == nil {
-					err = g.RollingUpdate(c, cluster, false, c.NodeInterval, c.ValidationTimeout)
-				}
-
-				resultsMutex.Lock()
-				results[k] = err
-				resultsMutex.Unlock()
-
-				// TODO: Bail on error?
-			}
-		}()
-
-		wg.Wait()
+		for k := range nodeGroups {
+			results[k] = fmt.Errorf("function panic nodes")
+		}
+
+		for k, group := range nodeGroups {
+			g, err := NewRollingUpdateInstanceGroup(c.Cloud, group)
+			if err == nil {
+				err = g.RollingUpdate(c, cluster, false, c.NodeInterval, c.ValidationTimeout)
+			}
+
+			results[k] = err
+
+			// TODO: Bail on error?
+		}
 	}
 
 	for _, err := range results {
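The simplification rests on the fact that the old pattern spawned exactly one goroutine and then immediately blocked on wg.Wait(), so the node groups were already processed serially; once only the calling goroutine touches the results map, the resultsMutex locking around it is unnecessary as well. A small standalone sketch of that equivalence, with a hypothetical rollOne helper standing in for the per-group rolling update:

package main

import (
	"fmt"
	"sync"
)

// rollOne is a hypothetical stand-in for rolling a single instance group.
func rollOne(name string) error {
	fmt.Println("rolling", name)
	return nil
}

func main() {
	nodeGroups := []string{"nodes-a", "nodes-b"}
	results := map[string]error{}

	// Before: a single goroutine guarded by a WaitGroup that is waited on
	// immediately, so the caller blocks and nothing actually runs in parallel.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for _, g := range nodeGroups {
			results[g] = rollOne(g)
		}
	}()
	wg.Wait()

	// After: the same loop run inline on the calling goroutine, which is the
	// behavior the simplified code keeps; no WaitGroup or mutex needed.
	for _, g := range nodeGroups {
		results[g] = rollOne(g)
	}

	fmt.Println(results)
}

Note that the diff still pre-populates results with a placeholder "function panic nodes" error before the loop, so a panic during RollingUpdate leaves a non-nil entry behind for that group.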