mirror of https://github.com/kubernetes/kops.git
Don't pass ctx and cluster everywhere
commit 63f13322d5
parent d83f8e5e4d
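The change itself is mechanical: instead of threading a context.Context and a *api.Cluster through every method of instancegroups.RollingUpdateCluster, both are stored on the struct once, at construction time, and the methods read them back as c.Ctx and c.Cluster. The shape of the refactoring, reduced to a self-contained sketch (the Cluster type and the method body here are simplified stand-ins for the real kops types, kept only so the example compiles and runs):

package main

import (
	"context"
	"fmt"
)

// Cluster stands in for kops' *api.Cluster.
type Cluster struct {
	Name string
}

// RollingUpdateCluster carries the context and the cluster as fields,
// set once at construction, instead of receiving them as parameters
// on every method.
type RollingUpdateCluster struct {
	Ctx     context.Context
	Cluster *Cluster
}

// Before: RollingUpdate(ctx context.Context, cluster *Cluster, ...) error
// After: the receiver already has everything it needs.
func (c *RollingUpdateCluster) RollingUpdate() error {
	// Cancellation still works; the context is simply read from the field.
	if err := c.Ctx.Err(); err != nil {
		return err
	}
	fmt.Printf("rolling update of %s\n", c.Cluster.Name)
	return nil
}

func main() {
	c := &RollingUpdateCluster{
		Ctx:     context.Background(),
		Cluster: &Cluster{Name: "test.k8s.local"},
	}
	if err := c.RollingUpdate(); err != nil {
		fmt.Println(err)
	}
}

Storing a Context in a struct field runs against the usual guidance in the context package documentation; the trade-off accepted here is far less parameter plumbing for a type whose lifetime matches a single command invocation.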
@@ -236,6 +236,7 @@ func RunDeleteInstance(ctx context.Context, f *util.Factory, out io.Writer, opti
 	}

 	d := &instancegroups.RollingUpdateCluster{
+		Ctx:             ctx,
 		MasterInterval:  0,
 		NodeInterval:    0,
 		BastionInterval: 0,
@@ -264,7 +265,7 @@ func RunDeleteInstance(ctx context.Context, f *util.Factory, out io.Writer, opti
 	}
 	d.ClusterValidator = clusterValidator

-	return d.UpdateSingleInstance(ctx, cloudMember, options.Surge)
+	return d.UpdateSingleInstance(cloudMember, options.Surge)
 }

 func deleteNodeMatch(cloudMember *cloudinstances.CloudInstance, options *deleteInstanceOptions) bool {
@@ -328,6 +328,8 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer
 	}

 	d := &instancegroups.RollingUpdateCluster{
+		Ctx:             ctx,
+		Cluster:         cluster,
 		MasterInterval:  options.MasterInterval,
 		NodeInterval:    options.NodeInterval,
 		BastionInterval: options.BastionInterval,
@@ -347,7 +349,7 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer
 		ValidateSuccessDuration: 10 * time.Second,
 	}

-	err = d.AdjustNeedUpdate(groups, cluster, list)
+	err = d.AdjustNeedUpdate(groups, list)
 	if err != nil {
 		return err
 	}
@@ -430,5 +432,5 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer
 	}
 	d.ClusterValidator = clusterValidator

-	return d.RollingUpdate(ctx, groups, cluster, list)
+	return d.RollingUpdate(groups, list)
 }
@@ -69,7 +69,7 @@ func promptInteractive(upgradedHostID, upgradedHostName string) (stopPrompting b
 }

 // RollingUpdate performs a rolling update on a list of instances.
-func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(ctx context.Context, cluster *api.Cluster, group *cloudinstances.CloudInstanceGroup, sleepAfterTerminate time.Duration) (err error) {
+func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(group *cloudinstances.CloudInstanceGroup, sleepAfterTerminate time.Duration) (err error) {
 	isBastion := group.InstanceGroup.IsBastion()
 	// Do not need a k8s client if you are doing cloudonly.
 	if c.K8sClient == nil && !c.CloudOnly {
@@ -94,13 +94,13 @@ func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(ctx context.Context, c
 	}

 	if !c.CloudOnly {
-		err = c.taintAllNeedUpdate(ctx, group, update)
+		err = c.taintAllNeedUpdate(group, update)
 		if err != nil {
 			return err
 		}
 	}

-	settings := resolveSettings(cluster, group.InstanceGroup, numInstances)
+	settings := resolveSettings(c.Cluster, group.InstanceGroup, numInstances)

 	runningDrains := 0
 	maxSurge := settings.MaxSurge.IntValue()
@@ -162,7 +162,7 @@ func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(ctx context.Context, c

 	for uIdx, u := range update {
 		go func(m *cloudinstances.CloudInstance) {
-			terminateChan <- c.drainTerminateAndWait(ctx, m, sleepAfterTerminate)
+			terminateChan <- c.drainTerminateAndWait(m, sleepAfterTerminate)
 		}(u)
 		runningDrains++

@@ -260,7 +260,7 @@ func waitForPendingBeforeReturningError(runningDrains int, terminateChan chan er
 	return err
 }

-func (c *RollingUpdateCluster) taintAllNeedUpdate(ctx context.Context, group *cloudinstances.CloudInstanceGroup, update []*cloudinstances.CloudInstance) error {
+func (c *RollingUpdateCluster) taintAllNeedUpdate(group *cloudinstances.CloudInstanceGroup, update []*cloudinstances.CloudInstance) error {
 	var toTaint []*corev1.Node
 	for _, u := range update {
 		if u.Node != nil && !u.Node.Spec.Unschedulable {
@@ -282,7 +282,7 @@ func (c *RollingUpdateCluster) taintAllNeedUpdate(ctx context.Context, group *cl
 	}
 	klog.Infof("Tainting %d %s in %q instancegroup.", len(toTaint), noun, group.InstanceGroup.Name)
 	for _, n := range toTaint {
-		if err := c.patchTaint(ctx, n); err != nil {
+		if err := c.patchTaint(n); err != nil {
 			if c.FailOnDrainError {
 				return fmt.Errorf("failed to taint node %q: %v", n, err)
 			}
@@ -293,7 +293,7 @@ func (c *RollingUpdateCluster) taintAllNeedUpdate(ctx context.Context, group *cl
 	return nil
 }

-func (c *RollingUpdateCluster) patchTaint(ctx context.Context, node *corev1.Node) error {
+func (c *RollingUpdateCluster) patchTaint(node *corev1.Node) error {
 	oldData, err := json.Marshal(node)
 	if err != nil {
 		return err
@@ -314,14 +314,14 @@ func (c *RollingUpdateCluster) patchTaint(ctx context.Context, node *corev1.Node
 		return err
 	}

-	_, err = c.K8sClient.CoreV1().Nodes().Patch(ctx, node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+	_, err = c.K8sClient.CoreV1().Nodes().Patch(c.Ctx, node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 	if apierrors.IsNotFound(err) {
 		return nil
 	}
 	return err
 }

-func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *cloudinstances.CloudInstance, sleepAfterTerminate time.Duration) error {
+func (c *RollingUpdateCluster) drainTerminateAndWait(u *cloudinstances.CloudInstance, sleepAfterTerminate time.Duration) error {
 	instanceID := u.ID

 	nodeName := ""
@@ -360,7 +360,7 @@ func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *clo
 			klog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceID)
 		} else {
 			klog.Infof("deleting node %q from kubernetes", nodeName)
-			if err := c.deleteNode(ctx, u.Node); err != nil {
+			if err := c.deleteNode(u.Node); err != nil {
 				return fmt.Errorf("error deleting node %q: %v", nodeName, err)
 			}
 		}
@@ -551,9 +551,9 @@ func (c *RollingUpdateCluster) drainNode(u *cloudinstances.CloudInstance) error
 }

 // deleteNode deletes a node from the k8s API. It does not delete the underlying instance.
-func (c *RollingUpdateCluster) deleteNode(ctx context.Context, node *corev1.Node) error {
+func (c *RollingUpdateCluster) deleteNode(node *corev1.Node) error {
 	var options metav1.DeleteOptions
-	err := c.K8sClient.CoreV1().Nodes().Delete(ctx, node.Name, options)
+	err := c.K8sClient.CoreV1().Nodes().Delete(c.Ctx, node.Name, options)
 	if err != nil {
 		if apierrors.IsNotFound(err) {
 			return nil
@@ -566,7 +566,7 @@ func (c *RollingUpdateCluster) deleteNode(ctx context.Context, node *corev1.Node
 }

 // UpdateSingeInstance performs a rolling update on a single instance
-func (c *RollingUpdateCluster) UpdateSingleInstance(ctx context.Context, cloudMember *cloudinstances.CloudInstance, detach bool) error {
+func (c *RollingUpdateCluster) UpdateSingleInstance(cloudMember *cloudinstances.CloudInstance, detach bool) error {
 	if detach {
 		if cloudMember.CloudInstanceGroup.InstanceGroup.IsMaster() {
 			klog.Warning("cannot detach master instances. Assuming --surge=false")
@@ -581,5 +581,5 @@ func (c *RollingUpdateCluster) UpdateSingleInstance(ctx context.Context, cloudMe
 		}
 	}

-	return c.drainTerminateAndWait(ctx, cloudMember, 0)
+	return c.drainTerminateAndWait(cloudMember, 0)
 }
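The patchTaint path above is the one piece that talks to the API server with the stored context: it marshals the node, appends the kops.k8s.io/scheduled-for-update taint to a copy, diffs the two JSON documents into a strategic-merge patch, and sends it via Nodes().Patch. That flow can be exercised standalone against a fake clientset; a sketch under that assumption (the taint key and effect come from the taintPatch constant in the test file below; everything else is illustrative rather than kops code):

package main

import (
	"context"
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1a"}}
	client := fake.NewSimpleClientset(node)

	// Marshal the node as-is, taint a deep copy, and marshal that too.
	oldData, err := json.Marshal(node)
	if err != nil {
		panic(err)
	}
	tainted := node.DeepCopy()
	tainted.Spec.Taints = append(tainted.Spec.Taints, corev1.Taint{
		Effect: corev1.TaintEffectPreferNoSchedule,
		Key:    "kops.k8s.io/scheduled-for-update",
	})
	newData, err := json.Marshal(tainted)
	if err != nil {
		panic(err)
	}

	// The difference between the two becomes a strategic-merge patch;
	// it should print the same JSON as the tests' taintPatch constant.
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, corev1.Node{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patchBytes))

	// Applied the same way patchTaint does it, context argument first.
	_, err = client.CoreV1().Nodes().Patch(context.Background(), node.Name,
		types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
}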
@@ -33,7 +33,9 @@ import (

 // RollingUpdateCluster is a struct containing cluster information for a rolling update.
 type RollingUpdateCluster struct {
-	Cloud fi.Cloud
+	Ctx     context.Context
+	Cluster *api.Cluster
+	Cloud   fi.Cloud

 	// MasterInterval is the amount of time to wait after stopping a master instance
 	MasterInterval time.Duration
@@ -75,7 +77,7 @@ type RollingUpdateCluster struct {
 }

 // AdjustNeedUpdate adjusts the set of instances that need updating, using factors outside those known by the cloud implementation
-func (c *RollingUpdateCluster) AdjustNeedUpdate(groups map[string]*cloudinstances.CloudInstanceGroup, cluster *api.Cluster, instanceGroups *api.InstanceGroupList) error {
+func (c *RollingUpdateCluster) AdjustNeedUpdate(groups map[string]*cloudinstances.CloudInstanceGroup, instanceGroups *api.InstanceGroupList) error {
 	for _, group := range groups {
 		if group.Ready != nil {
 			var newReady []*cloudinstances.CloudInstance
@@ -101,7 +103,7 @@ func (c *RollingUpdateCluster) AdjustNeedUpdate(groups map[string]*cloudinstance
 }

 // RollingUpdate performs a rolling update on a K8s Cluster.
-func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[string]*cloudinstances.CloudInstanceGroup, cluster *api.Cluster, instanceGroups *api.InstanceGroupList) error {
+func (c *RollingUpdateCluster) RollingUpdate(groups map[string]*cloudinstances.CloudInstanceGroup, instanceGroups *api.InstanceGroupList) error {
 	if len(groups) == 0 {
 		klog.Info("Cloud Instance Group length is zero. Not doing a rolling-update.")
 		return nil
@@ -139,7 +141,7 @@ func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[str

 			defer wg.Done()

-			err := c.rollingUpdateInstanceGroup(ctx, cluster, bastionGroups[k], c.BastionInterval)
+			err := c.rollingUpdateInstanceGroup(bastionGroups[k], c.BastionInterval)

 			resultsMutex.Lock()
 			results[k] = err
@@ -164,7 +166,7 @@ func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[str
 	// and we don't want to roll all the masters at the same time. See issue #284

 	for _, k := range sortGroups(masterGroups) {
-		err := c.rollingUpdateInstanceGroup(ctx, cluster, masterGroups[k], c.MasterInterval)
+		err := c.rollingUpdateInstanceGroup(masterGroups[k], c.MasterInterval)

 		// Do not continue update if master(s) failed, cluster is potentially in an unhealthy state
 		if err != nil {
@@ -186,7 +188,7 @@ func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[str
 	}

 	for _, k := range sortGroups(nodeGroups) {
-		err := c.rollingUpdateInstanceGroup(ctx, cluster, nodeGroups[k], c.NodeInterval)
+		err := c.rollingUpdateInstanceGroup(nodeGroups[k], c.NodeInterval)

 		results[k] = err

@@ -49,7 +49,7 @@ const (
 	taintPatch = "{\"spec\":{\"taints\":[{\"effect\":\"PreferNoSchedule\",\"key\":\"kops.k8s.io/scheduled-for-update\"}]}}"
 )

-func getTestSetup() (*RollingUpdateCluster, *awsup.MockAWSCloud, *kopsapi.Cluster) {
+func getTestSetup() (*RollingUpdateCluster, *awsup.MockAWSCloud) {
 	k8sClient := fake.NewSimpleClientset()

 	mockcloud := awsup.BuildMockAWSCloud("us-east-1", "abc")
@@ -61,6 +61,8 @@ func getTestSetup() (*RollingUpdateCluster, *awsup.MockAWSCloud, *kopsapi.Cluste
 	cluster.Name = "test.k8s.local"

 	c := &RollingUpdateCluster{
+		Ctx:            context.Background(),
+		Cluster:        cluster,
 		Cloud:          mockcloud,
 		MasterInterval: 1 * time.Millisecond,
 		NodeInterval:   1 * time.Millisecond,
@@ -74,7 +76,7 @@ func getTestSetup() (*RollingUpdateCluster, *awsup.MockAWSCloud, *kopsapi.Cluste
 		ValidateCount: 2,
 	}

-	return c, mockcloud, cluster
+	return c, mockcloud
 }

 type successfulClusterValidator struct{}
@@ -181,12 +183,11 @@ func getGroupsAllNeedUpdate(k8sClient kubernetes.Interface, cloud awsup.AWSCloud
 }

 func TestRollingUpdateAllNeedUpdate(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	cordoned := ""
@@ -230,15 +231,14 @@ func TestRollingUpdateAllNeedUpdate(t *testing.T) {
 }

 func TestRollingUpdateAllNeedUpdateCloudonly(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	c.CloudOnly = true
 	c.ClusterValidator = &assertNotCalledClusterValidator{T: t}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assert.Empty(t, c.K8sClient.(*fake.Clientset).Actions())
@@ -250,15 +250,14 @@ func TestRollingUpdateAllNeedUpdateCloudonly(t *testing.T) {
 }

 func TestRollingUpdateAllNeedUpdateNoFailOnValidate(t *testing.T) {
-	ctx := context.TODO()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	c.FailOnValidate = false
 	c.ClusterValidator = &failingClusterValidator{}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	asgGroups, _ := cloud.Autoscaling().DescribeAutoScalingGroups(&autoscaling.DescribeAutoScalingGroupsInput{})
@@ -268,12 +267,11 @@ func TestRollingUpdateAllNeedUpdateNoFailOnValidate(t *testing.T) {
 }

 func TestRollingUpdateNoneNeedUpdate(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()
 	groups := getGroups(c.K8sClient, cloud)

-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assert.Empty(t, c.K8sClient.(*fake.Clientset).Actions())
@@ -285,14 +283,13 @@ func TestRollingUpdateNoneNeedUpdate(t *testing.T) {
 }

 func TestRollingUpdateNoneNeedUpdateWithForce(t *testing.T) {
-	ctx := context.TODO()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()
 	groups := getGroups(c.K8sClient, cloud)

 	c.Force = true

-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	asgGroups, _ := cloud.Autoscaling().DescribeAutoScalingGroups(&autoscaling.DescribeAutoScalingGroupsInput{})
@@ -302,13 +299,12 @@ func TestRollingUpdateNoneNeedUpdateWithForce(t *testing.T) {
 }

 func TestRollingUpdateEmptyGroup(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, _ := getTestSetup()
+	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)

-	err := c.RollingUpdate(ctx, groups, &kopsapi.Cluster{}, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -318,14 +314,13 @@ func TestRollingUpdateEmptyGroup(t *testing.T) {
 }

 func TestRollingUpdateUnknownRole(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()
 	groups := getGroups(c.K8sClient, cloud)

 	groups["node-1"].InstanceGroup.Spec.Role = "Unknown"

-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -335,14 +330,13 @@ func TestRollingUpdateUnknownRole(t *testing.T) {
 }

 func TestRollingUpdateAllNeedUpdateFailsValidation(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	c.ClusterValidator = &failingClusterValidator{}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -352,14 +346,13 @@ func TestRollingUpdateAllNeedUpdateFailsValidation(t *testing.T) {
 }

 func TestRollingUpdateAllNeedUpdateErrorsValidation(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	c.ClusterValidator = &erroringClusterValidator{}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -369,30 +362,28 @@ func TestRollingUpdateAllNeedUpdateErrorsValidation(t *testing.T) {
 }

 func TestRollingUpdateNodes1NeedsUpdateFailsValidation(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	c.ClusterValidator = &failingClusterValidator{}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 3)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
 }

 func TestRollingUpdateNodes1NeedsUpdateErrorsValidation(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	c.ClusterValidator = &erroringClusterValidator{}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 3)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -428,9 +419,8 @@ func (v *failAfterOneNodeClusterValidator) Validate() (*validation.ValidationClu
 }

 func TestRollingUpdateClusterFailsValidationAfterOneMaster(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	c.ClusterValidator = &failAfterOneNodeClusterValidator{
 		Cloud: cloud,
@@ -439,7 +429,7 @@ func TestRollingUpdateClusterFailsValidationAfterOneMaster(t *testing.T) {
 	}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -449,9 +439,8 @@ func TestRollingUpdateClusterFailsValidationAfterOneMaster(t *testing.T) {
 }

 func TestRollingUpdateClusterErrorsValidationAfterOneMaster(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	c.ClusterValidator = &failAfterOneNodeClusterValidator{
 		Cloud: cloud,
@@ -460,7 +449,7 @@ func TestRollingUpdateClusterErrorsValidationAfterOneMaster(t *testing.T) {
 	}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -470,9 +459,8 @@ func TestRollingUpdateClusterErrorsValidationAfterOneMaster(t *testing.T) {
 }

 func TestRollingUpdateClusterFailsValidationAfterOneNode(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	c.ClusterValidator = &failAfterOneNodeClusterValidator{
 		Cloud: cloud,
@@ -482,16 +470,15 @@ func TestRollingUpdateClusterFailsValidationAfterOneNode(t *testing.T) {

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 3)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 2)
 }

 func TestRollingUpdateClusterErrorsValidationAfterOneNode(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	c.ClusterValidator = &failAfterOneNodeClusterValidator{
 		Cloud: cloud,
@@ -501,7 +488,7 @@ func TestRollingUpdateClusterErrorsValidationAfterOneNode(t *testing.T) {

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 3)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 2)
@@ -543,9 +530,8 @@ func (v *flappingClusterValidator) Validate() (*validation.ValidationCluster, er
 }

 func TestRollingUpdateFlappingValidation(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	// This should only take a few milliseconds,
 	// but we have to pad to allow for random delays (e.g. GC)
@@ -558,7 +544,7 @@ func TestRollingUpdateFlappingValidation(t *testing.T) {
 	}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -588,9 +574,8 @@ func (v *failThreeTimesClusterValidator) Validate() (*validation.ValidationClust
 }

 func TestRollingUpdateValidatesAfterBastion(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	// This should only take a few milliseconds,
 	// but we have to pad to allow for random delays (e.g. GC)
@@ -600,7 +585,7 @@ func TestRollingUpdateValidatesAfterBastion(t *testing.T) {
 	c.ClusterValidator = &failThreeTimesClusterValidator{}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -632,7 +617,7 @@ func addNeedsUpdateAnnotation(group *cloudinstances.CloudInstanceGroup, node str
 }

 func TestAddAnnotatedNodesToNeedsUpdate(t *testing.T) {
-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "master-1", kopsapi.InstanceGroupRoleMaster, 2, 1)
@@ -643,7 +628,7 @@ func TestAddAnnotatedNodesToNeedsUpdate(t *testing.T) {
 	addNeedsUpdateAnnotation(groups["node-2"], "node-2a")
 	addNeedsUpdateAnnotation(groups["master-1"], "master-1b")

-	err := c.AdjustNeedUpdate(groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.AdjustNeedUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "AddAnnotatedNodesToGroups")

 	assertGroupNeedUpdate(t, groups, "node-1", "node-1a", "node-1b")
@@ -652,7 +637,7 @@ func TestAddAnnotatedNodesToNeedsUpdate(t *testing.T) {
 }

 func TestAddAnnotatedNodesToNeedsUpdateCloudonly(t *testing.T) {
-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "master-1", kopsapi.InstanceGroupRoleMaster, 2, 1)
@@ -666,7 +651,7 @@ func TestAddAnnotatedNodesToNeedsUpdateCloudonly(t *testing.T) {
 	c.CloudOnly = true
 	c.ClusterValidator = &assertNotCalledClusterValidator{T: t}

-	err := c.AdjustNeedUpdate(groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.AdjustNeedUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "AddAnnotatedNodesToGroups")

 	assertGroupNeedUpdate(t, groups, "node-1", "node-1a", "node-1b")
@@ -675,7 +660,7 @@ func TestAddAnnotatedNodesToNeedsUpdateCloudonly(t *testing.T) {
 }

 func TestAddAnnotatedNodesToNeedsUpdateNodesMissing(t *testing.T) {
-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 2, 1)
@@ -683,7 +668,7 @@ func TestAddAnnotatedNodesToNeedsUpdateNodesMissing(t *testing.T) {
 	groups["node-1"].Ready[0].Node = nil
 	groups["node-1"].NeedUpdate[0].Node = nil

-	err := c.AdjustNeedUpdate(groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.AdjustNeedUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "AddAnnotatedNodesToGroups")
 }

@@ -707,13 +692,12 @@ func assertGroupNeedUpdate(t *testing.T, groups map[string]*cloudinstances.Cloud
 }

 func TestRollingUpdateTaintAllButOneNeedUpdate(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 2)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	cordoned := ""
@@ -749,18 +733,17 @@ func TestRollingUpdateTaintAllButOneNeedUpdate(t *testing.T) {
 }

 func TestRollingUpdateMaxSurgeIgnoredForMaster(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	two := intstr.FromInt(2)
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		MaxSurge: &two,
 	}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "master-1", kopsapi.InstanceGroupRoleMaster, 3, 2)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	cordoned := ""
@@ -797,36 +780,16 @@ func TestRollingUpdateMaxSurgeIgnoredForMaster(t *testing.T) {
 }

-func TestRollingUpdateDisabled(t *testing.T) {
-	ctx := context.Background()
-
-	c, cloud, cluster := getTestSetup()
-
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
-		DrainAndTerminate: fi.Bool(false),
-	}
-
-	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
-	assert.NoError(t, err, "rolling update")
-
-	assertGroupInstanceCount(t, cloud, "node-1", 3)
-	assertGroupInstanceCount(t, cloud, "node-2", 3)
-	assertGroupInstanceCount(t, cloud, "master-1", 2)
-	assertGroupInstanceCount(t, cloud, "bastion-1", 1)
-}
-
 func TestRollingUpdateDisabledCloudonly(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()
 	c.CloudOnly = true

-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		DrainAndTerminate: fi.Bool(false),
 	}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -854,9 +817,8 @@ func (m *disabledSurgeTest) DetachInstances(input *autoscaling.DetachInstancesIn
 }

 func TestRollingUpdateDisabledSurge(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	disabledSurgeTest := &disabledSurgeTest{
 		AutoScalingAPI: cloud.MockAutoscaling,
@@ -866,13 +828,13 @@ func TestRollingUpdateDisabledSurge(t *testing.T) {
 	cloud.MockEC2 = &ec2IgnoreTags{EC2API: cloud.MockEC2}

 	one := intstr.FromInt(1)
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		DrainAndTerminate: fi.Bool(false),
 		MaxSurge:          &one,
 	}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -1067,9 +1029,8 @@ func newConcurrentTest(t *testing.T, cloud *awsup.MockAWSCloud, numSurge int, al
 }

 func TestRollingUpdateMaxUnavailableAllNeedUpdate(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	concurrentTest := newConcurrentTest(t, cloud, 0, true)
 	c.ValidateCount = 1
@@ -1077,14 +1038,14 @@ func TestRollingUpdateMaxUnavailableAllNeedUpdate(t *testing.T) {
 	cloud.MockEC2 = concurrentTest

 	two := intstr.FromInt(2)
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		MaxUnavailable: &two,
 	}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 7, 7)

-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -1092,9 +1053,8 @@ func TestRollingUpdateMaxUnavailableAllNeedUpdate(t *testing.T) {
 }

 func TestRollingUpdateMaxUnavailableAllButOneNeedUpdate(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	concurrentTest := newConcurrentTest(t, cloud, 0, false)
 	c.ValidateCount = 1
@@ -1102,13 +1062,13 @@ func TestRollingUpdateMaxUnavailableAllButOneNeedUpdate(t *testing.T) {
 	cloud.MockEC2 = concurrentTest

 	two := intstr.FromInt(2)
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		MaxUnavailable: &two,
 	}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 7, 6)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 1)
@@ -1116,9 +1076,8 @@ func TestRollingUpdateMaxUnavailableAllButOneNeedUpdate(t *testing.T) {
 }

 func TestRollingUpdateMaxUnavailableAllNeedUpdateMaster(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	concurrentTest := newConcurrentTest(t, cloud, 0, true)
 	c.ValidateCount = 1
@@ -1126,14 +1085,14 @@ func TestRollingUpdateMaxUnavailableAllNeedUpdateMaster(t *testing.T) {
 	cloud.MockEC2 = concurrentTest

 	two := intstr.FromInt(2)
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		MaxUnavailable: &two,
 	}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "master-1", kopsapi.InstanceGroupRoleMaster, 7, 7)

-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "master-1", 0)
@@ -1170,9 +1129,8 @@ func (e *ec2IgnoreTags) CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput,
 }

 func TestRollingUpdateMaxSurgeAllNeedUpdate(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	concurrentTest := newConcurrentTest(t, cloud, 2, true)
 	c.ValidateCount = 1
@@ -1184,14 +1142,14 @@ func TestRollingUpdateMaxSurgeAllNeedUpdate(t *testing.T) {
 	cloud.MockEC2 = &ec2IgnoreTags{EC2API: concurrentTest}

 	two := intstr.FromInt(2)
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		MaxSurge: &two,
 	}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 6, 6)

-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -1199,9 +1157,8 @@ func TestRollingUpdateMaxSurgeAllNeedUpdate(t *testing.T) {
 }

 func TestRollingUpdateMaxSurgeAllButOneNeedUpdate(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	concurrentTest := newConcurrentTest(t, cloud, 2, false)
 	c.ValidateCount = 1
@@ -1213,13 +1170,13 @@ func TestRollingUpdateMaxSurgeAllButOneNeedUpdate(t *testing.T) {
 	cloud.MockEC2 = &ec2IgnoreTags{EC2API: concurrentTest}

 	two := intstr.FromInt(2)
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		MaxSurge: &two,
 	}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 7, 6)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 1)
@@ -1237,22 +1194,21 @@ func (c *countDetach) DetachInstances(input *autoscaling.DetachInstancesInput) (
 }

 func TestRollingUpdateMaxSurgeGreaterThanNeedUpdate(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	countDetach := &countDetach{AutoScalingAPI: cloud.MockAutoscaling}
 	cloud.MockAutoscaling = countDetach
 	cloud.MockEC2 = &ec2IgnoreTags{EC2API: cloud.MockEC2}

 	ten := intstr.FromInt(10)
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		MaxSurge: &ten,
 	}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 2)
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 1)
@@ -1344,9 +1300,8 @@ func (m *alreadyDetachedTestAutoscaling) DetachInstances(input *autoscaling.Deta
 }

 func TestRollingUpdateMaxSurgeAllNeedUpdateOneAlreadyDetached(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	alreadyDetachedTest := &alreadyDetachedTest{
 		EC2API: cloud.MockEC2,
@@ -1364,7 +1319,7 @@ func TestRollingUpdateMaxSurgeAllNeedUpdateOneAlreadyDetached(t *testing.T) {
 	cloud.MockEC2 = &ec2IgnoreTags{EC2API: alreadyDetachedTest}

 	three := intstr.FromInt(3)
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		MaxSurge: &three,
 	}

@@ -1372,7 +1327,7 @@ func TestRollingUpdateMaxSurgeAllNeedUpdateOneAlreadyDetached(t *testing.T) {
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 4, 4)
 	alreadyDetachedTest.detached[groups["node-1"].NeedUpdate[3].ID] = true
 	groups["node-1"].NeedUpdate[3].Status = cloudinstances.CloudInstanceStatusDetached
-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -1380,9 +1335,8 @@ func TestRollingUpdateMaxSurgeAllNeedUpdateOneAlreadyDetached(t *testing.T) {
 }

 func TestRollingUpdateMaxSurgeAllNeedUpdateMaxAlreadyDetached(t *testing.T) {
-	ctx := context.Background()

-	c, cloud, cluster := getTestSetup()
+	c, cloud := getTestSetup()

 	// Should behave the same as TestRollingUpdateMaxUnavailableAllNeedUpdate
 	concurrentTest := newConcurrentTest(t, cloud, 0, true)
@@ -1391,7 +1345,7 @@ func TestRollingUpdateMaxSurgeAllNeedUpdateMaxAlreadyDetached(t *testing.T) {
 	cloud.MockEC2 = concurrentTest

 	two := intstr.FromInt(2)
-	cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
+	c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
 		MaxSurge: &two,
 	}

@@ -1406,7 +1360,7 @@ func TestRollingUpdateMaxSurgeAllNeedUpdateMaxAlreadyDetached(t *testing.T) {
 	groups["node-1"].NeedUpdate[6].Status = cloudinstances.CloudInstanceStatusNeedsUpdate
 	// TODO verify those are the last two instances terminated

-	err := c.RollingUpdate(ctx, groups, cluster, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)