Fix the pod-recreate issue in partition style
Signed-off-by: yunbo <yunbo10124scut@gmail.com>
parent efbd8ba8f9
commit 8a8fa47acb
@@ -154,7 +154,7 @@ func ConversionToV1beta1TrafficRoutingRef(src TrafficRoutingRef) (dst v1beta1.Tr
 func ConversionToV1beta1TrafficRoutingStrategy(src TrafficRoutingStrategy) (dst v1beta1.TrafficRoutingStrategy) {
     if src.Weight != nil {
-        dst.Traffic = utilpointer.StringPtr(fmt.Sprintf("%d", *src.Weight) + "%")
+        dst.Traffic = utilpointer.String(fmt.Sprintf("%d", *src.Weight) + "%")
     }
     dst.RequestHeaderModifier = src.RequestHeaderModifier
     for _, match := range src.Matches {
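A note on the rename repeated throughout this commit: in k8s.io/utils/pointer, the *Ptr helpers (StringPtr, Int32Ptr, BoolPtr) are deprecated aliases of the shorter constructors (String, Int32, Bool), so each swap is behavior-preserving. A minimal standalone sketch, not code from this repository:

package main

import (
    "fmt"

    utilpointer "k8s.io/utils/pointer"
)

func main() {
    // The deprecated alias and the preferred name build equivalent pointers.
    old := utilpointer.StringPtr("20%") // deprecated spelling
    cur := utilpointer.String("20%")    // preferred spelling
    fmt.Println(*old == *cur)           // true
}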
@@ -462,9 +462,9 @@ func (r *RolloutStatus) GetSubStatus() *CommonStatus {
         return nil
     }
     if r.CanaryStatus != nil {
-        return &(r.CanaryStatus.CommonStatus)
+        return &r.CanaryStatus.CommonStatus
     }
-    return &(r.BlueGreenStatus.CommonStatus)
+    return &r.BlueGreenStatus.CommonStatus
 }

 func (r *RolloutStatus) IsSubStatusEmpty() bool {
@@ -100,7 +100,7 @@ var (
             },
         },
         Spec: apps.DeploymentSpec{
-            Replicas: pointer.Int32Ptr(100),
+            Replicas: pointer.Int32(100),
             Strategy: apps.DeploymentStrategy{
                 Type: apps.RollingUpdateDeploymentStrategyType,
                 RollingUpdate: &apps.RollingUpdateDeployment{
@@ -146,7 +146,7 @@ var (
             Name: "sample",
         },
         ReleasePlan: v1beta1.ReleasePlan{
-            BatchPartition: pointer.Int32Ptr(0),
+            BatchPartition: pointer.Int32(0),
             RollingStyle:   v1beta1.PartitionRollingStyle,
             Batches: []v1beta1.ReleaseBatch{
                 {
@@ -178,7 +178,7 @@ var (
             },
         },
         Spec: kruiseappsv1alpha1.CloneSetSpec{
-            Replicas: pointer.Int32Ptr(100),
+            Replicas: pointer.Int32(100),
             UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
                 Partition: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
                 MaxSurge:  &intstr.IntOrString{Type: intstr.Int, IntVal: int32(2)},
@@ -394,7 +394,7 @@ func TestReconcile_CloneSet(t *testing.T) {
             },
             GetCloneSet: func() []client.Object {
                 stable := getStableWithReady(stableClone, "v2").(*kruiseappsv1alpha1.CloneSet)
-                stable.Spec.Replicas = pointer.Int32Ptr(200)
+                stable.Spec.Replicas = pointer.Int32(200)
                 canary := getCanaryWithStage(stable, "v2", 0, true)
                 return []client.Object{
                     canary,
@@ -475,7 +475,7 @@ func TestReconcile_CloneSet(t *testing.T) {
                 release.Status.UpdateRevision = util.ComputeHash(canaryTemplate, nil)
                 release.Status.CanaryStatus.UpdatedReplicas = 10
                 release.Status.CanaryStatus.UpdatedReadyReplicas = 10
-                release.Spec.ReleasePlan.BatchPartition = pointer.Int32Ptr(1)
+                release.Spec.ReleasePlan.BatchPartition = pointer.Int32(1)
                 release.Status.ObservedReleasePlanHash = util.HashReleasePlanBatches(&release.Spec.ReleasePlan)
                 return release
             },
@@ -651,7 +651,7 @@ func TestReconcile_Deployment(t *testing.T) {
                 release := releaseDeploy.DeepCopy()
                 release.Status.CanaryStatus.UpdatedReplicas = 10
                 release.Status.CanaryStatus.UpdatedReadyReplicas = 10
-                release.Spec.ReleasePlan.BatchPartition = pointer.Int32Ptr(1)
+                release.Spec.ReleasePlan.BatchPartition = pointer.Int32(1)
                 return setState(release, v1beta1.ReadyBatchState)
             },
             GetDeployments: func() []client.Object {
@@ -694,7 +694,7 @@ func TestReconcile_Deployment(t *testing.T) {
             },
             GetDeployments: func() []client.Object {
                 stable := getStableWithReady(stableDeploy, "v2").(*apps.Deployment)
-                stable.Spec.Replicas = pointer.Int32Ptr(200)
+                stable.Spec.Replicas = pointer.Int32(200)
                 canary := getCanaryWithStage(stable, "v2", 0, true)
                 return []client.Object{
                     stable, canary,
@@ -891,7 +891,7 @@ func getCanaryWithStage(workload client.Object, version string, stage int, ready
         d.ResourceVersion = strconv.Itoa(rand.Intn(100000000000))
         d.Labels[util.CanaryDeploymentLabel] = "87076677"
         d.Finalizers = []string{util.CanaryDeploymentFinalizer}
-        d.Spec.Replicas = pointer.Int32Ptr(int32(stageReplicas))
+        d.Spec.Replicas = pointer.Int32(int32(stageReplicas))
         d.Spec.Template.Spec.Containers = containers(version)
         d.Status.Replicas = int32(stageReplicas)
         d.Status.ReadyReplicas = int32(stageReplicas)
@@ -91,14 +91,14 @@ func TestWorkloadEventHandler_Update(t *testing.T) {
                 oldObject := getStableWithReady(stableDeploy, "v2").(*apps.Deployment)
                 oldObject.SetGeneration(2)
                 oldObject.Status.ObservedGeneration = 2
-                oldObject.Spec.Replicas = pointer.Int32Ptr(1000)
+                oldObject.Spec.Replicas = pointer.Int32(1000)
                 return oldObject
             },
             GetNewWorkload: func() client.Object {
                 newObject := getStableWithReady(stableDeploy, "v2").(*apps.Deployment)
                 newObject.SetGeneration(2)
                 newObject.Status.ObservedGeneration = 2
-                newObject.Spec.Replicas = pointer.Int32Ptr(1000)
+                newObject.Spec.Replicas = pointer.Int32(1000)
                 newObject.Status.Replicas = 1000
                 return newObject
             },
@@ -305,7 +305,7 @@ func TestCalculateBatchContext(t *testing.T) {
             workload: func() *kruiseappsv1alpha1.CloneSet {
                 return &kruiseappsv1alpha1.CloneSet{
                     Spec: kruiseappsv1alpha1.CloneSetSpec{
-                        Replicas: pointer.Int32Ptr(10),
+                        Replicas: pointer.Int32(10),
                         UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
                             MaxSurge: func() *intstr.IntOrString { p := intstr.FromInt(1); return &p }(),
                         },
@@ -404,7 +404,7 @@ func TestCalculateBatchContext(t *testing.T) {
             workload: func() *kruiseappsv1alpha1.CloneSet {
                 return &kruiseappsv1alpha1.CloneSet{
                     Spec: kruiseappsv1alpha1.CloneSetSpec{
-                        Replicas: pointer.Int32Ptr(10),
+                        Replicas: pointer.Int32(10),
                         UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
                             MaxSurge: func() *intstr.IntOrString { p := intstr.FromString("100%"); return &p }(),
                         },
@@ -48,7 +48,7 @@ func NewControlPlane(f NewInterfaceFunc, cli client.Client, recorder record.Even
         newStatus: newStatus,
         Interface: f(cli, key, gvk),
         release:   release.DeepCopy(),
-        patcher:   labelpatch.NewLabelPatcher(cli, klog.KObj(release)),
+        patcher:   labelpatch.NewLabelPatcher(cli, klog.KObj(release), release.Spec.ReleasePlan.Batches),
     }
 }

@@ -439,7 +439,7 @@ func TestCalculateBatchContext(t *testing.T) {
             AvailableReplicas: 9,
             ReadyReplicas:     9,
         }
-        deployment.Spec.Replicas = pointer.Int32Ptr(5)
+        deployment.Spec.Replicas = pointer.Int32(5)
         // current partition, ie. maxSurge
         deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "90%"}
         newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet)
@@ -50,7 +50,7 @@ func NewControlPlane(f NewInterfaceFunc, cli client.Client, recorder record.Even
         newStatus: newStatus,
         Interface: f(cli, key),
         release:   release.DeepCopy(),
-        patcher:   labelpatch.NewLabelPatcher(cli, klog.KObj(release)),
+        patcher:   labelpatch.NewLabelPatcher(cli, klog.KObj(release), release.Spec.ReleasePlan.Batches),
     }
 }

@@ -145,7 +145,7 @@ func (r *realCanaryController) create(release *v1beta1.BatchRelease, template *a
             canary.Spec.Template.Annotations[k] = v
         }
     }
-    canary.Spec.Replicas = pointer.Int32Ptr(0)
+    canary.Spec.Replicas = pointer.Int32(0)
     canary.Spec.Paused = false

     if err := r.canaryClient.Create(context.TODO(), canary); err != nil {
@@ -101,7 +101,7 @@ var (
             UpdatedReplicas:    10,
             ReadyReplicas:      10,
             AvailableReplicas:  10,
-            CollisionCount:     pointer.Int32Ptr(1),
+            CollisionCount:     pointer.Int32(1),
             ObservedGeneration: 1,
         },
     }
@@ -163,7 +163,7 @@ func TestCalculateBatchContext(t *testing.T) {
             workload: func() (*apps.Deployment, *apps.Deployment) {
                 stable := &apps.Deployment{
                     Spec: apps.DeploymentSpec{
-                        Replicas: pointer.Int32Ptr(10),
+                        Replicas: pointer.Int32(10),
                     },
                     Status: apps.DeploymentStatus{
                         Replicas: 10,
@@ -173,7 +173,7 @@ func TestCalculateBatchContext(t *testing.T) {
                 }
                 canary := &apps.Deployment{
                     Spec: apps.DeploymentSpec{
-                        Replicas: pointer.Int32Ptr(5),
+                        Replicas: pointer.Int32(5),
                     },
                     Status: apps.DeploymentStatus{
                         Replicas: 5,
@@ -101,7 +101,7 @@ var (
             UpdateRevision:     "version-2",
             CurrentRevision:    "version-1",
             ObservedGeneration: 1,
-            CollisionCount:     pointer.Int32Ptr(1),
+            CollisionCount:     pointer.Int32(1),
         },
     }

@@ -162,7 +162,7 @@ func TestCalculateBatchContext(t *testing.T) {
             workload: func() *kruiseappsv1alpha1.CloneSet {
                 return &kruiseappsv1alpha1.CloneSet{
                     Spec: kruiseappsv1alpha1.CloneSetSpec{
-                        Replicas: pointer.Int32Ptr(10),
+                        Replicas: pointer.Int32(10),
                         UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
                             Partition: func() *intstr.IntOrString { p := intstr.FromString("100%"); return &p }(),
                         },
@@ -211,7 +211,7 @@ func TestCalculateBatchContext(t *testing.T) {
             workload: func() *kruiseappsv1alpha1.CloneSet {
                 return &kruiseappsv1alpha1.CloneSet{
                     Spec: kruiseappsv1alpha1.CloneSetSpec{
-                        Replicas: pointer.Int32Ptr(20),
+                        Replicas: pointer.Int32(20),
                         UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
                             Partition: func() *intstr.IntOrString { p := intstr.FromString("100%"); return &p }(),
                         },
@@ -253,7 +253,7 @@ func TestCalculateBatchContext(t *testing.T) {
                 Replicas:               20,
                 UpdatedReplicas:        10,
                 UpdatedReadyReplicas:   10,
-                NoNeedUpdatedReplicas:  pointer.Int32Ptr(10),
+                NoNeedUpdatedReplicas:  pointer.Int32(10),
                 PlannedUpdatedReplicas: 4,
                 DesiredUpdatedReplicas: 12,
                 CurrentPartition:       intstr.FromString("100%"),
@@ -54,7 +54,7 @@ func NewControlPlane(f NewInterfaceFunc, cli client.Client, recorder record.Even
         newStatus: newStatus,
         Interface: f(cli, key, gvk),
         release:   release.DeepCopy(),
-        patcher:   labelpatch.NewLabelPatcher(cli, klog.KObj(release)),
+        patcher:   labelpatch.NewLabelPatcher(cli, klog.KObj(release), release.Spec.ReleasePlan.Batches),
     }
 }

@@ -160,7 +160,7 @@ func TestCalculateBatchContext(t *testing.T) {
                 Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "foo"}},
                 UpdateStrategy: kruiseappsv1alpha1.DaemonSetUpdateStrategy{
                     RollingUpdate: &kruiseappsv1alpha1.RollingUpdateDaemonSet{
-                        Partition: pointer.Int32Ptr(10),
+                        Partition: pointer.Int32(10),
                     },
                 },
             },
@@ -233,7 +233,7 @@ func TestCalculateBatchContext(t *testing.T) {
                 Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "foo"}},
                 UpdateStrategy: kruiseappsv1alpha1.DaemonSetUpdateStrategy{
                     RollingUpdate: &kruiseappsv1alpha1.RollingUpdateDaemonSet{
-                        Partition: pointer.Int32Ptr(10),
+                        Partition: pointer.Int32(10),
                     },
                 },
             },
@@ -288,7 +288,7 @@ func TestCalculateBatchContext(t *testing.T) {
                 Replicas:               10,
                 UpdatedReplicas:        5,
                 UpdatedReadyReplicas:   5,
-                NoNeedUpdatedReplicas:  pointer.Int32Ptr(5),
+                NoNeedUpdatedReplicas:  pointer.Int32(5),
                 PlannedUpdatedReplicas: 2,
                 DesiredUpdatedReplicas: 6,
                 CurrentPartition:       intstr.FromInt(10),
@@ -101,7 +101,7 @@ var (
             UpdatedReplicas:    10,
             ReadyReplicas:      10,
             AvailableReplicas:  10,
-            CollisionCount:     pointer.Int32Ptr(1),
+            CollisionCount:     pointer.Int32(1),
             ObservedGeneration: 1,
         },
     }
@@ -178,7 +178,7 @@ func TestCalculateBatchContext(t *testing.T) {
                     },
                 },
                 Spec: apps.DeploymentSpec{
-                    Replicas: pointer.Int32Ptr(10),
+                    Replicas: pointer.Int32(10),
                 },
                 Status: apps.DeploymentStatus{
                     Replicas: 10,
@@ -242,7 +242,7 @@ func TestCalculateBatchContext(t *testing.T) {
                     },
                 },
                 Spec: apps.DeploymentSpec{
-                    Replicas: pointer.Int32Ptr(5),
+                    Replicas: pointer.Int32(5),
                 },
                 Status: apps.DeploymentStatus{
                     Replicas: 5,
@@ -108,7 +108,7 @@ var (
             CurrentRevision:      "version-1",
             ObservedGeneration:   1,
             UpdatedReadyReplicas: 0,
-            CollisionCount:       pointer.Int32Ptr(1),
+            CollisionCount:       pointer.Int32(1),
         },
     }

@@ -183,11 +183,11 @@ func TestCalculateBatchContextForNativeStatefulSet(t *testing.T) {
                 },
                 Spec: apps.StatefulSetSpec{
                     Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "foo"}},
-                    Replicas: pointer.Int32Ptr(10),
+                    Replicas: pointer.Int32(10),
                     UpdateStrategy: apps.StatefulSetUpdateStrategy{
                         Type: apps.RollingUpdateStatefulSetStrategyType,
                         RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{
-                            Partition: pointer.Int32Ptr(100),
+                            Partition: pointer.Int32(100),
                         },
                     },
                 },
@@ -258,11 +258,11 @@ func TestCalculateBatchContextForNativeStatefulSet(t *testing.T) {
                 },
                 Spec: apps.StatefulSetSpec{
                     Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "foo"}},
-                    Replicas: pointer.Int32Ptr(20),
+                    Replicas: pointer.Int32(20),
                     UpdateStrategy: apps.StatefulSetUpdateStrategy{
                         Type: apps.RollingUpdateStatefulSetStrategyType,
                         RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{
-                            Partition: pointer.Int32Ptr(100),
+                            Partition: pointer.Int32(100),
                         },
                     },
                 },
@@ -312,7 +312,7 @@ func TestCalculateBatchContextForNativeStatefulSet(t *testing.T) {
                 Replicas:               20,
                 UpdatedReplicas:        10,
                 UpdatedReadyReplicas:   10,
-                NoNeedUpdatedReplicas:  pointer.Int32Ptr(10),
+                NoNeedUpdatedReplicas:  pointer.Int32(10),
                 PlannedUpdatedReplicas: 4,
                 DesiredUpdatedReplicas: 12,
                 CurrentPartition:       intstr.FromInt(100),
@@ -377,11 +377,11 @@ func TestCalculateBatchContextForAdvancedStatefulSet(t *testing.T) {
                 },
                 Spec: kruiseappsv1beta1.StatefulSetSpec{
                     Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "foo"}},
-                    Replicas: pointer.Int32Ptr(10),
+                    Replicas: pointer.Int32(10),
                     UpdateStrategy: kruiseappsv1beta1.StatefulSetUpdateStrategy{
                         Type: apps.RollingUpdateStatefulSetStrategyType,
                         RollingUpdate: &kruiseappsv1beta1.RollingUpdateStatefulSetStrategy{
-                            Partition: pointer.Int32Ptr(100),
+                            Partition: pointer.Int32(100),
                             UnorderedUpdate: &kruiseappsv1beta1.UnorderedUpdateStrategy{
                                 PriorityStrategy: &appsv1pub.UpdatePriorityStrategy{
                                     OrderPriority: []appsv1pub.UpdatePriorityOrderTerm{
@@ -461,11 +461,11 @@ func TestCalculateBatchContextForAdvancedStatefulSet(t *testing.T) {
                 },
                 Spec: kruiseappsv1beta1.StatefulSetSpec{
                     Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "foo"}},
-                    Replicas: pointer.Int32Ptr(20),
+                    Replicas: pointer.Int32(20),
                     UpdateStrategy: kruiseappsv1beta1.StatefulSetUpdateStrategy{
                         Type: apps.RollingUpdateStatefulSetStrategyType,
                         RollingUpdate: &kruiseappsv1beta1.RollingUpdateStatefulSetStrategy{
-                            Partition: pointer.Int32Ptr(100),
+                            Partition: pointer.Int32(100),
                             UnorderedUpdate: &kruiseappsv1beta1.UnorderedUpdateStrategy{
                                 PriorityStrategy: &appsv1pub.UpdatePriorityStrategy{
                                     OrderPriority: []appsv1pub.UpdatePriorityOrderTerm{
@@ -524,7 +524,7 @@ func TestCalculateBatchContextForAdvancedStatefulSet(t *testing.T) {
                 Replicas:               20,
                 UpdatedReplicas:        10,
                 UpdatedReadyReplicas:   10,
-                NoNeedUpdatedReplicas:  pointer.Int32Ptr(10),
+                NoNeedUpdatedReplicas:  pointer.Int32(10),
                 PlannedUpdatedReplicas: 4,
                 DesiredUpdatedReplicas: 12,
                 CurrentPartition:       intstr.FromInt(100),
@@ -39,8 +39,8 @@ import (
 // the pods that are really need to be rolled back according to release plan, but patch batch label according
 // original release plan, and will patch the pods that are really rolled back in priority.
-// - in batch 0: really roll back (20 - 10) * 20% = 2 pods, but 20 * 20% = 4 pod will be patched batch label;
-// - in batch 1: really roll back (20 - 10) * 50% = 5 pods, but 20 * 50% = 10 pod will be patched batch label;
+// - in batch 0: really roll back (20 - 10) * 50% = 5 pods, but 20 * 50% = 10 pod will be patched batch label;
+// - in batch 0: really roll back (20 - 10) * 100% = 10 pods, but 20 * 100% = 20 pod will be patched batch label;
 // - in batch 2: really roll back (20 - 10) * 100% = 10 pods, but 20 * 100% = 20 pod will be patched batch label;
 //
 // Mainly for PaaS platform display pod list in conveniently.
 //
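A standalone check of the arithmetic in the rollback comment above; the 20-replica/10-old-revision split and the 50% and 100% steps are the example's hypothetical numbers, not values read from a real release plan:

package main

import "fmt"

func main() {
    replicas, oldRevision := 20, 10 // 20 pods total, 10 still to roll back
    for _, pct := range []int{50, 100} {
        reallyRolledBack := oldRevision * pct / 100 // pods actually recreated
        labeled := replicas * pct / 100             // pods that receive the batch label
        fmt.Printf("%d%% step: roll back %d pods, label %d pods\n", pct, reallyRolledBack, labeled)
    }
}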
@@ -92,8 +92,8 @@ func FilterPodsForUnorderedUpdate(pods []*corev1.Pod, ctx *batchcontext.BatchCon
 // the pods that are really need to be rolled back according to release plan, but patch batch label according
 // original release plan, and will patch the pods that are really rolled back in priority.
-// - in batch 0: really roll back (20 - 10) * 20% = 2 pods, but 20 * 20% = 4 pod will be patched batch label;
-// - in batch 1: really roll back (20 - 10) * 50% = 5 pods, but 20 * 50% = 10 pod will be patched batch label;
+// - in batch 0: really roll back (20 - 10) * 50% = 5 pods, but 20 * 50% = 10 pod will be patched batch label;
+// - in batch 0: really roll back (20 - 10) * 100% = 10 pods, but 20 * 100% = 20 pod will be patched batch label;
 // - in batch 2: really roll back (20 - 10) * 100% = 10 pods, but 20 * 100% = 20 pod will be patched batch label;
 //
 // Mainly for PaaS platform display pod list in conveniently.
 //
@@ -19,12 +19,14 @@ package labelpatch
 import (
     "context"
     "fmt"
     "strconv"

+    "github.com/openkruise/rollouts/api/v1beta1"
     batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
     "github.com/openkruise/rollouts/pkg/util"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/klog/v2"
     "sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -33,13 +35,14 @@ type LabelPatcher interface {
     PatchPodBatchLabel(ctx *batchcontext.BatchContext) error
 }

-func NewLabelPatcher(cli client.Client, logKey klog.ObjectRef) *realPatcher {
-    return &realPatcher{Client: cli, logKey: logKey}
+func NewLabelPatcher(cli client.Client, logKey klog.ObjectRef, batches []v1beta1.ReleaseBatch) *realPatcher {
+    return &realPatcher{Client: cli, logKey: logKey, batches: batches}
 }

 type realPatcher struct {
     client.Client
     logKey klog.ObjectRef
+    batches []v1beta1.ReleaseBatch
 }

 func (r *realPatcher) PatchPodBatchLabel(ctx *batchcontext.BatchContext) error {
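The new batches field is what the pod-recreate fix hinges on: a pod that is deleted and recreated by the workload controller comes back without its rollout-id/batch-id labels, so the patcher must be able to re-derive how many pods each earlier batch should own instead of counting only against the current batch. A toy model of that bookkeeping (names and numbers hypothetical, not from this repository):

package main

import "fmt"

type pod struct{ name, batchID string }

func main() {
    // A recreated pod comes back without its batch-id label and must be
    // re-labeled into the correct batch, not blindly into the current one.
    pods := []pod{{"pod-a", "1"}, {"pod-b", ""}, {"pod-c", "2"}}
    for _, p := range pods {
        if p.batchID == "" {
            fmt.Printf("%s needs re-labeling\n", p.name)
        }
    }
}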
@@ -55,59 +58,86 @@ func (r *realPatcher) PatchPodBatchLabel(ctx *batchcontext.BatchContext) error {

 // PatchPodBatchLabel will patch rollout-id && batch-id to pods
 func (r *realPatcher) patchPodBatchLabel(pods []*corev1.Pod, ctx *batchcontext.BatchContext) error {
     // the number of active pods that has been patched successfully.
     patchedUpdatedReplicas := int32(0)
     // the number of target active pods that should be patched batch label.
     plannedUpdatedReplicas := ctx.PlannedUpdatedReplicas
+    plannedUpdatedReplicasForBatches := r.calculatePlannedStepIncrements(r.batches, int(ctx.Replicas), int(ctx.CurrentBatch))
+    var updatedButUnpatchedPods []*corev1.Pod

     for _, pod := range pods {
         if !util.IsConsistentWithRevision(pod, ctx.UpdateRevision) {
             continue
         }
+        if !pod.DeletionTimestamp.IsZero() {
+            klog.InfoS("Pod is being deleted, skip patching", "pod", klog.KObj(pod), "rollout", r.logKey)
+            continue
+        }
         podRolloutID := pod.Labels[v1beta1.RolloutIDLabel]
         if pod.DeletionTimestamp.IsZero() && podRolloutID == ctx.RolloutID {
             patchedUpdatedReplicas++
         }
     }

     // all pods that should be patched have been patched
     if patchedUpdatedReplicas >= plannedUpdatedReplicas {
         return nil // return fast
     }

     for _, pod := range pods {
         if pod.DeletionTimestamp.IsZero() {
             // we don't patch label for the active old revision pod
             if !util.IsConsistentWithRevision(pod, ctx.UpdateRevision) {
+                klog.InfoS("Pod is not consistent with revision, skip patching", "pod", klog.KObj(pod), "rollout", r.logKey)
                 continue
             }
             // we don't continue to patch if the goal is met
             if patchedUpdatedReplicas >= ctx.PlannedUpdatedReplicas {
                 continue
             }
         }

-        // if it has been patched, just ignore
-        if pod.Labels[v1beta1.RolloutIDLabel] == ctx.RolloutID {
+        if pod.Labels[v1beta1.RolloutIDLabel] != ctx.RolloutID {
+            // for example: new/recreated pods
+            updatedButUnpatchedPods = append(updatedButUnpatchedPods, pod)
+            klog.InfoS("Find a pod to add updatedButUnpatchedPods", "pod", klog.KObj(pod), "rollout", r.logKey)
             continue
         }

+        podBatchID, err := strconv.Atoi(pod.Labels[v1beta1.RolloutBatchIDLabel])
+        if err != nil {
+            klog.InfoS("Pod batchID is not a number, skip patching", "pod", klog.KObj(pod), "rollout", r.logKey)
+            continue
+        }
+        plannedUpdatedReplicasForBatches[podBatchID-1]--
+    }
+    klog.InfoS("updatedButUnpatchedPods amount calculated", "amount", len(updatedButUnpatchedPods), "rollout", r.logKey)
+    // patch the pods
+    for i := len(plannedUpdatedReplicasForBatches) - 1; i >= 0; i-- {
+        for ; plannedUpdatedReplicasForBatches[i] > 0; plannedUpdatedReplicasForBatches[i]-- {
+            if len(updatedButUnpatchedPods) == 0 {
+                klog.Warningf("no pods to patch for %v, batch %d", r.logKey, i+1)
+                return nil
+            }
+            // patch the updated but unpatched pod
+            pod := updatedButUnpatchedPods[len(updatedButUnpatchedPods)-1]
         clone := util.GetEmptyObjectWithKey(pod)
         by := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s","%s":"%d"}}}`,
-            v1beta1.RolloutIDLabel, ctx.RolloutID, v1beta1.RolloutBatchIDLabel, ctx.CurrentBatch+1)
+            v1beta1.RolloutIDLabel, ctx.RolloutID, v1beta1.RolloutBatchIDLabel, i+1)
         if err := r.Patch(context.TODO(), clone, client.RawPatch(types.StrategicMergePatchType, []byte(by))); err != nil {
             return err
         }

         if pod.DeletionTimestamp.IsZero() {
             patchedUpdatedReplicas++
+            klog.InfoS("Successfully patch Pod batchID", "batchID", i+1, "pod", klog.KObj(pod), "rollout", r.logKey)
+            // update the counter
+            updatedButUnpatchedPods = updatedButUnpatchedPods[:len(updatedButUnpatchedPods)-1]
         }
-        klog.Infof("Successfully patch Pod(%v) batchID %d label", klog.KObj(pod), ctx.CurrentBatch+1)
+        }
+        klog.InfoS("All pods has been patched batchID", "batchID", i+1, "rollout", r.logKey)
     }

     if patchedUpdatedReplicas >= plannedUpdatedReplicas {
+        // for rollback in batch, it is possible that some updated pods are remained unpatched, we won't report error
+        if len(updatedButUnpatchedPods) != 0 {
+            klog.Warningf("still has %d pods to patch for %v", len(updatedButUnpatchedPods), r.logKey)
+        }
         return nil
     }
     return fmt.Errorf("patched %v pods for %v, however the goal is %d", patchedUpdatedReplicas, r.logKey, plannedUpdatedReplicas)
 }
+
+func (r *realPatcher) calculatePlannedStepIncrements(batches []v1beta1.ReleaseBatch, workloadReplicas, currentBatch int) (res []int) {
+    // batchIndex greater than currentBatch will be patched with zero
+    res = make([]int, len(batches))
+    for i := 0; i <= currentBatch; i++ {
+        res[i] = calculateBatchReplicas(batches, workloadReplicas, i)
+    }
+    for i := currentBatch; i > 0; i-- {
+        res[i] -= res[i-1]
+        if res[i] < 0 {
+            klog.Warningf("Rollout %v batch replicas increment is less than 0", r.logKey)
+        }
+    }
+    return
+}
+
+func calculateBatchReplicas(batches []v1beta1.ReleaseBatch, workloadReplicas, currentBatch int) int {
+    batchSize, _ := intstr.GetScaledValueFromIntOrPercent(&batches[currentBatch].CanaryReplicas, workloadReplicas, true)
+    if batchSize > workloadReplicas {
+        klog.Warningf("releasePlan has wrong batch replicas, batches[%d].replicas %v is more than workload.replicas %v", currentBatch, batchSize, workloadReplicas)
+        batchSize = workloadReplicas
+    } else if batchSize < 0 {
+        klog.Warningf("releasePlan has wrong batch replicas, batches[%d].replicas %v is less than 0", currentBatch, batchSize)
+        batchSize = 0
+    }
+    return batchSize
+}
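To make the new quota arithmetic concrete: calculatePlannedStepIncrements first computes the cumulative target of every batch up to the current one, then differences those cumulative targets into per-batch increments. A standalone sketch with hypothetical numbers (a 20%/50%/100% plan over 10 replicas, current batch index 2); plain percentage scaling stands in for intstr.GetScaledValueFromIntOrPercent:

package main

import "fmt"

func main() {
    percents := []int{20, 50, 100} // hypothetical release plan, as percentages
    replicas, currentBatch := 10, 2

    res := make([]int, len(percents))
    for i := 0; i <= currentBatch; i++ {
        res[i] = replicas * percents[i] / 100 // cumulative target per batch
    }
    for i := currentBatch; i > 0; i-- {
        res[i] -= res[i-1] // difference into per-batch increments
    }
    fmt.Println(res) // [2 3 5]: batch 1 owns 2 pods, batch 2 owns 3, batch 3 owns 5
}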
@@ -28,6 +28,7 @@ import (
     apps "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/klog/v2"
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -46,7 +47,8 @@ func TestLabelPatcher(t *testing.T) {

     cases := map[string]struct {
         batchContext    func() *batchcontext.BatchContext
-        expectedPatched int
+        Batches         []v1beta1.ReleaseBatch
+        expectedPatched []int
     }{
         "10 pods, 0 patched, 5 new patched": {
             batchContext: func() *batchcontext.BatchContext {
@@ -54,14 +56,18 @@ func TestLabelPatcher(t *testing.T) {
                     RolloutID:              "rollout-1",
                     UpdateRevision:         "version-1",
                     PlannedUpdatedReplicas: 5,
                     CurrentBatch:           0,
                     Replicas:               10,
                 }
                 pods := generatePods(1, ctx.Replicas, 0, "", "", ctx.UpdateRevision)
                 ctx.Pods = pods
                 return ctx
             },
-            expectedPatched: 5,
+            Batches: []v1beta1.ReleaseBatch{
+                {
+                    CanaryReplicas: intstr.FromInt(5),
+                },
+            },
+            expectedPatched: []int{5},
         },
         "10 pods, 2 patched, 3 new patched": {
             batchContext: func() *batchcontext.BatchContext {
@@ -72,11 +78,16 @@ func TestLabelPatcher(t *testing.T) {
                     Replicas:               10,
                 }
                 pods := generatePods(1, ctx.Replicas, 2,
-                    ctx.RolloutID, strconv.Itoa(int(ctx.CurrentBatch)), ctx.UpdateRevision)
+                    ctx.RolloutID, strconv.Itoa(int(ctx.CurrentBatch+1)), ctx.UpdateRevision)
                 ctx.Pods = pods
                 return ctx
             },
-            expectedPatched: 5,
+            Batches: []v1beta1.ReleaseBatch{
+                {
+                    CanaryReplicas: intstr.FromInt(5),
+                },
+            },
+            expectedPatched: []int{5},
         },
         "10 pods, 5 patched, 0 new patched": {
             batchContext: func() *batchcontext.BatchContext {
@@ -87,11 +98,16 @@ func TestLabelPatcher(t *testing.T) {
                     Replicas:               10,
                 }
                 pods := generatePods(1, ctx.Replicas, 5,
-                    ctx.RolloutID, strconv.Itoa(int(ctx.CurrentBatch)), ctx.UpdateRevision)
+                    ctx.RolloutID, strconv.Itoa(int(ctx.CurrentBatch+1)), ctx.UpdateRevision)
                 ctx.Pods = pods
                 return ctx
             },
-            expectedPatched: 5,
+            Batches: []v1beta1.ReleaseBatch{
+                {
+                    CanaryReplicas: intstr.FromInt(5),
+                },
+            },
+            expectedPatched: []int{5},
         },
         "10 pods, 7 patched, 0 new patched": {
             batchContext: func() *batchcontext.BatchContext {
@@ -102,11 +118,16 @@ func TestLabelPatcher(t *testing.T) {
                     Replicas:               10,
                 }
                 pods := generatePods(1, ctx.Replicas, 7,
-                    ctx.RolloutID, strconv.Itoa(int(ctx.CurrentBatch)), ctx.UpdateRevision)
+                    ctx.RolloutID, strconv.Itoa(int(ctx.CurrentBatch+1)), ctx.UpdateRevision)
                 ctx.Pods = pods
                 return ctx
             },
-            expectedPatched: 7,
+            Batches: []v1beta1.ReleaseBatch{
+                {
+                    CanaryReplicas: intstr.FromInt(5),
+                },
+            },
+            expectedPatched: []int{7},
         },
         "2 pods, 0 patched, 2 new patched": {
             batchContext: func() *batchcontext.BatchContext {
@@ -117,11 +138,16 @@ func TestLabelPatcher(t *testing.T) {
                     Replicas:               10,
                 }
                 pods := generatePods(1, 2, 0,
-                    ctx.RolloutID, strconv.Itoa(int(ctx.CurrentBatch)), ctx.UpdateRevision)
+                    ctx.RolloutID, strconv.Itoa(int(ctx.CurrentBatch+1)), ctx.UpdateRevision)
                 ctx.Pods = pods
                 return ctx
             },
-            expectedPatched: 2,
+            Batches: []v1beta1.ReleaseBatch{
+                {
+                    CanaryReplicas: intstr.FromInt(5),
+                },
+            },
+            expectedPatched: []int{2},
         },
         "10 pods, 3 patched with old rollout-id, 5 new patched": {
             batchContext: func() *batchcontext.BatchContext {
@@ -132,11 +158,76 @@ func TestLabelPatcher(t *testing.T) {
                     Replicas:               10,
                 }
                 pods := generatePods(1, ctx.Replicas, 3,
-                    "previous-rollout-id", strconv.Itoa(int(ctx.CurrentBatch)), ctx.UpdateRevision)
+                    "previous-rollout-id", strconv.Itoa(int(ctx.CurrentBatch+1)), ctx.UpdateRevision)
                 ctx.Pods = pods
                 return ctx
             },
-            expectedPatched: 5,
+            Batches: []v1beta1.ReleaseBatch{
+                {
+                    CanaryReplicas: intstr.FromInt(5),
+                },
+            },
+            expectedPatched: []int{5},
+        },
+        "10 pods, 2 patched with batch-id:1, 3 new patched": {
+            batchContext: func() *batchcontext.BatchContext {
+                ctx := &batchcontext.BatchContext{
+                    RolloutID:              "rollout-1",
+                    UpdateRevision:         "version-1",
+                    PlannedUpdatedReplicas: 5,
+                    CurrentBatch:           1,
+                    Replicas:               10,
+                }
+                pods := generatePods(1, 5, 2,
+                    "rollout-1", strconv.Itoa(1), ctx.UpdateRevision)
+                ctx.Pods = pods
+                return ctx
+            },
+            Batches: []v1beta1.ReleaseBatch{
+                {CanaryReplicas: intstr.FromInt(2)},
+                {CanaryReplicas: intstr.FromInt(5)},
+            },
+            expectedPatched: []int{2, 3},
+        },
+        "10 pods, 0 patched with batch-id:1, 5 new patched": {
+            batchContext: func() *batchcontext.BatchContext {
+                ctx := &batchcontext.BatchContext{
+                    RolloutID:              "rollout-1",
+                    UpdateRevision:         "version-1",
+                    PlannedUpdatedReplicas: 5,
+                    CurrentBatch:           1,
+                    Replicas:               10,
+                }
+                pods := generatePods(1, 5, 0,
+                    "rollout-1", strconv.Itoa(1), ctx.UpdateRevision)
+                ctx.Pods = pods
+                return ctx
+            },
+            Batches: []v1beta1.ReleaseBatch{
+                {CanaryReplicas: intstr.FromInt(2)},
+                {CanaryReplicas: intstr.FromInt(5)},
+            },
+            expectedPatched: []int{2, 3},
+        },
+        "10 pods, 3 patched with batch-id:1, 2 new patched": {
+            batchContext: func() *batchcontext.BatchContext {
+                ctx := &batchcontext.BatchContext{
+                    RolloutID:              "rollout-1",
+                    UpdateRevision:         "version-1",
+                    PlannedUpdatedReplicas: 5,
+                    CurrentBatch:           1,
+                    Replicas:               10,
+                }
+                pods := generatePods(1, 5, 3,
+                    "rollout-1", strconv.Itoa(1), ctx.UpdateRevision)
+                ctx.Pods = pods
+                return ctx
+            },
+            Batches: []v1beta1.ReleaseBatch{
+                {CanaryReplicas: intstr.FromInt(2)},
+                {CanaryReplicas: intstr.FromInt(5)},
+            },
+            expectedPatched: []int{3, 2},
+        },
     }

@@ -148,22 +239,21 @@ func TestLabelPatcher(t *testing.T) {
                 objects = append(objects, pod)
             }
             cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objects...).Build()
-            patcher := NewLabelPatcher(cli, klog.ObjectRef{Name: "test"})
-            patchErr := patcher.patchPodBatchLabel(ctx.Pods, ctx)
+            patcher := NewLabelPatcher(cli, klog.ObjectRef{Name: "test"}, cs.Batches)
+            patcher.patchPodBatchLabel(ctx.Pods, ctx)

             podList := &corev1.PodList{}
             err := cli.List(context.TODO(), podList)
             Expect(err).NotTo(HaveOccurred())
-            patched := 0
+            patched := make([]int, ctx.CurrentBatch+1)
             for _, pod := range podList.Items {
                 if pod.Labels[v1beta1.RolloutIDLabel] == ctx.RolloutID {
-                    patched++
+                    if batchID, err := strconv.Atoi(pod.Labels[v1beta1.RolloutBatchIDLabel]); err == nil {
+                        patched[batchID-1]++
+                    }
                 }
             }
-            Expect(patched).Should(BeNumerically("==", cs.expectedPatched))
-            if patched < int(ctx.PlannedUpdatedReplicas) {
-                Expect(patchErr).To(HaveOccurred())
-            }
+            Expect(patched).To(Equal(cs.expectedPatched))
         })
     }
 }
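Reading the new expectations: the test now asserts per-batch label counts rather than a single total. With the two-batch plan used by the new cases (2 pods cumulative for batch 1, 5 for batch 2), the differencing shown earlier yields quotas [2, 3], which is what the expectedPatched slices encode. A quick standalone check:

package main

import "fmt"

func main() {
    cumulative := []int{2, 5} // hypothetical plan: batch 1 → 2 pods, batch 2 → 5 pods (cumulative)
    increments := []int{cumulative[0], cumulative[1] - cumulative[0]}
    fmt.Println(increments) // [2 3]
}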
@@ -368,7 +368,7 @@ func (m *blueGreenReleaseManager) createBatchRelease(rollout *v1beta1.Rollout, r
             ReleasePlan: v1beta1.ReleasePlan{
                 Batches:          batches,
                 RolloutID:        rolloutID,
-                BatchPartition:   utilpointer.Int32Ptr(batch),
+                BatchPartition:   utilpointer.Int32(batch),
                 FailureThreshold: rollout.Spec.Strategy.BlueGreen.FailureThreshold,
                 RollingStyle:     v1beta1.BlueGreenRollingStyle,
             },
@@ -428,7 +428,7 @@ func (m *canaryReleaseManager) createBatchRelease(rollout *v1beta1.Rollout, roll
             ReleasePlan: v1beta1.ReleasePlan{
                 Batches:                  batches,
                 RolloutID:                rolloutID,
-                BatchPartition:           utilpointer.Int32Ptr(batch),
+                BatchPartition:           utilpointer.Int32(batch),
                 FailureThreshold:         rollout.Spec.Strategy.Canary.FailureThreshold,
                 PatchPodTemplateMetadata: rollout.Spec.Strategy.Canary.PatchPodTemplateMetadata,
                 RollingStyle:             rollout.Spec.Strategy.GetRollingStyle(),
@@ -123,7 +123,7 @@ func TestRunCanary(t *testing.T) {
                     Kind:       "Deployment",
                     Name:       dep2.Name,
                     UID:        "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180",
-                    Controller: utilpointer.BoolPtr(true),
+                    Controller: utilpointer.Bool(true),
                 },
             }
             rs2.Labels["pod-template-hash"] = "pod-template-hash-v2"
@@ -238,7 +238,7 @@ var (
                     Kind:       "Deployment",
                     Name:       "echoserver",
                     UID:        types.UID("606132e0-85ef-460a-8cf5-cd8f915a8cc3"),
-                    Controller: utilpointer.BoolPtr(true),
+                    Controller: utilpointer.Bool(true),
                 },
             },
         },
@@ -251,7 +251,7 @@ func (m *Manager) RouteAllTrafficToNewVersion(c *TrafficRoutingContext) (bool, e
     retry, remaining, err := grace.RunWithGraceSeconds(string(c.OwnerRef.UID), "updateRoute", graceSeconds, func() (bool, error) {
         // route all traffic to new version
         c.Strategy.Matches = nil
-        c.Strategy.Traffic = utilpointer.StringPtr("100%")
+        c.Strategy.Traffic = utilpointer.String("100%")
         //NOTE - This return value "verified" has the opposite semantics with "modified"
         verified, err := trController.EnsureRoutes(context.TODO(), &c.Strategy)
         if !verified {
@@ -5070,8 +5070,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() {

             // check pod batch label after scale
             By("check pod batch label after scale")
-            CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1)
-            CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 3)
+            CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 2)
+            CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 2)

             // resume rollout canary
             By("check rollout canary status success, resume rollout, and wait rollout canary complete")
@@ -5080,8 +5080,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() {
             WaitCloneSetAllPodsReady(workload)

             By("rollout completed, and check pod batch label")
-            CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1)
-            CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 3)
+            CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 2)
+            CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 2)
             CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "3", 2)
             CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "4", 4)
         })