Rewrite rollout controller code (#102)
Signed-off-by: liheng.zms <liheng.zms@alibaba-inc.com> Signed-off-by: liheng.zms <liheng.zms@alibaba-inc.com>
This commit is contained in:
parent
c0b1fea7f8
commit
973e39b0c8
|
|
@ -1,5 +1,5 @@
|
|||
# Build the manager binary
|
||||
FROM golang:1.16 as builder
|
||||
FROM golang:1.18 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
# Build the manager binary
|
||||
FROM --platform=$BUILDPLATFORM golang:1.16 as builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.18 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
|
||||
|
|
|
|||
|
|
@ -26,6 +26,23 @@ import (
|
|||
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
|
||||
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
|
||||
|
||||
const (
|
||||
// RolloutIDLabel is set to workload labels.
|
||||
// RolloutIDLabel is designed to distinguish each workload revision publications.
|
||||
// The value of RolloutIDLabel corresponds Rollout.Spec.RolloutID.
|
||||
RolloutIDLabel = "rollouts.kruise.io/rollout-id"
|
||||
|
||||
// RolloutBatchIDLabel is patched in pod labels.
|
||||
// RolloutBatchIDLabel is the label key of batch id that will be patched to pods during rollout.
|
||||
// Only when RolloutIDLabel is set, RolloutBatchIDLabel will be patched.
|
||||
// Users can use RolloutIDLabel and RolloutBatchIDLabel to select the pods that are upgraded in some certain batch and release.
|
||||
RolloutBatchIDLabel = "rollouts.kruise.io/rollout-batch-id"
|
||||
|
||||
// RollbackInBatchAnnotation is set to rollout annotations.
|
||||
// RollbackInBatchAnnotation allow use disable quick rollback, and will roll back in batch style.
|
||||
RollbackInBatchAnnotation = "rollouts.kruise.io/rollback-in-batch"
|
||||
)
|
||||
|
||||
// RolloutSpec defines the desired state of Rollout
|
||||
type RolloutSpec struct {
|
||||
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
|
||||
|
|
@ -34,9 +51,11 @@ type RolloutSpec struct {
|
|||
ObjectRef ObjectRef `json:"objectRef"`
|
||||
// rollout strategy
|
||||
Strategy RolloutStrategy `json:"strategy"`
|
||||
// DeprecatedRolloutID is the deprecated field.
|
||||
// It is recommended that configure RolloutId in workload.annotations[rollouts.kruise.io/rollout-id].
|
||||
// RolloutID should be changed before each workload revision publication.
|
||||
// It is to distinguish consecutive multiple workload publications and rollout progress.
|
||||
RolloutID string `json:"rolloutID,omitempty"`
|
||||
DeprecatedRolloutID string `json:"rolloutID,omitempty"`
|
||||
}
|
||||
|
||||
type ObjectRef struct {
|
||||
|
|
@ -172,17 +191,12 @@ type RolloutStatus struct {
|
|||
|
||||
// observedGeneration is the most recent generation observed for this Rollout.
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
|
||||
// CanaryRevision the hash of the canary pod template
|
||||
// +optional
|
||||
//CanaryRevision string `json:"canaryRevision,omitempty"`
|
||||
// StableRevision indicates the revision pods that has successfully rolled out
|
||||
StableRevision string `json:"stableRevision,omitempty"`
|
||||
// Conditions a list of conditions a rollout can have.
|
||||
// +optional
|
||||
Conditions []RolloutCondition `json:"conditions,omitempty"`
|
||||
// Canary describes the state of the canary rollout
|
||||
// +optional
|
||||
CanaryStatus *CanaryStatus `json:"canaryStatus,omitempty"`
|
||||
// Conditions a list of conditions a rollout can have.
|
||||
// +optional
|
||||
Conditions []RolloutCondition `json:"conditions,omitempty"`
|
||||
// +optional
|
||||
//BlueGreenStatus *BlueGreenStatus `json:"blueGreenStatus,omitempty"`
|
||||
// Phase is the rollout phase.
|
||||
|
|
@ -221,11 +235,13 @@ const (
|
|||
ProgressingReasonInitializing = "Initializing"
|
||||
ProgressingReasonInRolling = "InRolling"
|
||||
ProgressingReasonFinalising = "Finalising"
|
||||
ProgressingReasonSucceeded = "Succeeded"
|
||||
ProgressingReasonCompleted = "Completed"
|
||||
ProgressingReasonCancelling = "Cancelling"
|
||||
ProgressingReasonCanceled = "Canceled"
|
||||
ProgressingReasonPaused = "Paused"
|
||||
|
||||
// RolloutConditionSucceeded indicates whether rollout is succeeded or failed.
|
||||
RolloutConditionSucceeded RolloutConditionType = "Succeeded"
|
||||
|
||||
// Terminating condition
|
||||
RolloutConditionTerminating RolloutConditionType = "Terminating"
|
||||
// Terminating Reason
|
||||
|
|
@ -241,11 +257,10 @@ type CanaryStatus struct {
|
|||
ObservedRolloutID string `json:"observedRolloutID,omitempty"`
|
||||
// RolloutHash from rollout.spec object
|
||||
RolloutHash string `json:"rolloutHash,omitempty"`
|
||||
// CanaryService holds the name of a service which selects pods with canary version and don't select any pods with stable version.
|
||||
CanaryService string `json:"canaryService"`
|
||||
// StableRevision indicates the revision of stable pods
|
||||
StableRevision string `json:"stableRevision,omitempty"`
|
||||
// CanaryRevision is calculated by rollout based on podTemplateHash, and the internal logic flow uses
|
||||
// It may be different from rs podTemplateHash in different k8s versions, so it cannot be used as service selector label
|
||||
// +optional
|
||||
CanaryRevision string `json:"canaryRevision"`
|
||||
// pod template hash is used as service selector label
|
||||
PodTemplateHash string `json:"podTemplateHash"`
|
||||
|
|
|
|||
|
|
@ -683,6 +683,11 @@ func (in *RolloutSpec) DeepCopy() *RolloutSpec {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RolloutStatus) DeepCopyInto(out *RolloutStatus) {
|
||||
*out = *in
|
||||
if in.CanaryStatus != nil {
|
||||
in, out := &in.CanaryStatus, &out.CanaryStatus
|
||||
*out = new(CanaryStatus)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]RolloutCondition, len(*in))
|
||||
|
|
@ -690,11 +695,6 @@ func (in *RolloutStatus) DeepCopyInto(out *RolloutStatus) {
|
|||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.CanaryStatus != nil {
|
||||
in, out := &in.CanaryStatus, &out.CanaryStatus
|
||||
*out = new(CanaryStatus)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutStatus.
|
||||
|
|
|
|||
|
|
@ -81,9 +81,11 @@ spec:
|
|||
type: object
|
||||
type: object
|
||||
rolloutID:
|
||||
description: RolloutID should be changed before each workload revision
|
||||
publication. It is to distinguish consecutive multiple workload
|
||||
publications and rollout progress.
|
||||
description: DeprecatedRolloutID is the deprecated field. It is recommended
|
||||
that configure RolloutId in workload.annotations[rollouts.kruise.io/rollout-id].
|
||||
RolloutID should be changed before each workload revision publication.
|
||||
It is to distinguish consecutive multiple workload publications
|
||||
and rollout progress.
|
||||
type: string
|
||||
strategy:
|
||||
description: rollout strategy
|
||||
|
|
@ -286,11 +288,6 @@ spec:
|
|||
different from rs podTemplateHash in different k8s versions,
|
||||
so it cannot be used as service selector label
|
||||
type: string
|
||||
canaryService:
|
||||
description: CanaryService holds the name of a service which selects
|
||||
pods with canary version and don't select any pods with stable
|
||||
version.
|
||||
type: string
|
||||
currentStepIndex:
|
||||
description: CurrentStepIndex defines the current step of the
|
||||
rollout is on. If the current step index is null, the controller
|
||||
|
|
@ -319,10 +316,13 @@ spec:
|
|||
rolloutHash:
|
||||
description: RolloutHash from rollout.spec object
|
||||
type: string
|
||||
stableRevision:
|
||||
description: StableRevision indicates the revision of stable pods
|
||||
type: string
|
||||
required:
|
||||
- canaryReadyReplicas
|
||||
- canaryReplicas
|
||||
- canaryService
|
||||
- canaryRevision
|
||||
- currentStepState
|
||||
- podTemplateHash
|
||||
type: object
|
||||
|
|
@ -374,11 +374,6 @@ spec:
|
|||
description: BlueGreenStatus *BlueGreenStatus `json:"blueGreenStatus,omitempty"`
|
||||
Phase is the rollout phase.
|
||||
type: string
|
||||
stableRevision:
|
||||
description: CanaryRevision the hash of the canary pod template CanaryRevision
|
||||
string `json:"canaryRevision,omitempty"` StableRevision indicates
|
||||
the revision pods that has successfully rolled out
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
|
|
|
|||
|
|
@ -8,3 +8,10 @@ webhooks:
|
|||
matchExpressions:
|
||||
- key: rollouts.kruise.io/workload-type
|
||||
operator: Exists
|
||||
- name: mdeployment.kb.io
|
||||
objectSelector:
|
||||
matchExpressions:
|
||||
- key: control-plane
|
||||
operator: NotIn
|
||||
values:
|
||||
- controller-manager
|
||||
|
|
|
|||
54
go.mod
54
go.mod
|
|
@ -1,6 +1,6 @@
|
|||
module github.com/openkruise/rollouts
|
||||
|
||||
go 1.16
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
|
|
@ -23,3 +23,55 @@ require (
|
|||
sigs.k8s.io/gateway-api v0.4.3
|
||||
sigs.k8s.io/yaml v1.2.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.81.0 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
|
||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/go-logr/logr v0.4.0 // indirect
|
||||
github.com/go-logr/zapr v0.4.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.6 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/uuid v1.1.2 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/json-iterator/go v1.1.11 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.1 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.11.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.26.0 // indirect
|
||||
github.com/prometheus/procfs v0.6.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.19.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 // indirect
|
||||
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 // indirect
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
|
||||
golang.org/x/text v0.3.6 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.26.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
)
|
||||
|
|
|
|||
3
go.sum
3
go.sum
|
|
@ -90,7 +90,6 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
|
|||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
|
|
@ -672,7 +671,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -1002,7 +1000,6 @@ k8s.io/component-base v0.22.6/go.mod h1:ngHLefY4J5fq2fApNdbWyj4yh0lvw36do4aAjNN8
|
|||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c=
|
||||
k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
|
|
|
|||
2
main.go
2
main.go
|
|
@ -26,7 +26,6 @@ import (
|
|||
br "github.com/openkruise/rollouts/pkg/controller/batchrelease"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollouthistory"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
utilclient "github.com/openkruise/rollouts/pkg/util/client"
|
||||
utilfeature "github.com/openkruise/rollouts/pkg/util/feature"
|
||||
"github.com/openkruise/rollouts/pkg/webhook"
|
||||
|
|
@ -102,7 +101,6 @@ func main() {
|
|||
if err = (&rollout.RolloutReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Finder: util.NewControllerFinder(mgr.GetClient()),
|
||||
Recorder: mgr.GetEventRecorderFor("rollout-controller"),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "Rollout")
|
||||
|
|
|
|||
|
|
@ -192,7 +192,7 @@ func isWorkloadRevisionChanged(event control.WorkloadEventType, release *v1alpha
|
|||
}
|
||||
|
||||
func isWorkloadRollbackInBatch(event control.WorkloadEventType, release *v1alpha1.BatchRelease) bool {
|
||||
return (event == control.WorkloadRollbackInBatch || release.Annotations[util.RollbackInBatchAnnotation] != "") &&
|
||||
return (event == control.WorkloadRollbackInBatch || release.Annotations[v1alpha1.RollbackInBatchAnnotation] != "") &&
|
||||
release.Status.CanaryStatus.NoNeedUpdateReplicas == nil && release.Status.Phase == v1alpha1.RolloutPhaseProgressing
|
||||
}
|
||||
|
||||
|
|
@ -201,7 +201,7 @@ func isWorkloadUnstable(event control.WorkloadEventType, _ *v1alpha1.BatchReleas
|
|||
}
|
||||
|
||||
func isRollbackInBatchSatisfied(workloadInfo *util.WorkloadInfo, release *v1alpha1.BatchRelease) bool {
|
||||
return workloadInfo.Status.StableRevision == workloadInfo.Status.UpdateRevision && release.Annotations[util.RollbackInBatchAnnotation] != ""
|
||||
return workloadInfo.Status.StableRevision == workloadInfo.Status.UpdateRevision && release.Annotations[v1alpha1.RollbackInBatchAnnotation] != ""
|
||||
}
|
||||
|
||||
func signalRePrepareRollback(newStatus *v1alpha1.BatchReleaseStatus) {
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
|
@ -99,7 +100,7 @@ func batchLabelSatisfied(pods []*corev1.Pod, rolloutID string, targetCount int32
|
|||
if !pod.DeletionTimestamp.IsZero() {
|
||||
return false
|
||||
}
|
||||
return pod.Labels[util.RolloutIDLabel] == rolloutID
|
||||
return pod.Labels[v1alpha1.RolloutIDLabel] == rolloutID
|
||||
})
|
||||
return patchedCount >= int(targetCount)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -151,11 +151,11 @@ func TestIsBatchReady(t *testing.T) {
|
|||
func generatePods(updatedReplicas, noNeedRollbackReplicas int) []*corev1.Pod {
|
||||
podsNoNeed := generatePodsWith(map[string]string{
|
||||
util.NoNeedUpdatePodLabel: "0x1",
|
||||
util.RolloutIDLabel: "1",
|
||||
v1alpha1.RolloutIDLabel: "1",
|
||||
apps.ControllerRevisionHashLabelKey: "version-1",
|
||||
}, noNeedRollbackReplicas, 0)
|
||||
return append(generatePodsWith(map[string]string{
|
||||
util.RolloutIDLabel: "1",
|
||||
v1alpha1.RolloutIDLabel: "1",
|
||||
apps.ControllerRevisionHashLabelKey: "version-1",
|
||||
}, updatedReplicas-noNeedRollbackReplicas, noNeedRollbackReplicas), podsNoNeed...)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -201,7 +201,7 @@ func (rc *realBatchControlPlane) SyncWorkloadInformation() (control.WorkloadEven
|
|||
// - err: whether error occurs.
|
||||
func (rc *realBatchControlPlane) markNoNeedUpdatePodsIfNeeds() (*int32, error) {
|
||||
// currently, we only support rollback scene, in the future, we may support more scenes.
|
||||
if rc.release.Annotations[util.RollbackInBatchAnnotation] == "" {
|
||||
if rc.release.Annotations[v1alpha1.RollbackInBatchAnnotation] == "" {
|
||||
return nil, nil
|
||||
}
|
||||
// currently, if rollout-id is not set, it is no scene which require patch this label
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
|
@ -59,7 +60,7 @@ func FilterPodsForUnorderedUpdate(pods []*corev1.Pod, ctx *batchcontext.BatchCon
|
|||
if !util.IsConsistentWithRevision(pod, ctx.UpdateRevision) {
|
||||
continue
|
||||
}
|
||||
if pod.Labels[util.NoNeedUpdatePodLabel] == ctx.RolloutID && pod.Labels[util.RolloutIDLabel] != ctx.RolloutID {
|
||||
if pod.Labels[util.NoNeedUpdatePodLabel] == ctx.RolloutID && pod.Labels[v1alpha1.RolloutIDLabel] != ctx.RolloutID {
|
||||
noNeedUpdate++
|
||||
lowPriorityPods = append(lowPriorityPods, pod)
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
|
@ -64,7 +65,7 @@ func (r *realPatcher) patchPodBatchLabel(pods []*corev1.Pod, ctx *batchcontext.B
|
|||
continue
|
||||
}
|
||||
|
||||
podRolloutID := pod.Labels[util.RolloutIDLabel]
|
||||
podRolloutID := pod.Labels[v1alpha1.RolloutIDLabel]
|
||||
if pod.DeletionTimestamp.IsZero() && podRolloutID == ctx.RolloutID {
|
||||
patchedUpdatedReplicas++
|
||||
}
|
||||
|
|
@ -88,13 +89,13 @@ func (r *realPatcher) patchPodBatchLabel(pods []*corev1.Pod, ctx *batchcontext.B
|
|||
}
|
||||
|
||||
// if it has been patched, just ignore
|
||||
if pod.Labels[util.RolloutIDLabel] == ctx.RolloutID {
|
||||
if pod.Labels[v1alpha1.RolloutIDLabel] == ctx.RolloutID {
|
||||
continue
|
||||
}
|
||||
|
||||
clone := util.GetEmptyObjectWithKey(pod)
|
||||
by := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s","%s":"%d"}}}`,
|
||||
util.RolloutIDLabel, ctx.RolloutID, util.RolloutBatchIDLabel, ctx.CurrentBatch+1)
|
||||
v1alpha1.RolloutIDLabel, ctx.RolloutID, v1alpha1.RolloutBatchIDLabel, ctx.CurrentBatch+1)
|
||||
if err := r.Patch(context.TODO(), clone, client.RawPatch(types.StrategicMergePatchType, []byte(by))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,8 +23,8 @@ import (
|
|||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
|
@ -156,7 +156,7 @@ func TestLabelPatcher(t *testing.T) {
|
|||
Expect(err).NotTo(HaveOccurred())
|
||||
patched := 0
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Labels[util.RolloutIDLabel] == ctx.RolloutID {
|
||||
if pod.Labels[v1alpha1.RolloutIDLabel] == ctx.RolloutID {
|
||||
patched++
|
||||
}
|
||||
}
|
||||
|
|
@ -170,8 +170,8 @@ func TestLabelPatcher(t *testing.T) {
|
|||
|
||||
func generatePods(ordinalBegin, ordinalEnd, labeled int32, rolloutID, batchID, version string) []*corev1.Pod {
|
||||
podsWithLabel := generateLabeledPods(map[string]string{
|
||||
util.RolloutIDLabel: rolloutID,
|
||||
util.RolloutBatchIDLabel: batchID,
|
||||
v1alpha1.RolloutIDLabel: rolloutID,
|
||||
v1alpha1.RolloutBatchIDLabel: batchID,
|
||||
apps.ControllerRevisionHashLabelKey: version,
|
||||
}, int(labeled), int(ordinalBegin))
|
||||
|
||||
|
|
|
|||
|
|
@ -1,42 +0,0 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package batchrelease
|
||||
|
||||
import rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
|
||||
// BatchRelease is not the actual controller of the BatchRelease controller,
|
||||
// but rather the ability to interact with the BatchRelease controller through the BatchRelease CRD to achieve a batch release
|
||||
type BatchRelease interface {
|
||||
// Verify will create batchRelease or update batchRelease steps configuration and
|
||||
// return whether the batchRelease configuration is consistent with the rollout step
|
||||
Verify(index int32) (bool, error)
|
||||
|
||||
// SyncRolloutID will sync rollout id from Rollout to BatchRelease
|
||||
SyncRolloutID(currentID string) error
|
||||
|
||||
// 1. Promote release workload in step(index), 1<=index<=len(step)
|
||||
// 2. Promote will resume stable workload if the last batch(index=-1) is finished
|
||||
Promote(index int32, isRollback, checkReady bool) (bool, error)
|
||||
|
||||
// FetchBatchRelease fetch batchRelease
|
||||
FetchBatchRelease() (*rolloutv1alpha1.BatchRelease, error)
|
||||
|
||||
// Finalize clean up batchRelease
|
||||
// 1. delete canary deployments
|
||||
// 2. delete batchRelease CRD
|
||||
Finalize() (bool, error)
|
||||
}
|
||||
|
|
@ -1,336 +0,0 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package batchrelease
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
const (
|
||||
// rollouts.kruise.io
|
||||
BatchReleaseOwnerRefLabel = "rollouts.kruise.io/owner-ref"
|
||||
)
|
||||
|
||||
type innerBatchRelease struct {
|
||||
client.Client
|
||||
|
||||
rollout *rolloutv1alpha1.Rollout
|
||||
|
||||
batchName string
|
||||
rolloutID string
|
||||
}
|
||||
|
||||
func NewInnerBatchController(c client.Client, rollout *rolloutv1alpha1.Rollout, rolloutID string) BatchRelease {
|
||||
r := &innerBatchRelease{
|
||||
Client: c,
|
||||
rollout: rollout,
|
||||
rolloutID: rolloutID,
|
||||
batchName: rolloutBatchName(rollout),
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *innerBatchRelease) SyncRolloutID(currentID string) error {
|
||||
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
batch := &rolloutv1alpha1.BatchRelease{}
|
||||
if err := r.Get(context.TODO(), client.ObjectKey{Namespace: r.rollout.Namespace, Name: r.batchName}, batch); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return nil // just return nil if batchRelease not exist
|
||||
}
|
||||
return err
|
||||
}
|
||||
if batch.Spec.ReleasePlan.RolloutID == currentID {
|
||||
return nil
|
||||
}
|
||||
batch.Spec.ReleasePlan.RolloutID = r.rolloutID
|
||||
if err := r.Client.Update(context.TODO(), batch); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) update batchRelease rolloutID %s failed: %s", r.rollout.Namespace, r.rollout.Name, currentID, err.Error())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *innerBatchRelease) Verify(index int32) (bool, error) {
|
||||
index = index - 1
|
||||
batch := &rolloutv1alpha1.BatchRelease{}
|
||||
err := r.Get(context.TODO(), client.ObjectKey{Namespace: r.rollout.Namespace, Name: r.batchName}, batch)
|
||||
if errors.IsNotFound(err) {
|
||||
// create new BatchRelease Crd
|
||||
br := createBatchRelease(r.rollout, r.batchName, r.rolloutID)
|
||||
if err = r.Create(context.TODO(), br); err != nil && !errors.IsAlreadyExists(err) {
|
||||
klog.Errorf("rollout(%s/%s) create BatchRelease failed: %s", r.rollout.Namespace, r.rollout.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
data := util.DumpJSON(br)
|
||||
klog.Infof("rollout(%s/%s) create BatchRelease(%s) success", r.rollout.Namespace, r.rollout.Name, data)
|
||||
return false, nil
|
||||
} else if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) fetch BatchRelease failed: %s", r.rollout.Namespace, r.rollout.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
|
||||
// check whether batchRelease configuration is the latest
|
||||
newBr := createBatchRelease(r.rollout, r.batchName, r.rolloutID)
|
||||
if batchPlanDeepEqual(batch, newBr, index) {
|
||||
klog.Infof("rollout(%s/%s) batchRelease(generation:%d) configuration is the latest", r.rollout.Namespace, r.rollout.Name, batch.Generation)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// update batchRelease to the latest version
|
||||
if err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
if err = r.Get(context.TODO(), client.ObjectKey{Namespace: r.rollout.Namespace, Name: r.batchName}, batch); err != nil {
|
||||
klog.Errorf("error getting updated BatchRelease(%s/%s) from client", batch.Namespace, batch.Name)
|
||||
return err
|
||||
}
|
||||
batch.Spec.ReleasePlan.RolloutID = r.rolloutID
|
||||
batch.Spec.ReleasePlan.Batches = newBr.Spec.ReleasePlan.Batches
|
||||
batch.Spec.ReleasePlan.BatchPartition = utilpointer.Int32Ptr(index)
|
||||
if err = r.Client.Update(context.TODO(), batch); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) update batchRelease configuration failed: %s", r.rollout.Namespace, r.rollout.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
data := util.DumpJSON(batch)
|
||||
klog.Infof("rollout(%s/%s) update batchRelease configuration(%s) to the latest", r.rollout.Namespace, r.rollout.Name, data)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (r *innerBatchRelease) FetchBatchRelease() (*rolloutv1alpha1.BatchRelease, error) {
|
||||
batch := &rolloutv1alpha1.BatchRelease{}
|
||||
err := r.Get(context.TODO(), client.ObjectKey{Namespace: r.rollout.Namespace, Name: r.batchName}, batch)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) fetch BatchRelease failed: %s", r.rollout.Namespace, r.rollout.Name, err.Error())
|
||||
return nil, err
|
||||
}
|
||||
return batch, nil
|
||||
}
|
||||
|
||||
func (r *innerBatchRelease) Promote(index int32, isRollback, checkReady bool) (bool, error) {
|
||||
// Promote will resume stable workload if the last batch(index=-1) is finished
|
||||
if index == -1 {
|
||||
return r.resumeStableWorkload(checkReady)
|
||||
}
|
||||
|
||||
// batch release workload's pods
|
||||
index = index - 1
|
||||
batch := &rolloutv1alpha1.BatchRelease{}
|
||||
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
if err := r.Get(context.TODO(), client.ObjectKey{Namespace: r.rollout.Namespace, Name: r.batchName}, batch); err != nil {
|
||||
klog.Errorf("error getting updated BatchRelease(%s/%s) from client", batch.Namespace, batch.Name)
|
||||
return err
|
||||
}
|
||||
if IsPromoted(r.rollout, batch, isRollback) {
|
||||
return nil
|
||||
}
|
||||
if isRollback && len(r.rollout.Spec.Strategy.Canary.TrafficRoutings) == 0 {
|
||||
if batch.Annotations == nil {
|
||||
batch.Annotations = map[string]string{}
|
||||
}
|
||||
// only rollback case should update this rollout id for BatchRelease.
|
||||
batch.Spec.ReleasePlan.RolloutID = r.rolloutID
|
||||
batch.Annotations[util.RollbackInBatchAnnotation] = r.rollout.Annotations[util.RollbackInBatchAnnotation]
|
||||
}
|
||||
|
||||
if batch.Labels == nil {
|
||||
batch.Labels = map[string]string{}
|
||||
}
|
||||
batch.Spec.ReleasePlan.BatchPartition = utilpointer.Int32Ptr(index)
|
||||
if err := r.Client.Update(context.TODO(), batch); err != nil {
|
||||
return err
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) promote batchRelease BatchPartition(%d) success", r.rollout.Namespace, r.rollout.Name, index)
|
||||
return nil
|
||||
}); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) promote batchRelease BatchPartition(%d) failed: %s", r.rollout.Namespace, r.rollout.Name, index, err.Error())
|
||||
return false, err
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (r *innerBatchRelease) resumeStableWorkload(waitReady bool) (bool, error) {
|
||||
batch, err := r.FetchBatchRelease()
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// The Completed phase means batchRelease controller has processed all it
|
||||
// should process. If BatchRelease phase is completed, we can do nothing.
|
||||
if batch.Status.Phase == rolloutv1alpha1.RolloutPhaseCompleted {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// If BatchPartition is nil, BatchRelease will directly resume workload via:
|
||||
// - * set workload Paused = false if it needs;
|
||||
// - * set workload Partition = null if it needs.
|
||||
if batch.Spec.ReleasePlan.BatchPartition == nil {
|
||||
// - If checkReady is true, finalizing policy must be "WaitResume";
|
||||
// - If checkReady is false, finalizing policy must be NOT "WaitResume";
|
||||
// Otherwise, we should correct it.
|
||||
switch batch.Spec.ReleasePlan.FinalizingPolicy {
|
||||
case rolloutv1alpha1.WaitResumeFinalizingPolicyType:
|
||||
if waitReady { // no need to patch again
|
||||
return false, nil
|
||||
}
|
||||
default:
|
||||
if !waitReady { // no need to patch again
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Correct finalizing policy.
|
||||
policy := rolloutv1alpha1.ImmediateFinalizingPolicyType
|
||||
if waitReady {
|
||||
policy = rolloutv1alpha1.WaitResumeFinalizingPolicyType
|
||||
}
|
||||
|
||||
// Patch BatchPartition and FinalizingPolicy, BatchPartition always patch null here.
|
||||
body := fmt.Sprintf(`{"spec":{"releasePlan":{"batchPartition":null,"finalizingPolicy":"%s"}}}`, policy)
|
||||
if err = r.Patch(context.TODO(), batch, client.RawPatch(types.MergePatchType, []byte(body))); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (r *innerBatchRelease) Finalize() (bool, error) {
|
||||
batch := &rolloutv1alpha1.BatchRelease{}
|
||||
err := r.Get(context.TODO(), client.ObjectKey{Namespace: r.rollout.Namespace, Name: r.batchName}, batch)
|
||||
if err != nil && errors.IsNotFound(err) {
|
||||
klog.Infof("rollout(%s/%s) delete BatchRelease success", r.rollout.Namespace, r.rollout.Name)
|
||||
return true, nil
|
||||
} else if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) fetch BatchRelease failed: %s", r.rollout.Namespace, r.rollout.Name)
|
||||
return false, err
|
||||
}
|
||||
if !batch.DeletionTimestamp.IsZero() {
|
||||
klog.Infof("rollout(%s/%s) BatchRelease is terminating, and wait a moment", r.rollout.Namespace, r.rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
//delete batchRelease
|
||||
err = r.Delete(context.TODO(), batch)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) delete BatchRelease failed: %s", r.rollout.Namespace, r.rollout.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) delete BatchRelease, and wait a moment", r.rollout.Namespace, r.rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func createBatchRelease(rollout *rolloutv1alpha1.Rollout, batchName, rolloutID string) *rolloutv1alpha1.BatchRelease {
|
||||
var batches []rolloutv1alpha1.ReleaseBatch
|
||||
for _, step := range rollout.Spec.Strategy.Canary.Steps {
|
||||
if step.Replicas == nil {
|
||||
batches = append(batches, rolloutv1alpha1.ReleaseBatch{CanaryReplicas: intstr.FromString(strconv.Itoa(int(*step.Weight)) + "%")})
|
||||
} else {
|
||||
batches = append(batches, rolloutv1alpha1.ReleaseBatch{CanaryReplicas: *step.Replicas})
|
||||
}
|
||||
}
|
||||
|
||||
br := &rolloutv1alpha1.BatchRelease{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: rollout.Namespace,
|
||||
Name: batchName,
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
*metav1.NewControllerRef(rollout, schema.GroupVersionKind{
|
||||
Group: rolloutv1alpha1.SchemeGroupVersion.Group,
|
||||
Version: rolloutv1alpha1.SchemeGroupVersion.Version,
|
||||
Kind: "Rollout",
|
||||
}),
|
||||
},
|
||||
Labels: map[string]string{
|
||||
BatchReleaseOwnerRefLabel: rollout.Name,
|
||||
},
|
||||
},
|
||||
Spec: rolloutv1alpha1.BatchReleaseSpec{
|
||||
TargetRef: rolloutv1alpha1.ObjectRef{
|
||||
WorkloadRef: &rolloutv1alpha1.WorkloadRef{
|
||||
APIVersion: rollout.Spec.ObjectRef.WorkloadRef.APIVersion,
|
||||
Kind: rollout.Spec.ObjectRef.WorkloadRef.Kind,
|
||||
Name: rollout.Spec.ObjectRef.WorkloadRef.Name,
|
||||
},
|
||||
},
|
||||
ReleasePlan: rolloutv1alpha1.ReleasePlan{
|
||||
Batches: batches,
|
||||
RolloutID: rolloutID,
|
||||
BatchPartition: utilpointer.Int32Ptr(0),
|
||||
FailureThreshold: rollout.Spec.Strategy.Canary.FailureThreshold,
|
||||
},
|
||||
},
|
||||
}
|
||||
return br
|
||||
}
|
||||
|
||||
// {workload.name}-batch
|
||||
func rolloutBatchName(rollout *rolloutv1alpha1.Rollout) string {
|
||||
return rollout.Name
|
||||
}
|
||||
|
||||
// IsPromoted return true if the current batch has been promoted:
|
||||
// - 1. BatchRelease BatchPartition == Rollout currentStepIndex-1;
|
||||
// - 2. Rollback annotation has been patched to BatchRelease when rolling back.
|
||||
func IsPromoted(rollout *rolloutv1alpha1.Rollout, batch *rolloutv1alpha1.BatchRelease, isRollback bool) bool {
|
||||
currentBatch := int32(0)
|
||||
if rollout.Status.CanaryStatus != nil {
|
||||
currentBatch = rollout.Status.CanaryStatus.CurrentStepIndex - 1
|
||||
}
|
||||
|
||||
if batch.Spec.ReleasePlan.BatchPartition == nil || *batch.Spec.ReleasePlan.BatchPartition != currentBatch {
|
||||
return false
|
||||
}
|
||||
|
||||
if isRollback && batch.Annotations[util.RollbackInBatchAnnotation] != rollout.Annotations[util.RollbackInBatchAnnotation] {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func batchPlanDeepEqual(old, new *rolloutv1alpha1.BatchRelease, currentBatch int32) bool {
|
||||
if old.Spec.ReleasePlan.BatchPartition == nil || *old.Spec.ReleasePlan.BatchPartition != currentBatch {
|
||||
return false
|
||||
}
|
||||
if old.Spec.ReleasePlan.RolloutID != new.Spec.ReleasePlan.RolloutID {
|
||||
return false
|
||||
}
|
||||
return reflect.DeepEqual(old.Spec.ReleasePlan.Batches, new.Spec.ReleasePlan.Batches)
|
||||
}
|
||||
|
|
@ -1,303 +0,0 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/batchrelease"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func (r *rolloutContext) runCanary() error {
|
||||
canaryStatus := r.newStatus.CanaryStatus
|
||||
// init canary status
|
||||
if canaryStatus.CanaryRevision == "" {
|
||||
canaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateUpgrade
|
||||
canaryStatus.CanaryRevision = r.workload.CanaryRevision
|
||||
canaryStatus.ObservedRolloutID = getRolloutID(r.workload, r.rollout)
|
||||
canaryStatus.CurrentStepIndex = 1
|
||||
canaryStatus.RolloutHash = r.rollout.Annotations[util.RolloutHashAnnotation]
|
||||
}
|
||||
|
||||
// update canary status
|
||||
batch, err := r.batchControl.FetchBatchRelease()
|
||||
if err != nil {
|
||||
canaryStatus.Message = "BatchRelease not found"
|
||||
canaryStatus.CanaryReplicas = r.workload.CanaryReplicas
|
||||
canaryStatus.CanaryReadyReplicas = r.workload.CanaryReadyReplicas
|
||||
} else {
|
||||
canaryStatus.Message = fmt.Sprintf("BatchRelease at state %s, id %s, step %d",
|
||||
batch.Status.CanaryStatus.CurrentBatchState, batch.Status.ObservedRolloutID, batch.Status.CanaryStatus.CurrentBatch+1)
|
||||
canaryStatus.CanaryReplicas = batch.Status.CanaryStatus.UpdatedReplicas
|
||||
canaryStatus.CanaryReadyReplicas = batch.Status.CanaryStatus.UpdatedReadyReplicas
|
||||
}
|
||||
|
||||
// sync rollout-id to batchRelease if we need
|
||||
if err := r.batchControl.SyncRolloutID(r.newStatus.CanaryStatus.ObservedRolloutID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch canaryStatus.CurrentStepState {
|
||||
case rolloutv1alpha1.CanaryStepStateUpgrade:
|
||||
klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", r.rollout.Namespace, r.rollout.Name, rolloutv1alpha1.CanaryStepStateUpgrade)
|
||||
done, err := r.doCanaryUpgrade()
|
||||
if err != nil {
|
||||
return err
|
||||
} else if done {
|
||||
canaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateTrafficRouting
|
||||
canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
klog.Infof("rollout(%s/%s) step(%d) state from(%s) -> to(%s)", r.rollout.Namespace, r.rollout.Name,
|
||||
canaryStatus.CurrentStepIndex, rolloutv1alpha1.CanaryStepStateUpgrade, canaryStatus.CurrentStepState)
|
||||
}
|
||||
|
||||
case rolloutv1alpha1.CanaryStepStateTrafficRouting:
|
||||
klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", r.rollout.Namespace, r.rollout.Name, rolloutv1alpha1.CanaryStepStateTrafficRouting)
|
||||
done, err := r.doCanaryTrafficRouting()
|
||||
if err != nil {
|
||||
return err
|
||||
} else if done {
|
||||
canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
canaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateMetricsAnalysis
|
||||
klog.Infof("rollout(%s/%s) step(%d) state from(%s) -> to(%s)", r.rollout.Namespace, r.rollout.Name,
|
||||
canaryStatus.CurrentStepIndex, rolloutv1alpha1.CanaryStepStateTrafficRouting, canaryStatus.CurrentStepState)
|
||||
}
|
||||
expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
|
||||
r.recheckTime = &expectedTime
|
||||
|
||||
case rolloutv1alpha1.CanaryStepStateMetricsAnalysis:
|
||||
klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", r.rollout.Namespace, r.rollout.Name, rolloutv1alpha1.CanaryStepStateMetricsAnalysis)
|
||||
done, err := r.doCanaryMetricsAnalysis()
|
||||
if err != nil {
|
||||
return err
|
||||
} else if done {
|
||||
canaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStatePaused
|
||||
klog.Infof("rollout(%s/%s) step(%d) state from(%s) -> to(%s)", r.rollout.Namespace, r.rollout.Name,
|
||||
canaryStatus.CurrentStepIndex, rolloutv1alpha1.CanaryStepStateMetricsAnalysis, canaryStatus.CurrentStepState)
|
||||
}
|
||||
|
||||
case rolloutv1alpha1.CanaryStepStatePaused:
|
||||
klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", r.rollout.Namespace, r.rollout.Name, rolloutv1alpha1.CanaryStepStatePaused)
|
||||
done, err := r.doCanaryPaused()
|
||||
if err != nil {
|
||||
return err
|
||||
} else if done {
|
||||
canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
canaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateReady
|
||||
klog.Infof("rollout(%s/%s) step(%d) state from(%s) -> to(%s)", r.rollout.Namespace, r.rollout.Name,
|
||||
canaryStatus.CurrentStepIndex, rolloutv1alpha1.CanaryStepStatePaused, canaryStatus.CurrentStepState)
|
||||
}
|
||||
|
||||
case rolloutv1alpha1.CanaryStepStateReady:
|
||||
klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", r.rollout.Namespace, r.rollout.Name, rolloutv1alpha1.CanaryStepStateReady)
|
||||
// run next step
|
||||
if len(r.rollout.Spec.Strategy.Canary.Steps) > int(canaryStatus.CurrentStepIndex) {
|
||||
canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
canaryStatus.CurrentStepIndex++
|
||||
canaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateUpgrade
|
||||
klog.Infof("rollout(%s/%s) canary step from(%d) -> to(%d)", r.rollout.Namespace, r.rollout.Name, canaryStatus.CurrentStepIndex-1, canaryStatus.CurrentStepIndex)
|
||||
} else {
|
||||
klog.Infof("rollout(%s/%s) canary run all steps, and completed", r.rollout.Namespace, r.rollout.Name)
|
||||
canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
canaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateCompleted
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) step(%d) state from(%s) -> to(%s)", r.rollout.Namespace, r.rollout.Name,
|
||||
canaryStatus.CurrentStepIndex, rolloutv1alpha1.CanaryStepStateReady, canaryStatus.CurrentStepState)
|
||||
// canary completed
|
||||
case rolloutv1alpha1.CanaryStepStateCompleted:
|
||||
klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", r.rollout.Namespace, r.rollout.Name, rolloutv1alpha1.CanaryStepStateCompleted)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *rolloutContext) doCanaryUpgrade() (bool, error) {
|
||||
// only traffic routing
|
||||
/*if len(r.rollout.Spec.Strategy.Canary.Steps) == 0 {
|
||||
if r.workload.CanaryReadyReplicas > 0 {
|
||||
klog.Infof("rollout(%s/%s) workload(%s) canaryAvailable(%d), and go to the next stage",
|
||||
r.rollout.Namespace, r.rollout.Name, r.workload.Name, r.workload.CanaryReadyReplicas)
|
||||
return true, nil
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) workload(%s) canaryAvailable(%d), and wait a moment",
|
||||
r.rollout.Namespace, r.rollout.Name, r.workload.Name, r.workload.CanaryReadyReplicas)
|
||||
return false, nil
|
||||
}*/
|
||||
|
||||
// verify whether batchRelease configuration is the latest
|
||||
steps := len(r.rollout.Spec.Strategy.Canary.Steps)
|
||||
canaryStatus := r.newStatus.CanaryStatus
|
||||
isLatest, err := r.batchControl.Verify(canaryStatus.CurrentStepIndex)
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else if !isLatest {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// fetch batchRelease
|
||||
batch, err := r.batchControl.FetchBatchRelease()
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else if batch.Status.ObservedReleasePlanHash != util.HashReleasePlanBatches(&batch.Spec.ReleasePlan) ||
|
||||
batch.Generation != batch.Status.ObservedGeneration {
|
||||
klog.Infof("rollout(%s/%s) batchReleasePlan is not consistent, and wait a moment", r.rollout.Namespace, r.rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
batchData := util.DumpJSON(batch.Status)
|
||||
cond := util.GetRolloutCondition(*r.newStatus, rolloutv1alpha1.RolloutConditionProgressing)
|
||||
cond.Message = fmt.Sprintf("Rollout is in step(%d/%d), and upgrade workload new versions", canaryStatus.CurrentStepIndex, steps)
|
||||
r.newStatus.Message = cond.Message
|
||||
// promote workload next batch release
|
||||
if !batchrelease.IsPromoted(r.rollout, batch, r.workload.IsInRollback) {
|
||||
r.recorder.Eventf(r.rollout, corev1.EventTypeNormal, "Progressing", fmt.Sprintf("start upgrade step(%d) canary pods with new versions", canaryStatus.CurrentStepIndex))
|
||||
klog.Infof("rollout(%s/%s) will promote batch from(%d) -> to(%d)", r.rollout.Namespace, r.rollout.Name, *batch.Spec.ReleasePlan.BatchPartition+1, canaryStatus.CurrentStepIndex)
|
||||
return r.batchControl.Promote(canaryStatus.CurrentStepIndex, r.workload.IsInRollback, false)
|
||||
}
|
||||
|
||||
// check whether batchRelease is ready
|
||||
if batch.Status.CanaryStatus.CurrentBatchState != rolloutv1alpha1.ReadyBatchState ||
|
||||
batch.Status.CanaryStatus.CurrentBatch+1 < canaryStatus.CurrentStepIndex {
|
||||
klog.Infof("rollout(%s/%s) batch(%s) state(%s), and wait a moment",
|
||||
r.rollout.Namespace, r.rollout.Name, batchData, batch.Status.CanaryStatus.CurrentBatchState)
|
||||
return false, nil
|
||||
}
|
||||
r.recorder.Eventf(r.rollout, corev1.EventTypeNormal, "Progressing", fmt.Sprintf("upgrade step(%d) canary pods with new versions done", canaryStatus.CurrentStepIndex))
|
||||
klog.Infof("rollout(%s/%s) batch(%s) state(%s), and success",
|
||||
r.rollout.Namespace, r.rollout.Name, batchData, batch.Status.CanaryStatus.CurrentBatchState)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *rolloutContext) doCanaryMetricsAnalysis() (bool, error) {
|
||||
// todo
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *rolloutContext) doCanaryPaused() (bool, error) {
|
||||
// No step set, need manual confirmation
|
||||
if len(r.rollout.Spec.Strategy.Canary.Steps) == 0 {
|
||||
klog.Infof("rollout(%s/%s) don't contains steps, and need manual confirmation", r.rollout.Namespace, r.rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
canaryStatus := r.newStatus.CanaryStatus
|
||||
currentStep := r.rollout.Spec.Strategy.Canary.Steps[canaryStatus.CurrentStepIndex-1]
|
||||
steps := len(r.rollout.Spec.Strategy.Canary.Steps)
|
||||
cond := util.GetRolloutCondition(*r.newStatus, rolloutv1alpha1.RolloutConditionProgressing)
|
||||
// need manual confirmation
|
||||
if currentStep.Pause.Duration == nil {
|
||||
klog.Infof("rollout(%s/%s) don't set pause duration, and need manual confirmation", r.rollout.Namespace, r.rollout.Name)
|
||||
cond.Message = fmt.Sprintf("Rollout is in step(%d/%d), and you need manually confirm to enter the next step", canaryStatus.CurrentStepIndex, steps)
|
||||
r.newStatus.Message = cond.Message
|
||||
return false, nil
|
||||
}
|
||||
cond.Message = fmt.Sprintf("Rollout is in step(%d/%d), and wait duration(%d seconds) to enter the next step", canaryStatus.CurrentStepIndex, steps, *currentStep.Pause.Duration)
|
||||
r.newStatus.Message = cond.Message
|
||||
// wait duration time, then go to next step
|
||||
duration := time.Second * time.Duration(*currentStep.Pause.Duration)
|
||||
expectedTime := canaryStatus.LastUpdateTime.Add(duration)
|
||||
if expectedTime.Before(time.Now()) {
|
||||
klog.Infof("rollout(%s/%s) canary step(%d) paused duration(%d seconds), and go to the next step",
|
||||
r.rollout.Namespace, r.rollout.Name, canaryStatus.CurrentStepIndex, *currentStep.Pause.Duration)
|
||||
return true, nil
|
||||
}
|
||||
r.recheckTime = &expectedTime
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// cleanup after rollout is completed or finished
|
||||
func (r *rolloutContext) doCanaryFinalising() (bool, error) {
|
||||
// when CanaryStatus is nil, which means canary action hasn't started yet, don't need doing cleanup
|
||||
if r.newStatus.CanaryStatus == nil {
|
||||
return true, nil
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) in finalizing: remove rollout state in workload", r.rollout.Namespace, r.rollout.Name)
|
||||
// 1. rollout progressing complete, allow workload paused=false in webhook
|
||||
err := r.removeRolloutStateInWorkload()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) in finalizing: restore stable service", r.rollout.Namespace, r.rollout.Name)
|
||||
// 2. restore stable service, remove podRevision selector
|
||||
done, err := r.restoreStableService()
|
||||
if err != nil || !done {
|
||||
return done, err
|
||||
}
|
||||
// 3. upgrade stable deployment, set paused=false
|
||||
// isComplete indicates whether rollout progressing complete, and wait for all pods are ready
|
||||
// else indicates rollout is canceled
|
||||
klog.Infof("rollout(%s/%s) in finalizing: upgrade stable workload", r.rollout.Namespace, r.rollout.Name)
|
||||
done, err = r.batchControl.Promote(-1, false, r.isComplete)
|
||||
if err != nil || !done {
|
||||
return done, err
|
||||
}
|
||||
// 4. route all traffic to stable service
|
||||
klog.Infof("rollout(%s/%s) in finalizing: restore traffic routing", r.rollout.Namespace, r.rollout.Name)
|
||||
done, err = r.doFinalisingTrafficRouting()
|
||||
if err != nil || !done {
|
||||
return done, err
|
||||
}
|
||||
// 5. delete batchRelease crd
|
||||
klog.Infof("rollout(%s/%s) in finalizing: remove batchRelease crd", r.rollout.Namespace, r.rollout.Name)
|
||||
done, err = r.batchControl.Finalize()
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) Finalize batchRelease failed: %s", r.rollout.Namespace, r.rollout.Name, err.Error())
|
||||
return false, err
|
||||
} else if !done {
|
||||
return false, nil
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) do finalize success", r.rollout.Namespace, r.rollout.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *rolloutContext) removeRolloutStateInWorkload() error {
|
||||
if r.workload == nil || r.rollout.Spec.ObjectRef.WorkloadRef == nil {
|
||||
return nil
|
||||
}
|
||||
if _, ok := r.workload.Annotations[util.InRolloutProgressingAnnotation]; !ok {
|
||||
return nil
|
||||
}
|
||||
workloadRef := r.rollout.Spec.ObjectRef.WorkloadRef
|
||||
workloadGVK := schema.FromAPIVersionAndKind(workloadRef.APIVersion, workloadRef.Kind)
|
||||
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
obj := util.GetEmptyWorkloadObject(workloadGVK)
|
||||
if obj == nil {
|
||||
return nil
|
||||
}
|
||||
if err := r.Get(context.TODO(), types.NamespacedName{Name: r.workload.Name, Namespace: r.workload.Namespace}, obj); err != nil {
|
||||
klog.Errorf("getting updated workload(%s.%s) failed: %s", r.workload.Namespace, r.workload.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
annotations := obj.GetAnnotations()
|
||||
delete(annotations, util.InRolloutProgressingAnnotation)
|
||||
obj.SetAnnotations(annotations)
|
||||
return r.Update(context.TODO(), obj)
|
||||
})
|
||||
if err != nil {
|
||||
klog.Errorf("update rollout(%s/%s) workload(%s) failed: %s", r.rollout.Namespace, r.rollout.Name, r.workload.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
klog.Infof("remove rollout(%s/%s) workload(%s) annotation[%s] success", r.rollout.Namespace, r.rollout.Name, r.workload.Name, util.InRolloutProgressingAnnotation)
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,110 +0,0 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/batchrelease"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
type rolloutContext struct {
|
||||
client.Client
|
||||
|
||||
rollout *rolloutv1alpha1.Rollout
|
||||
|
||||
newStatus *rolloutv1alpha1.RolloutStatus
|
||||
|
||||
isComplete bool
|
||||
|
||||
stableService string
|
||||
|
||||
canaryService string
|
||||
|
||||
workload *util.Workload
|
||||
|
||||
batchControl batchrelease.BatchRelease
|
||||
|
||||
recheckTime *time.Time
|
||||
|
||||
recorder record.EventRecorder
|
||||
}
|
||||
|
||||
func newRolloutContext(client client.Client, recorder record.EventRecorder, rollout *rolloutv1alpha1.Rollout, newStatus *rolloutv1alpha1.RolloutStatus, workload *util.Workload) *rolloutContext {
|
||||
rolloutCon := &rolloutContext{
|
||||
Client: client,
|
||||
rollout: rollout,
|
||||
newStatus: newStatus,
|
||||
batchControl: batchrelease.NewInnerBatchController(client, rollout, getRolloutID(workload, rollout)),
|
||||
workload: workload,
|
||||
recorder: recorder,
|
||||
}
|
||||
if len(rolloutCon.rollout.Spec.Strategy.Canary.TrafficRoutings) > 0 {
|
||||
rolloutCon.stableService = rolloutCon.rollout.Spec.Strategy.Canary.TrafficRoutings[0].Service
|
||||
rolloutCon.canaryService = fmt.Sprintf("%s-canary", rolloutCon.stableService)
|
||||
}
|
||||
return rolloutCon
|
||||
}
|
||||
|
||||
func (r *rolloutContext) reconcile() error {
|
||||
// canary strategy
|
||||
if r.rollout.Spec.Strategy.Canary != nil {
|
||||
klog.Infof("rollout(%s/%s) run Canary action...", r.rollout.Namespace, r.rollout.Name)
|
||||
return r.runCanary()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *rolloutContext) finalising() (bool, error) {
|
||||
// canary strategy
|
||||
if r.rollout.Spec.Strategy.Canary != nil {
|
||||
done, err := r.doCanaryFinalising()
|
||||
if err == nil && !done {
|
||||
// The finalizer is not finished, wait one second
|
||||
expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
|
||||
r.recheckTime = &expectedTime
|
||||
}
|
||||
return done, err
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (r *rolloutContext) podRevisionLabelKey() string {
|
||||
if r.workload == nil {
|
||||
return ""
|
||||
}
|
||||
return r.workload.RevisionLabelKey
|
||||
}
|
||||
|
||||
func getRolloutID(workload *util.Workload, rollout *rolloutv1alpha1.Rollout) string {
|
||||
if workload != nil {
|
||||
firstChoice := workload.Labels[util.RolloutIDLabel]
|
||||
if firstChoice != "" {
|
||||
return firstChoice
|
||||
}
|
||||
}
|
||||
if rollout != nil {
|
||||
return rollout.Spec.RolloutID
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
|
@ -1,105 +0,0 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
func (r *RolloutReconciler) reconcileRolloutTerminating(rollout *rolloutv1alpha1.Rollout) (*time.Time, error) {
|
||||
cond := util.GetRolloutCondition(rollout.Status, rolloutv1alpha1.RolloutConditionTerminating)
|
||||
if cond.Reason == rolloutv1alpha1.TerminatingReasonCompleted {
|
||||
return nil, nil
|
||||
}
|
||||
newStatus := rollout.Status.DeepCopy()
|
||||
done, recheckTime, err := r.doFinalising(rollout, newStatus, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if done {
|
||||
klog.Infof("rollout(%s/%s) is terminating, and state from(%s) -> to(%s)", rollout.Namespace, rollout.Name, cond.Reason, rolloutv1alpha1.TerminatingReasonCompleted)
|
||||
cond.Reason = rolloutv1alpha1.TerminatingReasonCompleted
|
||||
cond.Status = corev1.ConditionTrue
|
||||
util.SetRolloutCondition(newStatus, *cond)
|
||||
}
|
||||
err = r.updateRolloutStatusInternal(rollout, *newStatus)
|
||||
if err != nil {
|
||||
klog.Errorf("update rollout(%s/%s) status failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return nil, err
|
||||
}
|
||||
return recheckTime, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) doFinalising(rollout *rolloutv1alpha1.Rollout, newStatus *rolloutv1alpha1.RolloutStatus, isComplete bool) (bool, *time.Time, error) {
|
||||
klog.Infof("reconcile rollout(%s/%s) doFinalising", rollout.Namespace, rollout.Name)
|
||||
// fetch target workload
|
||||
workload, err := r.Finder.GetWorkloadForRef(rollout.Namespace, rollout.Spec.ObjectRef.WorkloadRef)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) GetWorkloadForRef failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return false, nil, err
|
||||
}
|
||||
rolloutCon := newRolloutContext(r.Client, r.Recorder, rollout, newStatus, workload)
|
||||
rolloutCon.isComplete = isComplete
|
||||
done, err := rolloutCon.finalising()
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) Progressing failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return false, nil, err
|
||||
} else if !done {
|
||||
klog.Infof("rollout(%s/%s) finalizer is not finished, and retry reconcile", rollout.Namespace, rollout.Name)
|
||||
return false, rolloutCon.recheckTime, nil
|
||||
}
|
||||
//newStatus.CanaryStatus = nil
|
||||
klog.Infof("run rollout(%s/%s) Progressing Finalising done", rollout.Namespace, rollout.Name)
|
||||
return true, nil, nil
|
||||
}
|
||||
|
||||
// handle adding and handle finalizer logic, it turns if we should continue to reconcile
|
||||
func (r *RolloutReconciler) handleFinalizer(rollout *rolloutv1alpha1.Rollout) error {
|
||||
// delete rollout crd, remove finalizer
|
||||
if !rollout.DeletionTimestamp.IsZero() {
|
||||
cond := util.GetRolloutCondition(rollout.Status, rolloutv1alpha1.RolloutConditionTerminating)
|
||||
if cond != nil && cond.Reason == rolloutv1alpha1.TerminatingReasonCompleted {
|
||||
// Completed
|
||||
if controllerutil.ContainsFinalizer(rollout, util.KruiseRolloutFinalizer) {
|
||||
err := util.UpdateFinalizer(r.Client, rollout, util.RemoveFinalizerOpType, util.KruiseRolloutFinalizer)
|
||||
if err != nil {
|
||||
klog.Errorf("remove rollout(%s/%s) finalizer failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
klog.Infof("remove rollout(%s/%s) finalizer success", rollout.Namespace, rollout.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// create rollout crd, add finalizer
|
||||
if !controllerutil.ContainsFinalizer(rollout, util.KruiseRolloutFinalizer) {
|
||||
err := util.UpdateFinalizer(r.Client, rollout, util.AddFinalizerOpType, util.KruiseRolloutFinalizer)
|
||||
if err != nil {
|
||||
klog.Errorf("register rollout(%s/%s) finalizer failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
klog.Infof("register rollout(%s/%s) finalizer success", rollout.Namespace, rollout.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,248 +0,0 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/batchrelease"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/trafficrouting"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var defaultGracePeriodSeconds int32 = 3
|
||||
|
||||
// parameter1 retryReconcile, parameter2 error
|
||||
func (r *RolloutReconciler) reconcileRolloutProgressing(rollout *rolloutv1alpha1.Rollout) (*time.Time, error) {
|
||||
cond := util.GetRolloutCondition(rollout.Status, rolloutv1alpha1.RolloutConditionProgressing)
|
||||
klog.Infof("reconcile rollout(%s/%s) progressing action", rollout.Namespace, rollout.Name)
|
||||
workload, err := r.Finder.GetWorkloadForRef(rollout.Namespace, rollout.Spec.ObjectRef.WorkloadRef)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) get workload failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return nil, err
|
||||
} else if workload == nil {
|
||||
klog.Errorf("rollout(%s/%s) workload Not Found", rollout.Namespace, rollout.Name)
|
||||
return nil, nil
|
||||
} else if !workload.IsStatusConsistent {
|
||||
klog.Infof("rollout(%s/%s) workload status isn't consistent, then wait a moment", rollout.Namespace, rollout.Name)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var recheckTime *time.Time
|
||||
newStatus := rollout.Status.DeepCopy()
|
||||
switch cond.Reason {
|
||||
case rolloutv1alpha1.ProgressingReasonInitializing:
|
||||
klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
|
||||
// new canaryStatus
|
||||
newStatus.CanaryStatus = &rolloutv1alpha1.CanaryStatus{}
|
||||
done, _, err := r.doProgressingInitializing(rollout, newStatus)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) doProgressingInitializing error(%s)", rollout.Namespace, rollout.Name, err.Error())
|
||||
return nil, err
|
||||
} else if done {
|
||||
progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonInRolling, "Rollout is in Progressing")
|
||||
} else {
|
||||
// Incomplete, recheck
|
||||
expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
|
||||
recheckTime = &expectedTime
|
||||
klog.Infof("rollout(%s/%s) doProgressingInitializing is incomplete, and recheck(%s)", rollout.Namespace, rollout.Name, expectedTime.String())
|
||||
}
|
||||
|
||||
case rolloutv1alpha1.ProgressingReasonInRolling:
|
||||
klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
|
||||
recheckTime, err = r.doProgressingInRolling(rollout, workload, newStatus)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
case rolloutv1alpha1.ProgressingReasonFinalising:
|
||||
klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
|
||||
var done bool
|
||||
done, recheckTime, err = r.doFinalising(rollout, newStatus, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// finalizer is finished
|
||||
} else if done {
|
||||
progressingStateTransition(newStatus, corev1.ConditionTrue, rolloutv1alpha1.ProgressingReasonSucceeded, "Rollout has been completed, and succeed")
|
||||
}
|
||||
|
||||
case rolloutv1alpha1.ProgressingReasonPaused:
|
||||
if workload.IsInRollback {
|
||||
newStatus.CanaryStatus.CanaryRevision = workload.CanaryRevision
|
||||
r.Recorder.Eventf(rollout, corev1.EventTypeNormal, "Progressing", "workload has been rollback, then rollout is canceled")
|
||||
klog.Infof("rollout(%s/%s) workload has been rollback, then rollout canceled", rollout.Namespace, rollout.Name)
|
||||
progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonCancelling, "The workload has been rolled back and the rollout process will be cancelled")
|
||||
// from paused to inRolling
|
||||
} else if !rollout.Spec.Strategy.Paused {
|
||||
klog.Infof("rollout(%s/%s) is Progressing, but paused", rollout.Namespace, rollout.Name)
|
||||
progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonInRolling, "")
|
||||
}
|
||||
|
||||
case rolloutv1alpha1.ProgressingReasonCancelling:
|
||||
klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
|
||||
var done bool
|
||||
done, recheckTime, err = r.doFinalising(rollout, newStatus, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// finalizer is finished
|
||||
} else if done {
|
||||
progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonCanceled, "")
|
||||
}
|
||||
|
||||
case rolloutv1alpha1.ProgressingReasonSucceeded, rolloutv1alpha1.ProgressingReasonCanceled:
|
||||
klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
|
||||
}
|
||||
|
||||
err = r.updateRolloutStatusInternal(rollout, *newStatus)
|
||||
if err != nil {
|
||||
klog.Errorf("update rollout(%s/%s) status failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return nil, err
|
||||
}
|
||||
return recheckTime, nil
|
||||
}
|
||||
|
||||
func progressingStateTransition(status *rolloutv1alpha1.RolloutStatus, condStatus corev1.ConditionStatus, reason, message string) {
|
||||
cond := util.GetRolloutCondition(*status, rolloutv1alpha1.RolloutConditionProgressing)
|
||||
if cond == nil {
|
||||
cond = util.NewRolloutCondition(rolloutv1alpha1.RolloutConditionProgressing, condStatus, reason, message)
|
||||
} else {
|
||||
cond.Status = condStatus
|
||||
cond.Reason = reason
|
||||
if message != "" {
|
||||
cond.Message = message
|
||||
}
|
||||
}
|
||||
util.SetRolloutCondition(status, *cond)
|
||||
status.Message = cond.Message
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) doProgressingInitializing(rollout *rolloutv1alpha1.Rollout, newStatus *rolloutv1alpha1.RolloutStatus) (bool, string, error) {
|
||||
// canary release
|
||||
return r.verifyCanaryStrategy(rollout, newStatus)
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) doProgressingReset(rollout *rolloutv1alpha1.Rollout, newStatus *rolloutv1alpha1.RolloutStatus) (bool, error) {
|
||||
rolloutCon := newRolloutContext(r.Client, r.Recorder, rollout, newStatus, nil)
|
||||
if rolloutCon.rollout.Spec.Strategy.Canary.TrafficRoutings != nil {
|
||||
// 1. remove stable service podRevision selector
|
||||
done, err := rolloutCon.restoreStableService()
|
||||
if err != nil || !done {
|
||||
return done, err
|
||||
}
|
||||
// 2. route all traffic to stable service
|
||||
done, err = rolloutCon.doFinalisingTrafficRouting()
|
||||
if err != nil || !done {
|
||||
return done, err
|
||||
}
|
||||
}
|
||||
|
||||
// 3. delete batchRelease CRD
|
||||
done, err := rolloutCon.batchControl.Finalize()
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) DoFinalising batchRelease failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return false, err
|
||||
} else if !done {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) verifyCanaryStrategy(rollout *rolloutv1alpha1.Rollout, newStatus *rolloutv1alpha1.RolloutStatus) (bool, string, error) {
|
||||
canary := rollout.Spec.Strategy.Canary
|
||||
// Traffic routing
|
||||
if canary.TrafficRoutings != nil && len(canary.TrafficRoutings) > 0 {
|
||||
rolloutCon := newRolloutContext(r.Client, r.Recorder, rollout, newStatus, nil)
|
||||
trController, err := rolloutCon.newTrafficRoutingController(rolloutCon)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
if ok, msg, err := r.verifyTrafficRouting(rollout.Namespace, canary.TrafficRoutings[0], trController); !ok {
|
||||
return ok, msg, err
|
||||
}
|
||||
}
|
||||
|
||||
// It is not allowed to modify the rollout.spec in progressing phase (validate webhook rollout),
|
||||
// but in many scenarios the user may modify the workload and rollout spec at the same time,
|
||||
// and there is a possibility that the workload is released first, and due to some network or other reasons the rollout spec is delayed by a few seconds,
|
||||
// so this is mainly compatible with this scenario.
|
||||
cond := util.GetRolloutCondition(*newStatus, rolloutv1alpha1.RolloutConditionProgressing)
|
||||
if verifyTime := cond.LastUpdateTime.Add(time.Second * time.Duration(defaultGracePeriodSeconds)); verifyTime.After(time.Now()) {
|
||||
klog.Infof("verify rollout(%s/%s) TrafficRouting done, and wait a moment", rollout.Namespace, rollout.Name)
|
||||
return false, "", nil
|
||||
}
|
||||
return true, "", nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) verifyTrafficRouting(ns string, tr *rolloutv1alpha1.TrafficRouting, c trafficrouting.Controller) (bool, string, error) {
|
||||
// check service
|
||||
service := &corev1.Service{}
|
||||
err := r.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: tr.Service}, service)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return false, fmt.Sprintf("Service(%s/%s) is Not Found", ns, tr.Service), nil
|
||||
}
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
// check the traffic routing configuration
|
||||
err = c.Initialize(context.TODO())
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
return true, "", nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) reCalculateCanaryStepIndex(rollout *rolloutv1alpha1.Rollout, batchControl batchrelease.BatchRelease) (int32, error) {
|
||||
batch, err := batchControl.FetchBatchRelease()
|
||||
if errors.IsNotFound(err) {
|
||||
return 1, nil
|
||||
} else if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
workload, err := r.Finder.GetWorkloadForRef(rollout.Namespace, rollout.Spec.ObjectRef.WorkloadRef)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) get workload failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return 0, err
|
||||
}
|
||||
currentReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.Spec.ReleasePlan.Batches[*batch.Spec.ReleasePlan.BatchPartition].CanaryReplicas, int(workload.Replicas), true)
|
||||
|
||||
var stepIndex int32
|
||||
for i := range rollout.Spec.Strategy.Canary.Steps {
|
||||
step := rollout.Spec.Strategy.Canary.Steps[i]
|
||||
var desiredReplicas int
|
||||
if step.Replicas != nil {
|
||||
desiredReplicas, _ = intstr.GetScaledValueFromIntOrPercent(step.Replicas, int(workload.Replicas), true)
|
||||
} else {
|
||||
replicas := intstr.FromString(strconv.Itoa(int(*step.Weight)) + "%")
|
||||
desiredReplicas, _ = intstr.GetScaledValueFromIntOrPercent(&replicas, int(workload.Replicas), true)
|
||||
}
|
||||
stepIndex = int32(i + 1)
|
||||
if currentReplicas <= desiredReplicas {
|
||||
break
|
||||
}
|
||||
}
|
||||
return stepIndex, nil
|
||||
}
|
||||
|
|
@ -1,266 +0,0 @@
|
|||
/*
|
||||
Copyright 2021.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/batchrelease"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
|
||||
func TestReCalculateCanaryStepIndex(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
getObj func() (*apps.Deployment, *apps.ReplicaSet)
|
||||
getRollout func() *rolloutv1alpha1.Rollout
|
||||
getBatchRelease func() *rolloutv1alpha1.BatchRelease
|
||||
expectStepIndex int32
|
||||
}{
|
||||
{
|
||||
name: "steps changed v1",
|
||||
getObj: func() (*apps.Deployment, *apps.ReplicaSet) {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
return obj, rsDemo.DeepCopy()
|
||||
},
|
||||
getRollout: func() *rolloutv1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []rolloutv1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(20),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(50),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(100),
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
getBatchRelease: func() *rolloutv1alpha1.BatchRelease {
|
||||
obj := batchDemo.DeepCopy()
|
||||
obj.Spec.ReleasePlan.Batches = []rolloutv1alpha1.ReleaseBatch{
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("40%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("60%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("100%"),
|
||||
},
|
||||
}
|
||||
obj.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
|
||||
return obj
|
||||
},
|
||||
expectStepIndex: 2,
|
||||
},
|
||||
{
|
||||
name: "steps changed v2",
|
||||
getObj: func() (*apps.Deployment, *apps.ReplicaSet) {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
return obj, rsDemo.DeepCopy()
|
||||
},
|
||||
getRollout: func() *rolloutv1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []rolloutv1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(20),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(40),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(100),
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
getBatchRelease: func() *rolloutv1alpha1.BatchRelease {
|
||||
obj := batchDemo.DeepCopy()
|
||||
obj.Spec.ReleasePlan.Batches = []rolloutv1alpha1.ReleaseBatch{
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("40%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("60%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("100%"),
|
||||
},
|
||||
}
|
||||
obj.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
|
||||
return obj
|
||||
},
|
||||
expectStepIndex: 2,
|
||||
},
|
||||
{
|
||||
name: "steps changed v3",
|
||||
getObj: func() (*apps.Deployment, *apps.ReplicaSet) {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
return obj, rsDemo.DeepCopy()
|
||||
},
|
||||
getRollout: func() *rolloutv1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []rolloutv1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(40),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(60),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(100),
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
getBatchRelease: func() *rolloutv1alpha1.BatchRelease {
|
||||
obj := batchDemo.DeepCopy()
|
||||
obj.Spec.ReleasePlan.Batches = []rolloutv1alpha1.ReleaseBatch{
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("20%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("40%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("100%"),
|
||||
},
|
||||
}
|
||||
obj.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(1)
|
||||
return obj
|
||||
},
|
||||
expectStepIndex: 1,
|
||||
},
|
||||
{
|
||||
name: "steps changed v4",
|
||||
getObj: func() (*apps.Deployment, *apps.ReplicaSet) {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
return obj, rsDemo.DeepCopy()
|
||||
},
|
||||
getRollout: func() *rolloutv1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []rolloutv1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(10),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(30),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(100),
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
getBatchRelease: func() *rolloutv1alpha1.BatchRelease {
|
||||
obj := batchDemo.DeepCopy()
|
||||
obj.Spec.ReleasePlan.Batches = []rolloutv1alpha1.ReleaseBatch{
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("20%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("40%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("100%"),
|
||||
},
|
||||
}
|
||||
obj.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
|
||||
return obj
|
||||
},
|
||||
expectStepIndex: 2,
|
||||
},
|
||||
{
|
||||
name: "steps changed v5",
|
||||
getObj: func() (*apps.Deployment, *apps.ReplicaSet) {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
return obj, rsDemo.DeepCopy()
|
||||
},
|
||||
getRollout: func() *rolloutv1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []rolloutv1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(2),
|
||||
Replicas: &intstr.IntOrString{
|
||||
Type: intstr.String,
|
||||
StrVal: "10%",
|
||||
},
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(3),
|
||||
Replicas: &intstr.IntOrString{
|
||||
Type: intstr.String,
|
||||
StrVal: "10%",
|
||||
},
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
getBatchRelease: func() *rolloutv1alpha1.BatchRelease {
|
||||
obj := batchDemo.DeepCopy()
|
||||
obj.Spec.ReleasePlan.Batches = []rolloutv1alpha1.ReleaseBatch{
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("10%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("20%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("30%"),
|
||||
},
|
||||
}
|
||||
obj.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
|
||||
return obj
|
||||
},
|
||||
expectStepIndex: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, cs := range cases {
|
||||
t.Run(cs.name, func(t *testing.T) {
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).Build()
|
||||
client.Create(context.TODO(), cs.getBatchRelease())
|
||||
dep, rs := cs.getObj()
|
||||
client.Create(context.TODO(), dep)
|
||||
client.Create(context.TODO(), rs)
|
||||
client.Create(context.TODO(), cs.getRollout())
|
||||
|
||||
reconciler := &RolloutReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Finder: util.NewControllerFinder(client),
|
||||
}
|
||||
batchControl := batchrelease.NewInnerBatchController(client, cs.getRollout(), "")
|
||||
newStepIndex, err := reconciler.reCalculateCanaryStepIndex(cs.getRollout(), batchControl)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
if cs.expectStepIndex != newStepIndex {
|
||||
t.Fatalf("expect %d, but %d", cs.expectStepIndex, newStepIndex)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,159 +0,0 @@
|
|||
package rollout
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/batchrelease"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func (r *RolloutReconciler) doProgressingInRolling(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
|
||||
// Handle the 5 special cases firstly, and we had better keep the order of following cases:
|
||||
|
||||
switch {
|
||||
// 1. In case of rollback in a quick way, un-paused and just use workload rolling strategy
|
||||
case isRollingBackDirectly(rollout, workload):
|
||||
return r.handleRollbackDirectly(rollout, workload, newStatus)
|
||||
|
||||
// 2. In case of rollout paused, just stop reconcile
|
||||
case isRolloutPaused(rollout):
|
||||
return r.handleRolloutPaused(rollout, newStatus)
|
||||
|
||||
// 3. In case of rollback in a batch way, use rollout step strategy
|
||||
case isRollingBackInBatches(rollout, workload):
|
||||
return r.handleRollbackInBatches(rollout, workload, newStatus)
|
||||
|
||||
// 4. In case of continuous publishing(v1 -> v2 -> v3), restart publishing
|
||||
case isContinuousRelease(rollout, workload):
|
||||
return r.handleContinuousRelease(rollout, workload, newStatus)
|
||||
|
||||
// 5. In case of rollout plan changed, recalculate and publishing
|
||||
case isRolloutPlanChanged(rollout):
|
||||
return r.handleRolloutPlanChanged(rollout, workload, newStatus)
|
||||
}
|
||||
|
||||
return r.handleNormalRolling(rollout, workload, newStatus)
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleRolloutPaused(rollout *rolloutv1alpha1.Rollout, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
|
||||
klog.Infof("rollout(%s/%s) is Progressing, but paused", rollout.Namespace, rollout.Name)
|
||||
progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonPaused, "Rollout has been paused, you can resume it by kube-cli")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleContinuousRelease(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
|
||||
r.Recorder.Eventf(rollout, corev1.EventTypeNormal, "Progressing", "workload continuous publishing canaryRevision, then restart publishing")
|
||||
klog.Infof("rollout(%s/%s) workload continuous publishing canaryRevision from(%s) -> to(%s), then restart publishing",
|
||||
rollout.Namespace, rollout.Name, newStatus.CanaryStatus.CanaryRevision, workload.CanaryRevision)
|
||||
|
||||
var recheckTime *time.Time
|
||||
done, err := r.doProgressingReset(rollout, newStatus)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) doProgressingReset failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return nil, err
|
||||
} else if done {
|
||||
progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonInitializing, "Workload is continuous release")
|
||||
klog.Infof("rollout(%s/%s) workload is continuous publishing, reset complete", rollout.Namespace, rollout.Name)
|
||||
} else {
|
||||
// Incomplete, recheck
|
||||
expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
|
||||
recheckTime = &expectedTime
|
||||
klog.Infof("rollout(%s/%s) workload is continuous publishing, reset incomplete, and recheck(%s)", rollout.Namespace, rollout.Name, expectedTime.String())
|
||||
}
|
||||
return recheckTime, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleRollbackDirectly(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
|
||||
newStatus.CanaryStatus.CanaryRevision = workload.CanaryRevision
|
||||
r.Recorder.Eventf(rollout, corev1.EventTypeNormal, "Progressing", "workload has been rollback, then rollout is canceled")
|
||||
klog.Infof("rollout(%s/%s) workload has been rollback directly, then rollout canceled", rollout.Namespace, rollout.Name)
|
||||
progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonCancelling, "The workload has been rolled back and the rollout process will be cancelled")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleRollbackInBatches(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
|
||||
// restart from the beginning
|
||||
newStatus.CanaryStatus.CurrentStepIndex = 1
|
||||
newStatus.CanaryStatus.CanaryRevision = workload.CanaryRevision
|
||||
newStatus.CanaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateUpgrade
|
||||
newStatus.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
newStatus.CanaryStatus.RolloutHash = rollout.Annotations[util.RolloutHashAnnotation]
|
||||
klog.Infof("rollout(%s/%s) workload has been rollback in batches, then restart from beginning", rollout.Namespace, rollout.Name)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleRolloutPlanChanged(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
|
||||
batchControl := batchrelease.NewInnerBatchController(r.Client, rollout, getRolloutID(workload, rollout))
|
||||
newStepIndex, err := r.reCalculateCanaryStepIndex(rollout, batchControl)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) reCalculate Canary StepIndex failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return nil, err
|
||||
}
|
||||
// canary step configuration change causes current step index change
|
||||
newStatus.CanaryStatus.CurrentStepIndex = newStepIndex
|
||||
newStatus.CanaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateUpgrade
|
||||
newStatus.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
newStatus.CanaryStatus.RolloutHash = rollout.Annotations[util.RolloutHashAnnotation]
|
||||
klog.Infof("rollout(%s/%s) canary step configuration change, and stepIndex(%d) state(%s)",
|
||||
rollout.Namespace, rollout.Name, newStatus.CanaryStatus.CurrentStepIndex, newStatus.CanaryStatus.CurrentStepState)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleNormalRolling(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
|
||||
//check if canary is done
|
||||
if newStatus.CanaryStatus.CurrentStepState == rolloutv1alpha1.CanaryStepStateCompleted {
|
||||
klog.Infof("rollout(%s/%s) progressing rolling done", rollout.Namespace, rollout.Name)
|
||||
progressingStateTransition(newStatus, corev1.ConditionTrue, rolloutv1alpha1.ProgressingReasonFinalising, "Rollout has been completed and some closing work is being done")
|
||||
} else { // rollout is in rolling
|
||||
newStatus.CanaryStatus.PodTemplateHash = workload.PodTemplateHash
|
||||
return r.doNormalRolling(rollout, workload, newStatus)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) doNormalRolling(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
|
||||
rolloutCon := newRolloutContext(r.Client, r.Recorder, rollout, newStatus, workload)
|
||||
err := rolloutCon.reconcile()
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) Progressing failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return nil, err
|
||||
}
|
||||
return rolloutCon.recheckTime, nil
|
||||
}
|
||||
|
||||
/* **********************************************************************
|
||||
help functions
|
||||
*********************************************************************** */
|
||||
func isRolloutPaused(rollout *rolloutv1alpha1.Rollout) bool {
|
||||
return rollout.Spec.Strategy.Paused
|
||||
}
|
||||
|
||||
func isRolloutPlanChanged(rollout *rolloutv1alpha1.Rollout) bool {
|
||||
status := &rollout.Status
|
||||
return status.CanaryStatus.RolloutHash != "" && status.CanaryStatus.RolloutHash != rollout.Annotations[util.RolloutHashAnnotation]
|
||||
}
|
||||
|
||||
func isContinuousRelease(rollout *rolloutv1alpha1.Rollout, workload *util.Workload) bool {
|
||||
status := &rollout.Status
|
||||
return status.CanaryStatus.CanaryRevision != "" && workload.CanaryRevision != status.CanaryStatus.CanaryRevision && !workload.IsInRollback
|
||||
}
|
||||
|
||||
func isRollingBackDirectly(rollout *rolloutv1alpha1.Rollout, workload *util.Workload) bool {
|
||||
status := &rollout.Status
|
||||
inBatch := util.IsRollbackInBatchPolicy(rollout, workload.Labels)
|
||||
return workload.IsInRollback && workload.CanaryRevision != status.CanaryStatus.CanaryRevision && !inBatch
|
||||
}
|
||||
|
||||
func isRollingBackInBatches(rollout *rolloutv1alpha1.Rollout, workload *util.Workload) bool {
|
||||
// currently, only support the case of no traffic routing
|
||||
if len(rollout.Spec.Strategy.Canary.TrafficRoutings) > 0 {
|
||||
return false
|
||||
}
|
||||
status := &rollout.Status
|
||||
inBatch := util.IsRollbackInBatchPolicy(rollout, workload.Labels)
|
||||
return workload.IsInRollback && workload.CanaryRevision != status.CanaryStatus.CanaryRevision && inBatch
|
||||
}
|
||||
|
|
@ -0,0 +1,453 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/trafficrouting"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
type canaryReleaseManager struct {
|
||||
client.Client
|
||||
trafficRoutingManager *trafficrouting.Manager
|
||||
recorder record.EventRecorder
|
||||
}
|
||||
|
||||
// runCanary executes one reconcile pass of the canary state machine for the
// rollout carried in c. It first syncs status with the companion BatchRelease
// (if one exists), then dispatches on CanaryStatus.CurrentStepState:
// Upgrade -> TrafficRouting -> MetricsAnalysis -> Paused -> Ready -> (next
// step or Completed). State transitions mutate c.NewStatus in place; the
// caller is responsible for persisting it.
func (m *canaryReleaseManager) runCanary(c *util.RolloutContext) error {
	canaryStatus := c.NewStatus.CanaryStatus
	if br, err := m.fetchBatchRelease(c.Rollout.Namespace, c.Rollout.Name); err != nil && !errors.IsNotFound(err) {
		klog.Errorf("rollout(%s/%s) fetch batchRelease failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
		return err
	} else if err == nil {
		// This line will do something important:
		// - sync status from br to Rollout: to better observability;
		// - sync rollout-id from Rollout to br: to make BatchRelease
		// relabels pods in the scene where only rollout-id is changed.
		if err = m.syncBatchRelease(br, canaryStatus); err != nil {
			klog.Errorf("rollout(%s/%s) sync batchRelease failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
			return err
		}
	}
	// update podTemplateHash, Why is this position assigned?
	// Because If workload is deployment, only after canary pod already was created,
	// we can get the podTemplateHash from pod.annotations[pod-template-hash]
	if canaryStatus.PodTemplateHash == "" {
		canaryStatus.PodTemplateHash = c.Workload.PodTemplateHash
	}

	switch canaryStatus.CurrentStepState {
	// Upgrade: drive the BatchRelease to roll out the current batch of
	// canary pods; advance to TrafficRouting once the batch is ready.
	case v1alpha1.CanaryStepStateUpgrade:
		klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", c.Rollout.Namespace, c.Rollout.Name, v1alpha1.CanaryStepStateUpgrade)
		done, err := m.doCanaryUpgrade(c)
		if err != nil {
			return err
		} else if done {
			canaryStatus.CurrentStepState = v1alpha1.CanaryStepStateTrafficRouting
			canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
			klog.Infof("rollout(%s/%s) step(%d) state from(%s) -> to(%s)", c.Rollout.Namespace, c.Rollout.Name,
				canaryStatus.CurrentStepIndex, v1alpha1.CanaryStepStateUpgrade, canaryStatus.CurrentStepState)
		}

	// TrafficRouting: shift the configured traffic weight to the canary pods.
	case v1alpha1.CanaryStepStateTrafficRouting:
		klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", c.Rollout.Namespace, c.Rollout.Name, v1alpha1.CanaryStepStateTrafficRouting)
		done, err := m.trafficRoutingManager.DoTrafficRouting(c)
		if err != nil {
			return err
		} else if done {
			canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
			canaryStatus.CurrentStepState = v1alpha1.CanaryStepStateMetricsAnalysis
			klog.Infof("rollout(%s/%s) step(%d) state from(%s) -> to(%s)", c.Rollout.Namespace, c.Rollout.Name,
				canaryStatus.CurrentStepIndex, v1alpha1.CanaryStepStateTrafficRouting, canaryStatus.CurrentStepState)
		}
		// Always schedule a recheck shortly after touching traffic routing,
		// even when this pass transitioned state.
		expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
		c.RecheckTime = &expectedTime

	// MetricsAnalysis: currently a pass-through stub (see doCanaryMetricsAnalysis).
	// NOTE(review): unlike the other transitions, this one does not refresh
	// canaryStatus.LastUpdateTime — confirm whether that is intentional.
	case v1alpha1.CanaryStepStateMetricsAnalysis:
		klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", c.Rollout.Namespace, c.Rollout.Name, v1alpha1.CanaryStepStateMetricsAnalysis)
		done, err := m.doCanaryMetricsAnalysis(c)
		if err != nil {
			return err
		} else if done {
			canaryStatus.CurrentStepState = v1alpha1.CanaryStepStatePaused
			klog.Infof("rollout(%s/%s) step(%d) state from(%s) -> to(%s)", c.Rollout.Namespace, c.Rollout.Name,
				canaryStatus.CurrentStepIndex, v1alpha1.CanaryStepStateMetricsAnalysis, canaryStatus.CurrentStepState)
		}

	// Paused: wait for the step's pause duration, or for manual confirmation
	// when no duration is configured.
	case v1alpha1.CanaryStepStatePaused:
		klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", c.Rollout.Namespace, c.Rollout.Name, v1alpha1.CanaryStepStatePaused)
		done, err := m.doCanaryPaused(c)
		if err != nil {
			return err
		} else if done {
			canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
			canaryStatus.CurrentStepState = v1alpha1.CanaryStepStateReady
			klog.Infof("rollout(%s/%s) step(%d) state from(%s) -> to(%s)", c.Rollout.Namespace, c.Rollout.Name,
				canaryStatus.CurrentStepIndex, v1alpha1.CanaryStepStatePaused, canaryStatus.CurrentStepState)
		}

	// Ready: either advance to the next canary step, or mark the whole canary
	// process completed when the last step has finished.
	case v1alpha1.CanaryStepStateReady:
		klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", c.Rollout.Namespace, c.Rollout.Name, v1alpha1.CanaryStepStateReady)
		// run next step
		if len(c.Rollout.Spec.Strategy.Canary.Steps) > int(canaryStatus.CurrentStepIndex) {
			canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
			canaryStatus.CurrentStepIndex++
			canaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
			klog.Infof("rollout(%s/%s) canary step from(%d) -> to(%d)", c.Rollout.Namespace, c.Rollout.Name, canaryStatus.CurrentStepIndex-1, canaryStatus.CurrentStepIndex)
		} else {
			klog.Infof("rollout(%s/%s) canary run all steps, and completed", c.Rollout.Namespace, c.Rollout.Name)
			canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
			canaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
		}
		klog.Infof("rollout(%s/%s) step(%d) state from(%s) -> to(%s)", c.Rollout.Namespace, c.Rollout.Name,
			canaryStatus.CurrentStepIndex, v1alpha1.CanaryStepStateReady, canaryStatus.CurrentStepState)
	// canary completed
	case v1alpha1.CanaryStepStateCompleted:
		klog.Infof("rollout(%s/%s) run canary strategy, and state(%s)", c.Rollout.Namespace, c.Rollout.Name, v1alpha1.CanaryStepStateCompleted)
	}

	return nil
}
|
||||
|
||||
// doCanaryUpgrade makes sure the BatchRelease matches the current canary step
// and reports whether the step's batch of canary pods is fully ready.
// It returns (false, nil) while the BatchRelease controller is still catching
// up (spec just created/updated, status not yet observed, or batch not ready),
// and (true, nil) once the current batch is in ReadyBatchState.
func (m *canaryReleaseManager) doCanaryUpgrade(c *util.RolloutContext) (bool, error) {
	// verify whether batchRelease configuration is the latest
	steps := len(c.Rollout.Spec.Strategy.Canary.Steps)
	canaryStatus := c.NewStatus.CanaryStatus
	cond := util.GetRolloutCondition(*c.NewStatus, v1alpha1.RolloutConditionProgressing)
	cond.Message = fmt.Sprintf("Rollout is in step(%d/%d), and upgrade workload to new version", canaryStatus.CurrentStepIndex, steps)
	c.NewStatus.Message = cond.Message
	// run batch release to upgrade the workloads
	done, br, err := m.runBatchRelease(c.Rollout, getRolloutID(c.Workload), canaryStatus.CurrentStepIndex, c.Workload.IsInRollback)
	if err != nil {
		return false, err
	} else if !done {
		return false, nil
	}
	// The BatchRelease spec is up to date, but its controller has not yet
	// observed the latest plan/generation — wait for status to converge.
	if br.Status.ObservedReleasePlanHash != util.HashReleasePlanBatches(&br.Spec.ReleasePlan) ||
		br.Generation != br.Status.ObservedGeneration {
		klog.Infof("rollout(%s/%s) batchRelease status is inconsistent, and wait a moment", c.Rollout.Namespace, c.Rollout.Name)
		return false, nil
	}
	// check whether batchRelease is ready(whether new pods is ready.)
	// CurrentBatch is 0-based while CurrentStepIndex is 1-based, hence the +1.
	if br.Status.CanaryStatus.CurrentBatchState != v1alpha1.ReadyBatchState ||
		br.Status.CanaryStatus.CurrentBatch+1 < canaryStatus.CurrentStepIndex {
		klog.Infof("rollout(%s/%s) batchRelease status(%s) is not ready, and wait a moment", c.Rollout.Namespace, c.Rollout.Name, util.DumpJSON(br.Status))
		return false, nil
	}
	m.recorder.Eventf(c.Rollout, corev1.EventTypeNormal, "Progressing", fmt.Sprintf("upgrade step(%d) canary pods with new versions done", canaryStatus.CurrentStepIndex))
	klog.Infof("rollout(%s/%s) batch(%s) state(%s), and success",
		c.Rollout.Namespace, c.Rollout.Name, util.DumpJSON(br.Status), br.Status.CanaryStatus.CurrentBatchState)
	return true, nil
}
|
||||
|
||||
// doCanaryMetricsAnalysis is a placeholder for the metrics-analysis step.
// It always reports success, so the state machine advances straight from
// MetricsAnalysis to Paused until a real implementation lands.
func (m *canaryReleaseManager) doCanaryMetricsAnalysis(c *util.RolloutContext) (bool, error) {
	// todo
	return true, nil
}
|
||||
|
||||
func (m *canaryReleaseManager) doCanaryPaused(c *util.RolloutContext) (bool, error) {
|
||||
canaryStatus := c.NewStatus.CanaryStatus
|
||||
currentStep := c.Rollout.Spec.Strategy.Canary.Steps[canaryStatus.CurrentStepIndex-1]
|
||||
steps := len(c.Rollout.Spec.Strategy.Canary.Steps)
|
||||
// If it is the last step, and 100% of pods, then return true
|
||||
if int32(steps) == canaryStatus.CurrentStepIndex {
|
||||
if currentStep.Weight != nil && *currentStep.Weight == 100 ||
|
||||
currentStep.Replicas != nil && currentStep.Replicas.StrVal == "100%" {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
cond := util.GetRolloutCondition(*c.NewStatus, v1alpha1.RolloutConditionProgressing)
|
||||
// need manual confirmation
|
||||
if currentStep.Pause.Duration == nil {
|
||||
klog.Infof("rollout(%s/%s) don't set pause duration, and need manual confirmation", c.Rollout.Namespace, c.Rollout.Name)
|
||||
cond.Message = fmt.Sprintf("Rollout is in step(%d/%d), and you need manually confirm to enter the next step", canaryStatus.CurrentStepIndex, steps)
|
||||
c.NewStatus.Message = cond.Message
|
||||
return false, nil
|
||||
}
|
||||
cond.Message = fmt.Sprintf("Rollout is in step(%d/%d), and wait duration(%d seconds) to enter the next step", canaryStatus.CurrentStepIndex, steps, *currentStep.Pause.Duration)
|
||||
c.NewStatus.Message = cond.Message
|
||||
// wait duration time, then go to next step
|
||||
duration := time.Second * time.Duration(*currentStep.Pause.Duration)
|
||||
expectedTime := canaryStatus.LastUpdateTime.Add(duration)
|
||||
if expectedTime.Before(time.Now()) {
|
||||
klog.Infof("rollout(%s/%s) canary step(%d) paused duration(%d seconds), and go to the next step",
|
||||
c.Rollout.Namespace, c.Rollout.Name, canaryStatus.CurrentStepIndex, *currentStep.Pause.Duration)
|
||||
return true, nil
|
||||
}
|
||||
c.RecheckTime = &expectedTime
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// cleanup after rollout is completed or finished
//
// doCanaryFinalising runs the ordered teardown sequence for the canary:
//  1. remove the in-progressing annotation from the workload;
//  2. restore the stable service selector;
//  3. resume the workload via the BatchRelease;
//  4. route 100% traffic back to stable pods;
//  5. delete the BatchRelease object.
// Each phase must fully complete (done==true) before the next runs; a
// (false, nil) return means "in progress, reconcile again".
func (m *canaryReleaseManager) doCanaryFinalising(c *util.RolloutContext) (bool, error) {
	// when CanaryStatus is nil, which means canary action hasn't started yet, don't need doing cleanup
	if c.NewStatus.CanaryStatus == nil {
		return true, nil
	}
	// 1. rollout progressing complete, remove rollout progressing annotation in workload
	err := m.removeRolloutProgressingAnnotation(c)
	if err != nil {
		return false, err
	}
	// 2. remove stable service the pod revision selector, so stable service will be selector all version pods.
	done, err := m.trafficRoutingManager.FinalisingTrafficRouting(c, true)
	if err != nil || !done {
		return done, err
	}
	// 3. set workload.pause=false; set workload.partition=0
	done, err = m.finalizingBatchRelease(c)
	if err != nil || !done {
		return done, err
	}
	// 4. modify network api(ingress or gateway api) configuration, and route 100% traffic to stable pods.
	done, err = m.trafficRoutingManager.FinalisingTrafficRouting(c, false)
	if err != nil || !done {
		return done, err
	}
	// 5. delete batchRelease crd
	done, err = m.removeBatchRelease(c)
	if err != nil {
		klog.Errorf("rollout(%s/%s) Finalize batchRelease failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
		return false, err
	} else if !done {
		return false, nil
	}
	klog.Infof("rollout(%s/%s) doCanaryFinalising success", c.Rollout.Namespace, c.Rollout.Name)
	return true, nil
}
|
||||
|
||||
func (m *canaryReleaseManager) removeRolloutProgressingAnnotation(c *util.RolloutContext) error {
|
||||
if c.Workload == nil {
|
||||
return nil
|
||||
}
|
||||
if _, ok := c.Workload.Annotations[util.InRolloutProgressingAnnotation]; !ok {
|
||||
return nil
|
||||
}
|
||||
workloadRef := c.Rollout.Spec.ObjectRef.WorkloadRef
|
||||
workloadGVK := schema.FromAPIVersionAndKind(workloadRef.APIVersion, workloadRef.Kind)
|
||||
obj := util.GetEmptyWorkloadObject(workloadGVK)
|
||||
if err := m.Get(context.TODO(), types.NamespacedName{Name: c.Workload.Name, Namespace: c.Workload.Namespace}, obj); err != nil {
|
||||
klog.Errorf("getting updated workload(%s.%s) failed: %s", c.Workload.Namespace, c.Workload.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
body := fmt.Sprintf(`{"metadata":{"annotations":{"%s":null}}}`, util.InRolloutProgressingAnnotation)
|
||||
if err := m.Patch(context.TODO(), obj, client.RawPatch(types.MergePatchType, []byte(body))); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) patch workload(%s) failed: %s", c.Rollout.Namespace, c.Rollout.Name, c.Workload.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
klog.Infof("remove rollout(%s/%s) workload(%s) annotation[%s] success", c.Rollout.Namespace, c.Rollout.Name, c.Workload.Name, util.InRolloutProgressingAnnotation)
|
||||
return nil
|
||||
}
|
||||
|
||||
// runBatchRelease ensures a BatchRelease exists and matches the desired spec
// for the given (1-based) canary step. It returns done==true only when the
// existing BatchRelease already equals the desired one; creating or updating
// it returns done==false so the caller waits for the next reconcile.
func (m *canaryReleaseManager) runBatchRelease(rollout *v1alpha1.Rollout, rolloutId string, batch int32, isRollback bool) (bool, *v1alpha1.BatchRelease, error) {
	// Convert the 1-based step index into the 0-based BatchPartition.
	batch = batch - 1
	br, err := m.fetchBatchRelease(rollout.Namespace, rollout.Name)
	if errors.IsNotFound(err) {
		// create new BatchRelease Crd
		// AlreadyExists is tolerated: another reconcile may have raced us.
		br = createBatchRelease(rollout, rolloutId, batch, isRollback)
		if err = m.Create(context.TODO(), br); err != nil && !errors.IsAlreadyExists(err) {
			klog.Errorf("rollout(%s/%s) create BatchRelease failed: %s", rollout.Namespace, rollout.Name, err.Error())
			return false, nil, err
		}
		klog.Infof("rollout(%s/%s) create BatchRelease(%s) success", rollout.Namespace, rollout.Name, util.DumpJSON(br))
		return false, br, nil
	} else if err != nil {
		klog.Errorf("rollout(%s/%s) fetch BatchRelease failed: %s", rollout.Namespace, rollout.Name, err.Error())
		return false, nil, err
	}

	// check whether batchRelease configuration is the latest
	newBr := createBatchRelease(rollout, rolloutId, batch, isRollback)
	if reflect.DeepEqual(br.Spec, newBr.Spec) && reflect.DeepEqual(br.Annotations, newBr.Annotations) {
		klog.Infof("rollout(%s/%s) do batchRelease batch(%d) success", rollout.Namespace, rollout.Name, batch+1)
		return true, br, nil
	}

	// update batchRelease to the latest version
	// Re-fetch inside the retry loop so each attempt updates the freshest
	// resourceVersion; note the closure deliberately reuses the outer err.
	if err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		if err = m.Get(context.TODO(), client.ObjectKey{Namespace: newBr.Namespace, Name: newBr.Name}, br); err != nil {
			klog.Errorf("error getting BatchRelease(%s/%s) from client", newBr.Namespace, newBr.Name)
			return err
		}
		br.Spec = newBr.Spec
		br.Annotations = newBr.Annotations
		return m.Client.Update(context.TODO(), br)
	}); err != nil {
		klog.Errorf("rollout(%s/%s) update batchRelease failed: %s", rollout.Namespace, rollout.Name, err.Error())
		return false, nil, err
	}
	klog.Infof("rollout(%s/%s) update batchRelease(%s) configuration to latest", rollout.Namespace, rollout.Name, util.DumpJSON(br))
	return false, br, nil
}
|
||||
|
||||
func (m *canaryReleaseManager) fetchBatchRelease(ns, name string) (*v1alpha1.BatchRelease, error) {
|
||||
br := &v1alpha1.BatchRelease{}
|
||||
// batchRelease.name is equal related rollout.name
|
||||
err := m.Get(context.TODO(), client.ObjectKey{Namespace: ns, Name: name}, br)
|
||||
return br, err
|
||||
}
|
||||
|
||||
func createBatchRelease(rollout *v1alpha1.Rollout, rolloutID string, batch int32, isRollback bool) *v1alpha1.BatchRelease {
|
||||
var batches []v1alpha1.ReleaseBatch
|
||||
for _, step := range rollout.Spec.Strategy.Canary.Steps {
|
||||
if step.Replicas == nil {
|
||||
batches = append(batches, v1alpha1.ReleaseBatch{CanaryReplicas: intstr.FromString(strconv.Itoa(int(*step.Weight)) + "%")})
|
||||
} else {
|
||||
batches = append(batches, v1alpha1.ReleaseBatch{CanaryReplicas: *step.Replicas})
|
||||
}
|
||||
}
|
||||
br := &v1alpha1.BatchRelease{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: rollout.Namespace,
|
||||
Name: rollout.Name,
|
||||
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(rollout, rolloutControllerKind)},
|
||||
},
|
||||
Spec: v1alpha1.BatchReleaseSpec{
|
||||
TargetRef: v1alpha1.ObjectRef{
|
||||
WorkloadRef: &v1alpha1.WorkloadRef{
|
||||
APIVersion: rollout.Spec.ObjectRef.WorkloadRef.APIVersion,
|
||||
Kind: rollout.Spec.ObjectRef.WorkloadRef.Kind,
|
||||
Name: rollout.Spec.ObjectRef.WorkloadRef.Name,
|
||||
},
|
||||
},
|
||||
ReleasePlan: v1alpha1.ReleasePlan{
|
||||
Batches: batches,
|
||||
RolloutID: rolloutID,
|
||||
BatchPartition: utilpointer.Int32Ptr(batch),
|
||||
FailureThreshold: rollout.Spec.Strategy.Canary.FailureThreshold,
|
||||
},
|
||||
},
|
||||
}
|
||||
if isRollback {
|
||||
if br.Annotations == nil {
|
||||
br.Annotations = map[string]string{}
|
||||
}
|
||||
br.Annotations[v1alpha1.RollbackInBatchAnnotation] = "true"
|
||||
}
|
||||
return br
|
||||
}
|
||||
|
||||
func (m *canaryReleaseManager) removeBatchRelease(c *util.RolloutContext) (bool, error) {
|
||||
batch := &v1alpha1.BatchRelease{}
|
||||
err := m.Get(context.TODO(), client.ObjectKey{Namespace: c.Rollout.Namespace, Name: c.Rollout.Name}, batch)
|
||||
if err != nil && errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
} else if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) fetch BatchRelease failed: %s", c.Rollout.Namespace, c.Rollout.Name)
|
||||
return false, err
|
||||
}
|
||||
if !batch.DeletionTimestamp.IsZero() {
|
||||
klog.Infof("rollout(%s/%s) BatchRelease is terminating, and wait a moment", c.Rollout.Namespace, c.Rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
//delete batchRelease
|
||||
err = m.Delete(context.TODO(), batch)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) delete BatchRelease failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) deleting BatchRelease, and wait a moment", c.Rollout.Namespace, c.Rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// finalizingBatchRelease asks the BatchRelease controller to finish the
// release: it clears BatchPartition and sets the FinalizingPolicy derived from
// c.WaitReady (WaitResume when the caller wants to wait for pods to be ready,
// Immediate otherwise). It returns (true, nil) only after the BatchRelease
// reports the Completed phase; patch-issuing passes return (false, nil) so
// the caller re-reconciles until the controller catches up.
func (m *canaryReleaseManager) finalizingBatchRelease(c *util.RolloutContext) (bool, error) {
	br, err := m.fetchBatchRelease(c.Rollout.Namespace, c.Rollout.Name)
	if err != nil {
		// No BatchRelease means there is nothing left to finalize.
		if errors.IsNotFound(err) {
			return true, nil
		}
		return false, err
	}
	waitReady := c.WaitReady
	// The Completed phase means batchRelease controller has processed all it
	// should process. If BatchRelease phase is completed, we can do nothing.
	if br.Spec.ReleasePlan.BatchPartition == nil &&
		br.Status.Phase == v1alpha1.RolloutPhaseCompleted {
		klog.Infof("rollout(%s/%s) finalizing batchRelease(%s) done", c.Rollout.Namespace, c.Rollout.Name, util.DumpJSON(br.Status))
		return true, nil
	}

	// If BatchPartition is nil, BatchRelease will directly resume workload via:
	// - * set workload Paused = false if it needs;
	// - * set workload Partition = null if it needs.
	if br.Spec.ReleasePlan.BatchPartition == nil {
		// - If checkReady is true, finalizing policy must be "WaitResume";
		// - If checkReady is false, finalizing policy must be NOT "WaitResume";
		// Otherwise, we should correct it.
		switch br.Spec.ReleasePlan.FinalizingPolicy {
		case v1alpha1.WaitResumeFinalizingPolicyType:
			if waitReady { // no need to patch again
				return false, nil
			}
		default:
			if !waitReady { // no need to patch again
				return false, nil
			}
		}
	}

	// Correct finalizing policy.
	policy := v1alpha1.ImmediateFinalizingPolicyType
	if waitReady {
		policy = v1alpha1.WaitResumeFinalizingPolicyType
	}

	// Patch BatchPartition and FinalizingPolicy, BatchPartition always patch null here.
	body := fmt.Sprintf(`{"spec":{"releasePlan":{"batchPartition":null,"finalizingPolicy":"%s"}}}`, policy)
	if err = m.Patch(context.TODO(), br, client.RawPatch(types.MergePatchType, []byte(body))); err != nil {
		return false, err
	}
	klog.Infof("rollout(%s/%s) patch batchRelease(%s) success", c.Rollout.Namespace, c.Rollout.Name, body)
	return false, nil
}
|
||||
|
||||
// syncBatchRelease sync status of br to canaryStatus, and sync rollout-id of canaryStatus to br.
|
||||
func (m *canaryReleaseManager) syncBatchRelease(br *v1alpha1.BatchRelease, canaryStatus *v1alpha1.CanaryStatus) error {
|
||||
// sync from BatchRelease status to Rollout canaryStatus
|
||||
canaryStatus.CanaryReplicas = br.Status.CanaryStatus.UpdatedReplicas
|
||||
canaryStatus.CanaryReadyReplicas = br.Status.CanaryStatus.UpdatedReadyReplicas
|
||||
// Do not remove this line currently, otherwise, users will be not able to judge whether the BatchRelease works
|
||||
// in the scene where only rollout-id changed.
|
||||
// TODO: optimize the logic to better understand
|
||||
canaryStatus.Message = fmt.Sprintf("BatchRelease is at state %s, rollout-id %s, step %d",
|
||||
br.Status.CanaryStatus.CurrentBatchState, br.Status.ObservedRolloutID, br.Status.CanaryStatus.CurrentBatch+1)
|
||||
|
||||
// sync rolloutId from canaryStatus to BatchRelease
|
||||
if canaryStatus.ObservedRolloutID != br.Spec.ReleasePlan.RolloutID {
|
||||
body := fmt.Sprintf(`{"spec":{"releasePlan":{"rolloutID":"%s"}}}`, canaryStatus.ObservedRolloutID)
|
||||
return m.Patch(context.TODO(), br, client.RawPatch(types.MergePatchType, []byte(body)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -0,0 +1,340 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/trafficrouting"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
netv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/tools/record"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
|
||||
// TestRunCanary is a table-driven test for canaryReleaseManager.runCanary.
// Each case seeds a fake client with a rollout, workloads (deployments +
// replicasets), networking objects, and optionally a BatchRelease, runs one
// reconcile pass, and compares the resulting BatchRelease spec and rollout
// status (with volatile fields such as timestamps and messages blanked out)
// against the expected values.
func TestRunCanary(t *testing.T) {
	cases := []struct {
		name         string
		getObj       func() ([]*apps.Deployment, []*apps.ReplicaSet)
		getNetwork   func() ([]*corev1.Service, []*netv1.Ingress)
		getRollout   func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease)
		expectStatus func() *v1alpha1.RolloutStatus
		expectBr     func() *v1alpha1.BatchRelease
	}{
		{
			// First pass of the Upgrade state with no pre-existing
			// BatchRelease: runCanary should create one and stay in Upgrade.
			name: "run canary upgrade1",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				rs1 := rsDemo.DeepCopy()
				return []*apps.Deployment{dep1}, []*apps.ReplicaSet{rs1}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
				obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
				obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
				obj.Status.CanaryStatus.CurrentStepIndex = 1
				obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(obj.Status, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(&obj.Status, *cond)
				return obj, nil
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				s.CanaryStatus.ObservedWorkloadGeneration = 2
				s.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				s.CanaryStatus.StableRevision = "pod-template-hash-v1"
				s.CanaryStatus.CanaryRevision = "56855c89f9"
				s.CanaryStatus.CurrentStepIndex = 1
				s.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(s, *cond)
				return s
			},
			expectBr: func() *v1alpha1.BatchRelease {
				br := batchDemo.DeepCopy()
				br.Spec.ReleasePlan.Batches = []v1alpha1.ReleaseBatch{
					{
						CanaryReplicas: intstr.FromInt(1),
					},
					{
						CanaryReplicas: intstr.FromInt(2),
					},
					{
						CanaryReplicas: intstr.FromInt(6),
					},
					{
						CanaryReplicas: intstr.FromInt(10),
					},
				}
				br.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
				return br
			},
		},
		{
			// Upgrade state with an existing, ready BatchRelease: runCanary
			// should pick up the canary pod-template-hash / replica counters
			// and transition to TrafficRouting.
			name: "run canary traffic routing",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				dep2 := deploymentDemo.DeepCopy()
				dep2.UID = "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180"
				dep2.Name = dep1.Name + "-canary"
				dep2.Labels[util.CanaryDeploymentLabel] = dep1.Name
				rs1 := rsDemo.DeepCopy()
				rs2 := rsDemo.DeepCopy()
				rs2.Name = "echoserver-canary-2"
				rs2.OwnerReferences = []metav1.OwnerReference{
					{
						APIVersion: "apps/v1",
						Kind:       "Deployment",
						Name:       dep2.Name,
						UID:        "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180",
						Controller: utilpointer.BoolPtr(true),
					},
				}
				rs2.Labels["pod-template-hash"] = "pod-template-hash-v2"
				rs2.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
				return []*apps.Deployment{dep1, dep2}, []*apps.ReplicaSet{rs1, rs2}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
				obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
				obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
				obj.Status.CanaryStatus.CurrentStepIndex = 1
				obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(obj.Status, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(&obj.Status, *cond)
				br := batchDemo.DeepCopy()
				br.Spec.ReleasePlan.Batches = []v1alpha1.ReleaseBatch{
					{
						CanaryReplicas: intstr.FromInt(1),
					},
					{
						CanaryReplicas: intstr.FromInt(2),
					},
					{
						CanaryReplicas: intstr.FromInt(6),
					},
					{
						CanaryReplicas: intstr.FromInt(10),
					},
				}
				br.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
				// Simulate a BatchRelease controller that has already observed
				// the plan and reports batch 0 ready.
				br.Status = v1alpha1.BatchReleaseStatus{
					ObservedGeneration:      1,
					ObservedReleasePlanHash: "6d6a40791161e88ec0483688e951b589a4cbd0bf351974827706b79f99378fd5",
					CanaryStatus: v1alpha1.BatchReleaseCanaryStatus{
						CurrentBatchState:    v1alpha1.ReadyBatchState,
						CurrentBatch:         0,
						UpdatedReplicas:      1,
						UpdatedReadyReplicas: 1,
					},
				}
				return obj, br
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				s.CanaryStatus.ObservedWorkloadGeneration = 2
				s.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				s.CanaryStatus.StableRevision = "pod-template-hash-v1"
				s.CanaryStatus.CanaryRevision = "56855c89f9"
				s.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				s.CanaryStatus.CanaryReplicas = 1
				s.CanaryStatus.CanaryReadyReplicas = 1
				s.CanaryStatus.CurrentStepIndex = 1
				s.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateTrafficRouting
				cond := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(s, *cond)
				return s
			},
			expectBr: func() *v1alpha1.BatchRelease {
				br := batchDemo.DeepCopy()
				br.Spec.ReleasePlan.Batches = []v1alpha1.ReleaseBatch{
					{
						CanaryReplicas: intstr.FromInt(1),
					},
					{
						CanaryReplicas: intstr.FromInt(2),
					},
					{
						CanaryReplicas: intstr.FromInt(6),
					},
					{
						CanaryReplicas: intstr.FromInt(10),
					},
				}
				br.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
				return br
			},
		},
	}

	for _, cs := range cases {
		t.Run(cs.name, func(t *testing.T) {
			// Seed the fake client with all the case's objects.
			deps, rss := cs.getObj()
			rollout, br := cs.getRollout()
			fc := fake.NewClientBuilder().WithScheme(scheme).WithObjects(rollout).Build()
			for _, rs := range rss {
				_ = fc.Create(context.TODO(), rs)
			}
			for _, dep := range deps {
				_ = fc.Create(context.TODO(), dep)
			}
			if br != nil {
				_ = fc.Create(context.TODO(), br)
			}
			ss, in := cs.getNetwork()
			_ = fc.Create(context.TODO(), ss[0])
			_ = fc.Create(context.TODO(), in[0])
			r := &RolloutReconciler{
				Client:                fc,
				Scheme:                scheme,
				Recorder:              record.NewFakeRecorder(10),
				finder:                util.NewControllerFinder(fc),
				trafficRoutingManager: trafficrouting.NewTrafficRoutingManager(fc),
			}
			r.canaryManager = &canaryReleaseManager{
				Client:                fc,
				trafficRoutingManager: r.trafficRoutingManager,
				recorder:              r.Recorder,
			}
			workload, _ := r.finder.GetWorkloadForRef("", rollout.Spec.ObjectRef.WorkloadRef)
			c := &util.RolloutContext{
				Rollout:   rollout,
				NewStatus: rollout.Status.DeepCopy(),
				Workload:  workload,
			}
			// Run one reconcile pass of the canary state machine.
			err := r.canaryManager.runCanary(c)
			if err != nil {
				t.Fatalf("reconcileRolloutProgressing failed: %s", err.Error())
			}
			checkBatchReleaseEqual(fc, t, client.ObjectKey{Name: rollout.Name}, cs.expectBr())
			// Blank out volatile fields (timestamps, human-readable messages)
			// before the deep comparison.
			cStatus := c.NewStatus.DeepCopy()
			cStatus.Message = ""
			if cStatus.CanaryStatus != nil {
				cStatus.CanaryStatus.LastUpdateTime = nil
				cStatus.CanaryStatus.Message = ""
			}
			cond := util.GetRolloutCondition(*cStatus, v1alpha1.RolloutConditionProgressing)
			cond.Message = ""
			util.SetRolloutCondition(cStatus, *cond)
			if !reflect.DeepEqual(cs.expectStatus(), cStatus) {
				t.Fatalf("expect(%s), but get(%s)", util.DumpJSON(cs.expectStatus()), util.DumpJSON(cStatus))
			}
		})
	}
}
|
||||
|
||||
func TestRunCanaryPaused(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
getRollout func() *v1alpha1.Rollout
|
||||
expectStatus func() *v1alpha1.RolloutStatus
|
||||
}{
|
||||
{
|
||||
name: "paused, last step, 60% weight",
|
||||
getRollout: func() *v1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
|
||||
obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
|
||||
obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
|
||||
obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
|
||||
obj.Status.CanaryStatus.CurrentStepIndex = 3
|
||||
obj.Status.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
|
||||
obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStatePaused
|
||||
return obj
|
||||
},
|
||||
expectStatus: func() *v1alpha1.RolloutStatus {
|
||||
obj := rolloutDemo.Status.DeepCopy()
|
||||
obj.CanaryStatus.ObservedWorkloadGeneration = 2
|
||||
obj.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
|
||||
obj.CanaryStatus.StableRevision = "pod-template-hash-v1"
|
||||
obj.CanaryStatus.CanaryRevision = "56855c89f9"
|
||||
obj.CanaryStatus.CurrentStepIndex = 3
|
||||
obj.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
|
||||
obj.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStatePaused
|
||||
return obj
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, cs := range cases {
|
||||
t.Run(cs.name, func(t *testing.T) {
|
||||
rollout := cs.getRollout()
|
||||
fc := fake.NewClientBuilder().WithScheme(scheme).WithObjects(rollout).Build()
|
||||
r := &RolloutReconciler{
|
||||
Client: fc,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(10),
|
||||
finder: util.NewControllerFinder(fc),
|
||||
trafficRoutingManager: trafficrouting.NewTrafficRoutingManager(fc),
|
||||
}
|
||||
r.canaryManager = &canaryReleaseManager{
|
||||
Client: fc,
|
||||
trafficRoutingManager: r.trafficRoutingManager,
|
||||
recorder: r.Recorder,
|
||||
}
|
||||
c := &util.RolloutContext{
|
||||
Rollout: rollout,
|
||||
NewStatus: rollout.Status.DeepCopy(),
|
||||
}
|
||||
err := r.canaryManager.runCanary(c)
|
||||
if err != nil {
|
||||
t.Fatalf("reconcileRolloutProgressing failed: %s", err.Error())
|
||||
}
|
||||
cStatus := c.NewStatus.DeepCopy()
|
||||
cStatus.CanaryStatus.LastUpdateTime = nil
|
||||
cStatus.CanaryStatus.Message = ""
|
||||
cStatus.Message = ""
|
||||
if !reflect.DeepEqual(cs.expectStatus(), cStatus) {
|
||||
t.Fatalf("expect(%s), but get(%s)", util.DumpJSON(cs.expectStatus()), util.DumpJSON(cStatus))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func checkBatchReleaseEqual(c client.WithWatch, t *testing.T, key client.ObjectKey, expect *v1alpha1.BatchRelease) {
|
||||
obj := &v1alpha1.BatchRelease{}
|
||||
err := c.Get(context.TODO(), key, obj)
|
||||
if err != nil {
|
||||
t.Fatalf("get object failed: %s", err.Error())
|
||||
}
|
||||
if !reflect.DeepEqual(expect.Spec, obj.Spec) {
|
||||
t.Fatalf("expect(%s), but get(%s)", util.DumpJSON(expect.Spec), util.DumpJSON(obj.Spec))
|
||||
}
|
||||
}
|
||||
|
|
@ -22,7 +22,8 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/trafficrouting"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
|
@ -40,6 +41,8 @@ var (
|
|||
runtimeController controller.Controller
|
||||
workloadHandler handler.EventHandler
|
||||
watchedWorkload sync.Map
|
||||
|
||||
rolloutControllerKind = v1alpha1.SchemeGroupVersion.WithKind("Rollout")
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
@ -57,9 +60,11 @@ func init() {
|
|||
type RolloutReconciler struct {
|
||||
client.Client
|
||||
Scheme *runtime.Scheme
|
||||
|
||||
Recorder record.EventRecorder
|
||||
Finder *util.ControllerFinder
|
||||
|
||||
finder *util.ControllerFinder
|
||||
trafficRoutingManager *trafficrouting.Manager
|
||||
canaryManager *canaryReleaseManager
|
||||
}
|
||||
|
||||
//+kubebuilder:rbac:groups=rollouts.kruise.io,resources=rollouts,verbs=get;list;watch;create;update;patch;delete
|
||||
|
|
@ -87,7 +92,7 @@ type RolloutReconciler struct {
|
|||
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.8.3/pkg/reconcile
|
||||
func (r *RolloutReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
// Fetch the Rollout instance
|
||||
rollout := &rolloutv1alpha1.Rollout{}
|
||||
rollout := &v1alpha1.Rollout{}
|
||||
err := r.Get(context.TODO(), req.NamespacedName, rollout)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
|
|
@ -112,7 +117,6 @@ func (r *RolloutReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
|
|||
} else if succeeded {
|
||||
watchedWorkload.LoadOrStore(workloadGVK.String(), struct{}{})
|
||||
klog.Infof("Rollout controller begin to watch workload type: %s", workloadGVK.String())
|
||||
|
||||
// return, and wait informer cache to be synced
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
|
@ -123,24 +127,32 @@ func (r *RolloutReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
|
|||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
// update rollout status
|
||||
done, err := r.updateRolloutStatus(rollout)
|
||||
// sync rollout status
|
||||
retry, newStatus, err := r.calculateRolloutStatus(rollout)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
} else if !done {
|
||||
return ctrl.Result{}, nil
|
||||
} else if retry {
|
||||
recheckTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
|
||||
return ctrl.Result{RequeueAfter: time.Until(recheckTime)}, nil
|
||||
}
|
||||
|
||||
var recheckTime *time.Time
|
||||
switch rollout.Status.Phase {
|
||||
case rolloutv1alpha1.RolloutPhaseProgressing:
|
||||
recheckTime, err = r.reconcileRolloutProgressing(rollout)
|
||||
case rolloutv1alpha1.RolloutPhaseTerminating:
|
||||
recheckTime, err = r.reconcileRolloutTerminating(rollout)
|
||||
case v1alpha1.RolloutPhaseProgressing:
|
||||
recheckTime, err = r.reconcileRolloutProgressing(rollout, newStatus)
|
||||
case v1alpha1.RolloutPhaseTerminating:
|
||||
recheckTime, err = r.reconcileRolloutTerminating(rollout, newStatus)
|
||||
}
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
} else if recheckTime != nil {
|
||||
}
|
||||
if newStatus != nil {
|
||||
err = r.updateRolloutStatusInternal(rollout, *newStatus)
|
||||
if err != nil {
|
||||
klog.Errorf("update rollout(%s/%s) status failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
if recheckTime != nil {
|
||||
return ctrl.Result{RequeueAfter: time.Until(*recheckTime)}, nil
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
|
|
@ -154,20 +166,25 @@ func (r *RolloutReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Watch for changes to rollout
|
||||
if err = c.Watch(&source.Kind{Type: &rolloutv1alpha1.Rollout{}}, &handler.EnqueueRequestForObject{}); err != nil {
|
||||
if err = c.Watch(&source.Kind{Type: &v1alpha1.Rollout{}}, &handler.EnqueueRequestForObject{}); err != nil {
|
||||
return err
|
||||
}
|
||||
// Watch for changes to batchRelease
|
||||
if err = c.Watch(&source.Kind{Type: &rolloutv1alpha1.BatchRelease{}}, &enqueueRequestForBatchRelease{reader: mgr.GetCache()}); err != nil {
|
||||
if err = c.Watch(&source.Kind{Type: &v1alpha1.BatchRelease{}}, &enqueueRequestForBatchRelease{reader: mgr.GetCache()}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
runtimeController = c
|
||||
workloadHandler = &enqueueRequestForWorkload{reader: mgr.GetCache(), scheme: r.Scheme}
|
||||
if err = util.AddWorkloadWatcher(c, workloadHandler); err != nil {
|
||||
return err
|
||||
}
|
||||
r.finder = util.NewControllerFinder(mgr.GetClient())
|
||||
r.trafficRoutingManager = trafficrouting.NewTrafficRoutingManager(mgr.GetClient())
|
||||
r.canaryManager = &canaryReleaseManager{
|
||||
Client: mgr.GetClient(),
|
||||
trafficRoutingManager: r.trafficRoutingManager,
|
||||
recorder: r.Recorder,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,12 +17,19 @@ limitations under the License.
|
|||
package rollout
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
kruisev1aplphal "github.com/openkruise/kruise-api/apps/v1alpha1"
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
"github.com/openkruise/rollouts/pkg/util/configuration"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
netv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
)
|
||||
|
|
@ -30,25 +37,66 @@ import (
|
|||
var (
|
||||
scheme *runtime.Scheme
|
||||
|
||||
rolloutDemo = &rolloutv1alpha1.Rollout{
|
||||
rolloutDemo = &v1alpha1.Rollout{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "rollout-demo",
|
||||
Labels: map[string]string{},
|
||||
Annotations: map[string]string{
|
||||
util.RolloutHashAnnotation: "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd",
|
||||
},
|
||||
Spec: rolloutv1alpha1.RolloutSpec{
|
||||
ObjectRef: rolloutv1alpha1.ObjectRef{
|
||||
WorkloadRef: &rolloutv1alpha1.WorkloadRef{
|
||||
},
|
||||
Spec: v1alpha1.RolloutSpec{
|
||||
ObjectRef: v1alpha1.ObjectRef{
|
||||
WorkloadRef: &v1alpha1.WorkloadRef{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
Name: "echoserver",
|
||||
},
|
||||
},
|
||||
Strategy: rolloutv1alpha1.RolloutStrategy{
|
||||
Canary: &rolloutv1alpha1.CanaryStrategy{},
|
||||
Strategy: v1alpha1.RolloutStrategy{
|
||||
Canary: &v1alpha1.CanaryStrategy{
|
||||
Steps: []v1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(5),
|
||||
Replicas: &intstr.IntOrString{IntVal: 1},
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(20),
|
||||
Replicas: &intstr.IntOrString{IntVal: 2},
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(60),
|
||||
Replicas: &intstr.IntOrString{IntVal: 6},
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(100),
|
||||
Replicas: &intstr.IntOrString{IntVal: 10},
|
||||
},
|
||||
},
|
||||
TrafficRoutings: []*v1alpha1.TrafficRouting{
|
||||
{
|
||||
Service: "echoserver",
|
||||
Ingress: &v1alpha1.IngressTrafficRouting{
|
||||
Name: "echoserver",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: v1alpha1.RolloutStatus{
|
||||
Phase: v1alpha1.RolloutPhaseProgressing,
|
||||
CanaryStatus: &v1alpha1.CanaryStatus{},
|
||||
Conditions: []v1alpha1.RolloutCondition{
|
||||
{
|
||||
Type: v1alpha1.RolloutConditionProgressing,
|
||||
Reason: v1alpha1.ProgressingReasonInitializing,
|
||||
Status: corev1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
maxUnavailable = intstr.FromString("20%")
|
||||
deploymentDemo = &apps.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "apps/v1",
|
||||
|
|
@ -57,19 +105,42 @@ var (
|
|||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoserver",
|
||||
Labels: map[string]string{},
|
||||
Generation: 1,
|
||||
Annotations: map[string]string{
|
||||
util.InRolloutProgressingAnnotation: "rollout-demo",
|
||||
},
|
||||
Generation: 2,
|
||||
UID: types.UID("606132e0-85ef-460a-8cf5-cd8f915a8cc3"),
|
||||
},
|
||||
Spec: apps.DeploymentSpec{
|
||||
Replicas: utilpointer.Int32(100),
|
||||
Replicas: utilpointer.Int32(10),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "echoserver",
|
||||
},
|
||||
},
|
||||
Strategy: apps.DeploymentStrategy{
|
||||
RollingUpdate: &apps.RollingUpdateDeployment{
|
||||
MaxUnavailable: &maxUnavailable,
|
||||
},
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"app": "echoserver",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
Image: "echoserver:v2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: apps.DeploymentStatus{
|
||||
ObservedGeneration: 1,
|
||||
ObservedGeneration: 2,
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -79,9 +150,10 @@ var (
|
|||
Kind: "ReplicaSet",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoserver-xxx",
|
||||
Name: "echoserver-1",
|
||||
Labels: map[string]string{
|
||||
"app": "echoserver",
|
||||
"pod-template-hash": "pod-template-hash-v1",
|
||||
},
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
|
|
@ -93,22 +165,148 @@ var (
|
|||
},
|
||||
},
|
||||
},
|
||||
Spec: apps.ReplicaSetSpec{
|
||||
Replicas: utilpointer.Int32(10),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "echoserver",
|
||||
},
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"app": "echoserver",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
Image: "echoserver:v1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
batchDemo = &rolloutv1alpha1.BatchRelease{
|
||||
batchDemo = &v1alpha1.BatchRelease{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "rollout-demo",
|
||||
Labels: map[string]string{},
|
||||
Generation: 1,
|
||||
},
|
||||
Spec: rolloutv1alpha1.BatchReleaseSpec{
|
||||
TargetRef: rolloutv1alpha1.ObjectRef{
|
||||
WorkloadRef: &rolloutv1alpha1.WorkloadRef{
|
||||
Spec: v1alpha1.BatchReleaseSpec{
|
||||
TargetRef: v1alpha1.ObjectRef{
|
||||
WorkloadRef: &v1alpha1.WorkloadRef{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
Name: "echoserver",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: v1alpha1.BatchReleaseStatus{},
|
||||
}
|
||||
|
||||
demoService = corev1.Service{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Service",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoserver",
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Name: "http",
|
||||
Port: 80,
|
||||
TargetPort: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
Selector: map[string]string{
|
||||
"app": "echoserver",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
demoIngress = netv1.Ingress{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "networking.k8s.io/v1",
|
||||
Kind: "Ingress",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoserver",
|
||||
Annotations: map[string]string{
|
||||
"kubernetes.io/ingress.class": "nginx",
|
||||
},
|
||||
},
|
||||
Spec: netv1.IngressSpec{
|
||||
Rules: []netv1.IngressRule{
|
||||
{
|
||||
Host: "echoserver.example.com",
|
||||
IngressRuleValue: netv1.IngressRuleValue{
|
||||
HTTP: &netv1.HTTPIngressRuleValue{
|
||||
Paths: []netv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/apis/echo",
|
||||
Backend: netv1.IngressBackend{
|
||||
Service: &netv1.IngressServiceBackend{
|
||||
Name: "echoserver",
|
||||
Port: netv1.ServiceBackendPort{
|
||||
Name: "http",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
demoConf = corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configuration.RolloutConfigurationName,
|
||||
Namespace: util.GetRolloutNamespace(),
|
||||
},
|
||||
Data: map[string]string{
|
||||
fmt.Sprintf("%s.nginx", configuration.LuaTrafficRoutingIngressTypePrefix): `
|
||||
annotations = obj.annotations
|
||||
annotations["nginx.ingress.kubernetes.io/canary"] = "true"
|
||||
annotations["nginx.ingress.kubernetes.io/canary-by-cookie"] = nil
|
||||
annotations["nginx.ingress.kubernetes.io/canary-by-header"] = nil
|
||||
annotations["nginx.ingress.kubernetes.io/canary-by-header-pattern"] = nil
|
||||
annotations["nginx.ingress.kubernetes.io/canary-by-header-value"] = nil
|
||||
annotations["nginx.ingress.kubernetes.io/canary-weight"] = nil
|
||||
if ( obj.weight ~= "-1" )
|
||||
then
|
||||
annotations["nginx.ingress.kubernetes.io/canary-weight"] = obj.weight
|
||||
end
|
||||
if ( not obj.matches )
|
||||
then
|
||||
return annotations
|
||||
end
|
||||
for _,match in ipairs(obj.matches) do
|
||||
header = match.headers[1]
|
||||
if ( header.name == "canary-by-cookie" )
|
||||
then
|
||||
annotations["nginx.ingress.kubernetes.io/canary-by-cookie"] = header.value
|
||||
else
|
||||
annotations["nginx.ingress.kubernetes.io/canary-by-header"] = header.name
|
||||
if ( header.type == "RegularExpression" )
|
||||
then
|
||||
annotations["nginx.ingress.kubernetes.io/canary-by-header-pattern"] = header.value
|
||||
else
|
||||
annotations["nginx.ingress.kubernetes.io/canary-by-header-value"] = header.value
|
||||
end
|
||||
end
|
||||
end
|
||||
return annotations
|
||||
`,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
|
|
@ -116,5 +314,5 @@ func init() {
|
|||
scheme = runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
_ = kruisev1aplphal.AddToScheme(scheme)
|
||||
_ = rolloutv1alpha1.AddToScheme(scheme)
|
||||
_ = v1alpha1.AddToScheme(scheme)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,7 +20,6 @@ import (
|
|||
"context"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/batchrelease"
|
||||
utilclient "github.com/openkruise/rollouts/pkg/util/client"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
|
@ -121,11 +120,7 @@ func (w *enqueueRequestForBatchRelease) Update(evt event.UpdateEvent, q workqueu
|
|||
}
|
||||
|
||||
func (w *enqueueRequestForBatchRelease) handleEvent(q workqueue.RateLimitingInterface, obj client.Object) {
|
||||
rollout := obj.GetLabels()[batchrelease.BatchReleaseOwnerRefLabel]
|
||||
if rollout == "" {
|
||||
return
|
||||
}
|
||||
klog.Infof("BatchRelease(%s/%s) and reconcile Rollout (%s)", obj.GetNamespace(), obj.GetName(), rollout)
|
||||
nsn := types.NamespacedName{Namespace: obj.GetNamespace(), Name: rollout}
|
||||
klog.Infof("BatchRelease(%s/%s) and reconcile Rollout (%s)", obj.GetNamespace(), obj.GetName(), obj.GetName())
|
||||
nsn := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}
|
||||
q.Add(reconcile.Request{NamespacedName: nsn})
|
||||
}
|
||||
|
|
@ -0,0 +1,368 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var defaultGracePeriodSeconds int32 = 3
|
||||
|
||||
// parameter1 retryReconcile, parameter2 error
|
||||
func (r *RolloutReconciler) reconcileRolloutProgressing(rollout *v1alpha1.Rollout, newStatus *v1alpha1.RolloutStatus) (*time.Time, error) {
|
||||
cond := util.GetRolloutCondition(rollout.Status, v1alpha1.RolloutConditionProgressing)
|
||||
klog.Infof("reconcile rollout(%s/%s) progressing action...", rollout.Namespace, rollout.Name)
|
||||
workload, err := r.finder.GetWorkloadForRef(rollout.Namespace, rollout.Spec.ObjectRef.WorkloadRef)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) get workload failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return nil, err
|
||||
} else if workload == nil {
|
||||
klog.Errorf("rollout(%s/%s) workload Not Found", rollout.Namespace, rollout.Name)
|
||||
return nil, nil
|
||||
} else if !workload.IsStatusConsistent {
|
||||
klog.Infof("rollout(%s/%s) workload status is inconsistent, then wait a moment", rollout.Namespace, rollout.Name)
|
||||
return nil, nil
|
||||
}
|
||||
rolloutContext := &util.RolloutContext{Rollout: rollout, NewStatus: newStatus, Workload: workload}
|
||||
switch cond.Reason {
|
||||
case v1alpha1.ProgressingReasonInitializing:
|
||||
klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
|
||||
// new canaryStatus
|
||||
newStatus.CanaryStatus = &v1alpha1.CanaryStatus{
|
||||
ObservedWorkloadGeneration: rolloutContext.Workload.Generation,
|
||||
RolloutHash: rolloutContext.Rollout.Annotations[util.RolloutHashAnnotation],
|
||||
ObservedRolloutID: getRolloutID(rolloutContext.Workload),
|
||||
StableRevision: rolloutContext.Workload.StableRevision,
|
||||
CanaryRevision: rolloutContext.Workload.CanaryRevision,
|
||||
CurrentStepIndex: 1,
|
||||
CurrentStepState: v1alpha1.CanaryStepStateUpgrade,
|
||||
LastUpdateTime: &metav1.Time{Time: time.Now()},
|
||||
}
|
||||
done, err := r.doProgressingInitializing(rolloutContext)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) doProgressingInitializing error(%s)", rollout.Namespace, rollout.Name, err.Error())
|
||||
return nil, err
|
||||
} else if done {
|
||||
progressingStateTransition(newStatus, corev1.ConditionTrue, v1alpha1.ProgressingReasonInRolling, "Rollout is in Progressing")
|
||||
} else {
|
||||
// Incomplete, recheck
|
||||
expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
|
||||
rolloutContext.RecheckTime = &expectedTime
|
||||
klog.Infof("rollout(%s/%s) doProgressingInitializing is incomplete, and recheck(%s)", rollout.Namespace, rollout.Name, expectedTime.String())
|
||||
}
|
||||
|
||||
case v1alpha1.ProgressingReasonInRolling:
|
||||
klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
|
||||
err = r.doProgressingInRolling(rolloutContext)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
case v1alpha1.ProgressingReasonFinalising:
|
||||
klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
|
||||
var done bool
|
||||
rolloutContext.WaitReady = true
|
||||
done, err = r.doFinalising(rolloutContext)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// finalizer is finished
|
||||
} else if done {
|
||||
progressingStateTransition(newStatus, corev1.ConditionFalse, v1alpha1.ProgressingReasonCompleted, "Rollout progressing has been completed")
|
||||
setRolloutSucceededCondition(newStatus, corev1.ConditionTrue)
|
||||
} else {
|
||||
// Incomplete, recheck
|
||||
expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
|
||||
rolloutContext.RecheckTime = &expectedTime
|
||||
klog.Infof("rollout(%s/%s) doProgressingFinalising is incomplete, and recheck(%s)", rollout.Namespace, rollout.Name, expectedTime.String())
|
||||
}
|
||||
|
||||
case v1alpha1.ProgressingReasonPaused:
|
||||
// from paused to rolling progressing
|
||||
if !rollout.Spec.Strategy.Paused {
|
||||
klog.Infof("rollout(%s/%s) is Progressing, from paused to rolling", rollout.Namespace, rollout.Name)
|
||||
progressingStateTransition(newStatus, corev1.ConditionTrue, v1alpha1.ProgressingReasonInRolling, "")
|
||||
}
|
||||
|
||||
case v1alpha1.ProgressingReasonCancelling:
|
||||
klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
|
||||
var done bool
|
||||
done, err = r.doFinalising(rolloutContext)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// finalizer is finished
|
||||
} else if done {
|
||||
progressingStateTransition(newStatus, corev1.ConditionFalse, v1alpha1.ProgressingReasonCompleted, "Rollout progressing has been cancelled")
|
||||
setRolloutSucceededCondition(newStatus, corev1.ConditionFalse)
|
||||
} else {
|
||||
// Incomplete, recheck
|
||||
expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
|
||||
rolloutContext.RecheckTime = &expectedTime
|
||||
klog.Infof("rollout(%s/%s) doProgressingCancelling is incomplete, and recheck(%s)", rollout.Namespace, rollout.Name, expectedTime.String())
|
||||
}
|
||||
|
||||
case v1alpha1.ProgressingReasonCompleted:
|
||||
// rollout phase from progressing to healthy
|
||||
klog.Infof("rollout(%s/%s) phase is from progressing to healthy", rollout.Namespace, rollout.Name)
|
||||
newStatus.Phase = v1alpha1.RolloutPhaseHealthy
|
||||
}
|
||||
|
||||
return rolloutContext.RecheckTime, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) doProgressingInitializing(c *util.RolloutContext) (bool, error) {
|
||||
// Traffic routing
|
||||
if len(c.Rollout.Spec.Strategy.Canary.TrafficRoutings) > 0 {
|
||||
if err := r.trafficRoutingManager.InitializeTrafficRouting(c); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// It is not allowed to modify the rollout.spec in progressing phase (validate webhook rollout),
|
||||
// but in many scenarios the user may modify the workload and rollout spec at the same time,
|
||||
// and there is a possibility that the workload is released first, and due to some network or other reasons the rollout spec is delayed by a few seconds,
|
||||
// so this is mainly compatible with this scenario.
|
||||
cond := util.GetRolloutCondition(*c.NewStatus, v1alpha1.RolloutConditionProgressing)
|
||||
if verifyTime := cond.LastUpdateTime.Add(time.Second * time.Duration(defaultGracePeriodSeconds)); verifyTime.After(time.Now()) {
|
||||
klog.Infof("verify rollout(%s/%s) TrafficRouting, and wait a moment", c.Rollout.Namespace, c.Rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) doProgressingInRolling(c *util.RolloutContext) error {
|
||||
// Handle the 5 special cases firstly, and we had better keep the order of following cases:
|
||||
|
||||
switch {
|
||||
// 1. In case of rollback in a quick way, un-paused and just use workload rolling strategy
|
||||
case isRollingBackDirectly(c.Rollout, c.Workload):
|
||||
return r.handleRollbackDirectly(c.Rollout, c.Workload, c.NewStatus)
|
||||
|
||||
// 2. In case of rollout paused, just stop reconcile
|
||||
case isRolloutPaused(c.Rollout):
|
||||
return r.handleRolloutPaused(c.Rollout, c.NewStatus)
|
||||
|
||||
// 3. In case of rollback in a batch way, use rollout step strategy
|
||||
case isRollingBackInBatches(c.Rollout, c.Workload):
|
||||
return r.handleRollbackInBatches(c.Rollout, c.Workload, c.NewStatus)
|
||||
|
||||
// 4. In case of continuous publishing(v1 -> v2 -> v3), restart publishing
|
||||
case isContinuousRelease(c.Rollout, c.Workload):
|
||||
return r.handleContinuousRelease(c)
|
||||
|
||||
// 5. In case of rollout plan changed, recalculate and publishing
|
||||
case isRolloutPlanChanged(c.Rollout):
|
||||
return r.handleRolloutPlanChanged(c)
|
||||
}
|
||||
return r.handleNormalRolling(c)
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleRolloutPaused(rollout *v1alpha1.Rollout, newStatus *v1alpha1.RolloutStatus) error {
|
||||
klog.Infof("rollout(%s/%s) is Progressing, but paused", rollout.Namespace, rollout.Name)
|
||||
progressingStateTransition(newStatus, corev1.ConditionTrue, v1alpha1.ProgressingReasonPaused, "Rollout has been paused, you can resume it by kube-cli")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleContinuousRelease(c *util.RolloutContext) error {
|
||||
r.Recorder.Eventf(c.Rollout, corev1.EventTypeNormal, "Progressing", "workload continuous publishing canaryRevision, then restart publishing")
|
||||
klog.Infof("rollout(%s/%s) workload continuous publishing canaryRevision from(%s) -> to(%s), then restart publishing",
|
||||
c.Rollout.Namespace, c.Rollout.Name, c.NewStatus.CanaryStatus.CanaryRevision, c.Workload.CanaryRevision)
|
||||
|
||||
done, err := r.doProgressingReset(c)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) doProgressingReset failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
|
||||
return err
|
||||
} else if done {
|
||||
c.NewStatus.CanaryStatus = nil
|
||||
progressingStateTransition(c.NewStatus, corev1.ConditionTrue, v1alpha1.ProgressingReasonInitializing, "Workload is continuous release")
|
||||
klog.Infof("rollout(%s/%s) workload is continuous publishing, reset complete", c.Rollout.Namespace, c.Rollout.Name)
|
||||
} else {
|
||||
// Incomplete, recheck
|
||||
expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
|
||||
c.RecheckTime = &expectedTime
|
||||
klog.Infof("rollout(%s/%s) workload is continuous publishing, reset incomplete, and recheck(%s)", c.Rollout.Namespace, c.Rollout.Name, expectedTime.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleRollbackDirectly(rollout *v1alpha1.Rollout, workload *util.Workload, newStatus *v1alpha1.RolloutStatus) error {
|
||||
newStatus.CanaryStatus.CanaryRevision = workload.CanaryRevision
|
||||
r.Recorder.Eventf(rollout, corev1.EventTypeNormal, "Progressing", "workload has been rollback, then rollout is canceled")
|
||||
klog.Infof("rollout(%s/%s) workload has been rollback directly, then rollout canceled", rollout.Namespace, rollout.Name)
|
||||
progressingStateTransition(newStatus, corev1.ConditionTrue, v1alpha1.ProgressingReasonCancelling, "The workload has been rolled back and the rollout process will be cancelled")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleRollbackInBatches(rollout *v1alpha1.Rollout, workload *util.Workload, newStatus *v1alpha1.RolloutStatus) error {
|
||||
// restart from the beginning
|
||||
newStatus.CanaryStatus.CurrentStepIndex = 1
|
||||
newStatus.CanaryStatus.CanaryRevision = workload.CanaryRevision
|
||||
newStatus.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
|
||||
newStatus.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
newStatus.CanaryStatus.RolloutHash = rollout.Annotations[util.RolloutHashAnnotation]
|
||||
klog.Infof("rollout(%s/%s) workload has been rollback in batches, then restart from beginning", rollout.Namespace, rollout.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleRolloutPlanChanged(c *util.RolloutContext) error {
|
||||
newStepIndex, err := r.recalculateCanaryStep(c)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) reCalculate Canary StepIndex failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
// canary step configuration change causes current step index change
|
||||
c.NewStatus.CanaryStatus.CurrentStepIndex = newStepIndex
|
||||
c.NewStatus.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
|
||||
c.NewStatus.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
c.NewStatus.CanaryStatus.RolloutHash = c.Rollout.Annotations[util.RolloutHashAnnotation]
|
||||
klog.Infof("rollout(%s/%s) canary step configuration change, and stepIndex(%d) state(%s)",
|
||||
c.Rollout.Namespace, c.Rollout.Name, c.NewStatus.CanaryStatus.CurrentStepIndex, c.NewStatus.CanaryStatus.CurrentStepState)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) handleNormalRolling(c *util.RolloutContext) error {
|
||||
//check if canary is done
|
||||
if c.NewStatus.CanaryStatus.CurrentStepState == v1alpha1.CanaryStepStateCompleted {
|
||||
klog.Infof("rollout(%s/%s) progressing rolling done", c.Rollout.Namespace, c.Rollout.Name)
|
||||
progressingStateTransition(c.NewStatus, corev1.ConditionTrue, v1alpha1.ProgressingReasonFinalising, "Rollout has been completed and some closing work is being done")
|
||||
} else { // rollout is in rolling
|
||||
return r.canaryManager.runCanary(c)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/* **********************************************************************
|
||||
help functions
|
||||
*********************************************************************** */
|
||||
// isRolloutPaused reports whether the user has paused the rollout via
// spec.strategy.paused.
func isRolloutPaused(rollout *v1alpha1.Rollout) bool {
	return rollout.Spec.Strategy.Paused
}
|
||||
|
||||
func isRolloutPlanChanged(rollout *v1alpha1.Rollout) bool {
|
||||
status := &rollout.Status
|
||||
return status.CanaryStatus.RolloutHash != "" && status.CanaryStatus.RolloutHash != rollout.Annotations[util.RolloutHashAnnotation]
|
||||
}
|
||||
|
||||
func isContinuousRelease(rollout *v1alpha1.Rollout, workload *util.Workload) bool {
|
||||
status := &rollout.Status
|
||||
return status.CanaryStatus.CanaryRevision != "" && workload.CanaryRevision != status.CanaryStatus.CanaryRevision && !workload.IsInRollback
|
||||
}
|
||||
|
||||
func isRollingBackDirectly(rollout *v1alpha1.Rollout, workload *util.Workload) bool {
|
||||
status := &rollout.Status
|
||||
inBatch := util.IsRollbackInBatchPolicy(rollout, workload.Labels)
|
||||
return workload.IsInRollback && workload.CanaryRevision != status.CanaryStatus.CanaryRevision && !inBatch
|
||||
}
|
||||
|
||||
func isRollingBackInBatches(rollout *v1alpha1.Rollout, workload *util.Workload) bool {
|
||||
status := &rollout.Status
|
||||
inBatch := util.IsRollbackInBatchPolicy(rollout, workload.Labels)
|
||||
return workload.IsInRollback && workload.CanaryRevision != status.CanaryStatus.CanaryRevision && inBatch
|
||||
}
|
||||
|
||||
// 1. modify network api(ingress or gateway api) configuration, and route 100% traffic to stable pods
|
||||
// 2. remove batchRelease CR.
|
||||
func (r *RolloutReconciler) doProgressingReset(c *util.RolloutContext) (bool, error) {
|
||||
if len(c.Rollout.Spec.Strategy.Canary.TrafficRoutings) > 0 {
|
||||
// modify network api(ingress or gateway api) configuration, and route 100% traffic to stable pods
|
||||
done, err := r.trafficRoutingManager.FinalisingTrafficRouting(c, false)
|
||||
if err != nil || !done {
|
||||
return done, err
|
||||
}
|
||||
}
|
||||
done, err := r.canaryManager.removeBatchRelease(c)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) DoFinalising batchRelease failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
|
||||
return false, err
|
||||
} else if !done {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) recalculateCanaryStep(c *util.RolloutContext) (int32, error) {
|
||||
batch, err := r.canaryManager.fetchBatchRelease(c.Rollout.Namespace, c.Rollout.Name)
|
||||
if errors.IsNotFound(err) {
|
||||
return 1, nil
|
||||
} else if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
currentReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.Spec.ReleasePlan.Batches[*batch.Spec.ReleasePlan.BatchPartition].CanaryReplicas, int(c.Workload.Replicas), true)
|
||||
var stepIndex int32
|
||||
for i := range c.Rollout.Spec.Strategy.Canary.Steps {
|
||||
step := c.Rollout.Spec.Strategy.Canary.Steps[i]
|
||||
var desiredReplicas int
|
||||
if step.Replicas != nil {
|
||||
desiredReplicas, _ = intstr.GetScaledValueFromIntOrPercent(step.Replicas, int(c.Workload.Replicas), true)
|
||||
} else {
|
||||
replicas := intstr.FromString(strconv.Itoa(int(*step.Weight)) + "%")
|
||||
desiredReplicas, _ = intstr.GetScaledValueFromIntOrPercent(&replicas, int(c.Workload.Replicas), true)
|
||||
}
|
||||
stepIndex = int32(i + 1)
|
||||
if currentReplicas <= desiredReplicas {
|
||||
break
|
||||
}
|
||||
}
|
||||
return stepIndex, nil
|
||||
}
|
||||
|
||||
func (r *RolloutReconciler) doFinalising(c *util.RolloutContext) (bool, error) {
|
||||
klog.Infof("reconcile rollout(%s/%s) doFinalising", c.Rollout.Namespace, c.Rollout.Name)
|
||||
done, err := r.canaryManager.doCanaryFinalising(c)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) Progressing failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
|
||||
return false, err
|
||||
} else if !done {
|
||||
klog.Infof("rollout(%s/%s) finalizer is not finished, and retry reconcile", c.Rollout.Namespace, c.Rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
klog.Infof("run rollout(%s/%s) Progressing Finalising done", c.Rollout.Namespace, c.Rollout.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func progressingStateTransition(status *v1alpha1.RolloutStatus, condStatus corev1.ConditionStatus, reason, message string) {
|
||||
cond := util.GetRolloutCondition(*status, v1alpha1.RolloutConditionProgressing)
|
||||
if cond == nil {
|
||||
cond = util.NewRolloutCondition(v1alpha1.RolloutConditionProgressing, condStatus, reason, message)
|
||||
} else {
|
||||
cond.Status = condStatus
|
||||
cond.Reason = reason
|
||||
if message != "" {
|
||||
cond.Message = message
|
||||
}
|
||||
}
|
||||
util.SetRolloutCondition(status, *cond)
|
||||
status.Message = cond.Message
|
||||
}
|
||||
|
||||
func setRolloutSucceededCondition(status *v1alpha1.RolloutStatus, condStatus corev1.ConditionStatus) {
|
||||
cond := util.GetRolloutCondition(*status, v1alpha1.RolloutConditionSucceeded)
|
||||
if cond == nil {
|
||||
cond = util.NewRolloutCondition(v1alpha1.RolloutConditionSucceeded, condStatus, "", "")
|
||||
} else {
|
||||
cond.Status = condStatus
|
||||
}
|
||||
util.SetRolloutCondition(status, *cond)
|
||||
}
|
||||
|
|
@ -0,0 +1,832 @@
|
|||
/*
|
||||
Copyright 2021.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/trafficrouting"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
netv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/tools/record"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
|
||||
// TestReconcileRolloutProgressing drives reconcileRolloutProgressing through
// its phase transitions (init -> rolling -> finalizing -> succeeded, plus the
// rollback and continuous-release branches) against a fake client populated
// with demo Deployments/ReplicaSets/Service/Ingress fixtures, and compares the
// resulting rollout status (scrubbed of volatile fields) with an expectation.
func TestReconcileRolloutProgressing(t *testing.T) {
	cases := []struct {
		name         string
		getObj       func() ([]*apps.Deployment, []*apps.ReplicaSet)   // workload fixtures
		getNetwork   func() ([]*corev1.Service, []*netv1.Ingress)      // traffic-routing fixtures
		getRollout   func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) // rollout under test (+ optional batchRelease)
		expectStatus func() *v1alpha1.RolloutStatus                    // expected status after one reconcile
	}{
		{
			name: "ReconcileRolloutProgressing init -> rolling",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				rs1 := rsDemo.DeepCopy()
				return []*apps.Deployment{dep1}, []*apps.ReplicaSet{rs1}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				return obj, nil
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				s.CanaryStatus.ObservedWorkloadGeneration = 2
				s.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				s.CanaryStatus.StableRevision = "pod-template-hash-v1"
				s.CanaryStatus.CanaryRevision = "56855c89f9"
				s.CanaryStatus.CurrentStepIndex = 1
				s.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(s, *cond)
				return s
			},
		},
		{
			// mid-rolling reconcile: a canary Deployment + ReplicaSet already exist
			name: "ReconcileRolloutProgressing rolling1",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				dep2 := deploymentDemo.DeepCopy()
				dep2.UID = "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180"
				dep2.Name = dep1.Name + "-canary"
				dep2.Labels[util.CanaryDeploymentLabel] = dep1.Name
				rs1 := rsDemo.DeepCopy()
				rs2 := rsDemo.DeepCopy()
				rs2.Name = "echoserver-canary-2"
				rs2.OwnerReferences = []metav1.OwnerReference{
					{
						APIVersion: "apps/v1",
						Kind:       "Deployment",
						Name:       dep2.Name,
						UID:        "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180",
						Controller: utilpointer.BoolPtr(true),
					},
				}
				rs2.Labels["pod-template-hash"] = "pod-template-hash-v2"
				rs2.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
				return []*apps.Deployment{dep1, dep2}, []*apps.ReplicaSet{rs1, rs2}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
				obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
				obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
				obj.Status.CanaryStatus.CurrentStepIndex = 1
				obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(obj.Status, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(&obj.Status, *cond)
				return obj, nil
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				s.CanaryStatus.ObservedWorkloadGeneration = 2
				s.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				s.CanaryStatus.StableRevision = "pod-template-hash-v1"
				s.CanaryStatus.CanaryRevision = "56855c89f9"
				s.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				s.CanaryStatus.CurrentStepIndex = 1
				s.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(s, *cond)
				return s
			},
		},
		{
			// last canary step completed -> Progressing moves to Finalising
			name: "ReconcileRolloutProgressing rolling -> finalizing",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				dep2 := deploymentDemo.DeepCopy()
				dep2.UID = "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180"
				dep2.Name = dep1.Name + "-canary"
				dep2.Labels[util.CanaryDeploymentLabel] = dep1.Name
				rs1 := rsDemo.DeepCopy()
				rs2 := rsDemo.DeepCopy()
				rs2.Name = "echoserver-canary-2"
				rs2.OwnerReferences = []metav1.OwnerReference{
					{
						APIVersion: "apps/v1",
						Kind:       "Deployment",
						Name:       dep2.Name,
						UID:        "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180",
						Controller: utilpointer.BoolPtr(true),
					},
				}
				rs2.Labels["pod-template-hash"] = "pod-template-hash-v2"
				rs2.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
				return []*apps.Deployment{dep1, dep2}, []*apps.ReplicaSet{rs1, rs2}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
				obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
				obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
				obj.Status.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				obj.Status.CanaryStatus.CurrentStepIndex = 4
				obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
				cond := util.GetRolloutCondition(obj.Status, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(&obj.Status, *cond)
				return obj, nil
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				s.CanaryStatus.ObservedWorkloadGeneration = 2
				s.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				s.CanaryStatus.StableRevision = "pod-template-hash-v1"
				s.CanaryStatus.CanaryRevision = "56855c89f9"
				s.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				s.CanaryStatus.CurrentStepIndex = 4
				s.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
				cond := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonFinalising
				cond.Status = corev1.ConditionTrue
				util.SetRolloutCondition(s, *cond)
				return s
			},
		},
		{
			// finalising not yet done: workload still has un-updated replicas
			name: "ReconcileRolloutProgressing finalizing1",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				delete(dep1.Annotations, util.InRolloutProgressingAnnotation)
				dep1.Status = apps.DeploymentStatus{
					ObservedGeneration: 2,
					Replicas:           10,
					UpdatedReplicas:    5,
					ReadyReplicas:      10,
					AvailableReplicas:  10,
				}
				rs1 := rsDemo.DeepCopy()
				return []*apps.Deployment{dep1}, []*apps.ReplicaSet{rs1}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
				obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
				obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
				obj.Status.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				obj.Status.CanaryStatus.CurrentStepIndex = 4
				obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
				cond := util.GetRolloutCondition(obj.Status, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonFinalising
				cond.Status = corev1.ConditionTrue
				util.SetRolloutCondition(&obj.Status, *cond)
				br := batchDemo.DeepCopy()
				br.Spec.ReleasePlan.Batches = []v1alpha1.ReleaseBatch{
					{
						CanaryReplicas: intstr.FromInt(1),
					},
				}
				return obj, br
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				s.CanaryStatus.ObservedWorkloadGeneration = 2
				s.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				s.CanaryStatus.StableRevision = "pod-template-hash-v1"
				s.CanaryStatus.CanaryRevision = "56855c89f9"
				s.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				s.CanaryStatus.CurrentStepIndex = 4
				s.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
				cond := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonFinalising
				cond.Status = corev1.ConditionTrue
				util.SetRolloutCondition(s, *cond)
				return s
			},
		},
		{
			// finalising with batchRelease already completed; workload fully updated
			name: "ReconcileRolloutProgressing finalizing2",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				delete(dep1.Annotations, util.InRolloutProgressingAnnotation)
				dep1.Status = apps.DeploymentStatus{
					ObservedGeneration: 2,
					Replicas:           10,
					UpdatedReplicas:    10,
					ReadyReplicas:      10,
					AvailableReplicas:  10,
				}
				rs1 := rsDemo.DeepCopy()
				return []*apps.Deployment{dep1}, []*apps.ReplicaSet{rs1}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
				obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
				obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
				obj.Status.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				obj.Status.CanaryStatus.CurrentStepIndex = 4
				obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
				cond := util.GetRolloutCondition(obj.Status, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonFinalising
				cond.Status = corev1.ConditionTrue
				util.SetRolloutCondition(&obj.Status, *cond)
				br := batchDemo.DeepCopy()
				br.Spec.ReleasePlan.Batches = []v1alpha1.ReleaseBatch{
					{
						CanaryReplicas: intstr.FromInt(1),
					},
				}
				br.Status.Phase = v1alpha1.RolloutPhaseCompleted
				return obj, br
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				s.CanaryStatus.ObservedWorkloadGeneration = 2
				s.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				s.CanaryStatus.StableRevision = "pod-template-hash-v1"
				s.CanaryStatus.CanaryRevision = "56855c89f9"
				s.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				s.CanaryStatus.CurrentStepIndex = 4
				s.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
				cond2 := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond2.Reason = v1alpha1.ProgressingReasonFinalising
				cond2.Status = corev1.ConditionTrue
				util.SetRolloutCondition(s, *cond2)
				return s
			},
		},
		{
			// no batchRelease left: progressing completes and Succeeded is set
			name: "ReconcileRolloutProgressing finalizing -> succeeded",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				delete(dep1.Annotations, util.InRolloutProgressingAnnotation)
				dep1.Status = apps.DeploymentStatus{
					ObservedGeneration: 2,
					Replicas:           10,
					UpdatedReplicas:    10,
					ReadyReplicas:      10,
					AvailableReplicas:  10,
				}
				rs1 := rsDemo.DeepCopy()
				return []*apps.Deployment{dep1}, []*apps.ReplicaSet{rs1}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
				obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
				obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
				obj.Status.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				obj.Status.CanaryStatus.CurrentStepIndex = 4
				obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
				cond := util.GetRolloutCondition(obj.Status, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonFinalising
				cond.Status = corev1.ConditionTrue
				util.SetRolloutCondition(&obj.Status, *cond)
				return obj, nil
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				s.CanaryStatus.ObservedWorkloadGeneration = 2
				s.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				s.CanaryStatus.StableRevision = "pod-template-hash-v1"
				s.CanaryStatus.CanaryRevision = "56855c89f9"
				s.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				s.CanaryStatus.CurrentStepIndex = 4
				s.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
				cond2 := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond2.Reason = v1alpha1.ProgressingReasonCompleted
				cond2.Status = corev1.ConditionFalse
				util.SetRolloutCondition(s, *cond2)
				cond1 := util.NewRolloutCondition(v1alpha1.RolloutConditionSucceeded, corev1.ConditionTrue, "", "")
				cond1.LastUpdateTime = metav1.Time{}
				cond1.LastTransitionTime = metav1.Time{}
				util.SetRolloutCondition(s, *cond1)
				return s
			},
		},
		{
			// stable template restored (image back to v1) -> direct rollback cancels
			name: "ReconcileRolloutProgressing rolling -> rollback",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				dep1.Spec.Template.Spec.Containers[0].Image = "echoserver:v1"
				dep2 := deploymentDemo.DeepCopy()
				dep2.UID = "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180"
				dep2.Name = dep1.Name + "-canary"
				dep2.Labels[util.CanaryDeploymentLabel] = dep1.Name
				rs1 := rsDemo.DeepCopy()
				rs2 := rsDemo.DeepCopy()
				rs2.Name = "echoserver-canary-2"
				rs2.OwnerReferences = []metav1.OwnerReference{
					{
						APIVersion: "apps/v1",
						Kind:       "Deployment",
						Name:       dep2.Name,
						UID:        "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180",
						Controller: utilpointer.BoolPtr(true),
					},
				}
				rs2.Labels["pod-template-hash"] = "pod-template-hash-v2"
				rs2.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
				return []*apps.Deployment{dep1, dep2}, []*apps.ReplicaSet{rs1, rs2}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
				obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
				obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
				obj.Status.CanaryStatus.CurrentStepIndex = 1
				obj.Status.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(obj.Status, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(&obj.Status, *cond)
				return obj, nil
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				s.CanaryStatus.ObservedWorkloadGeneration = 2
				s.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				s.CanaryStatus.StableRevision = "pod-template-hash-v1"
				s.CanaryStatus.CanaryRevision = "5d48f79ff8"
				s.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				s.CanaryStatus.CurrentStepIndex = 1
				s.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonCancelling
				util.SetRolloutCondition(s, *cond)
				return s
			},
		},
		{
			// NOTE(review): this case is byte-identical to the previous one,
			// including its name (so `go test -run` cannot address them
			// separately) — likely an accidental duplication; confirm intent.
			name: "ReconcileRolloutProgressing rolling -> rollback",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				dep1.Spec.Template.Spec.Containers[0].Image = "echoserver:v1"
				dep2 := deploymentDemo.DeepCopy()
				dep2.UID = "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180"
				dep2.Name = dep1.Name + "-canary"
				dep2.Labels[util.CanaryDeploymentLabel] = dep1.Name
				rs1 := rsDemo.DeepCopy()
				rs2 := rsDemo.DeepCopy()
				rs2.Name = "echoserver-canary-2"
				rs2.OwnerReferences = []metav1.OwnerReference{
					{
						APIVersion: "apps/v1",
						Kind:       "Deployment",
						Name:       dep2.Name,
						UID:        "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180",
						Controller: utilpointer.BoolPtr(true),
					},
				}
				rs2.Labels["pod-template-hash"] = "pod-template-hash-v2"
				rs2.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
				return []*apps.Deployment{dep1, dep2}, []*apps.ReplicaSet{rs1, rs2}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
				obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
				obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
				obj.Status.CanaryStatus.CurrentStepIndex = 1
				obj.Status.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(obj.Status, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(&obj.Status, *cond)
				return obj, nil
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				s.CanaryStatus.ObservedWorkloadGeneration = 2
				s.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				s.CanaryStatus.StableRevision = "pod-template-hash-v1"
				s.CanaryStatus.CanaryRevision = "5d48f79ff8"
				s.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				s.CanaryStatus.CurrentStepIndex = 1
				s.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonCancelling
				util.SetRolloutCondition(s, *cond)
				return s
			},
		},
		{
			// a third revision (v3) arrives mid-release -> canary restarts (continuous release)
			name: "ReconcileRolloutProgressing rolling -> continueRelease",
			getObj: func() ([]*apps.Deployment, []*apps.ReplicaSet) {
				dep1 := deploymentDemo.DeepCopy()
				dep1.Spec.Template.Spec.Containers[0].Image = "echoserver:v3"
				dep2 := deploymentDemo.DeepCopy()
				dep2.UID = "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180"
				dep2.Name = dep1.Name + "-canary"
				dep2.Labels[util.CanaryDeploymentLabel] = dep1.Name
				rs1 := rsDemo.DeepCopy()
				rs2 := rsDemo.DeepCopy()
				rs2.Name = "echoserver-canary-2"
				rs2.OwnerReferences = []metav1.OwnerReference{
					{
						APIVersion: "apps/v1",
						Kind:       "Deployment",
						Name:       dep2.Name,
						UID:        "1ca4d850-9ec3-48bd-84cb-19f2e8cf4180",
						Controller: utilpointer.BoolPtr(true),
					},
				}
				rs2.Labels["pod-template-hash"] = "pod-template-hash-v2"
				rs2.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
				return []*apps.Deployment{dep1, dep2}, []*apps.ReplicaSet{rs1, rs2}
			},
			getNetwork: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *v1alpha1.BatchRelease) {
				obj := rolloutDemo.DeepCopy()
				obj.Status.CanaryStatus.ObservedWorkloadGeneration = 2
				obj.Status.CanaryStatus.RolloutHash = "f55bvd874d5f2fzvw46bv966x4bwbdv4wx6bd9f7b46ww788954b8z8w29b7wxfd"
				obj.Status.CanaryStatus.StableRevision = "pod-template-hash-v1"
				obj.Status.CanaryStatus.CanaryRevision = "56855c89f9"
				obj.Status.CanaryStatus.CurrentStepIndex = 3
				obj.Status.CanaryStatus.CanaryReplicas = 5
				obj.Status.CanaryStatus.CanaryReadyReplicas = 3
				obj.Status.CanaryStatus.PodTemplateHash = "pod-template-hash-v2"
				obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateUpgrade
				cond := util.GetRolloutCondition(obj.Status, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInRolling
				util.SetRolloutCondition(&obj.Status, *cond)
				return obj, nil
			},
			expectStatus: func() *v1alpha1.RolloutStatus {
				s := rolloutDemo.Status.DeepCopy()
				// canary status is cleared so the new revision restarts from scratch
				s.CanaryStatus = nil
				cond := util.GetRolloutCondition(*s, v1alpha1.RolloutConditionProgressing)
				cond.Reason = v1alpha1.ProgressingReasonInitializing
				util.SetRolloutCondition(s, *cond)
				return s
			},
		},
	}

	for _, cs := range cases {
		t.Run(cs.name, func(t *testing.T) {
			deps, rss := cs.getObj()
			rollout, br := cs.getRollout()
			// fake client pre-loaded with the rollout and the demo config
			fc := fake.NewClientBuilder().WithScheme(scheme).WithObjects(rollout, demoConf.DeepCopy()).Build()
			for _, rs := range rss {
				_ = fc.Create(context.TODO(), rs)
			}
			for _, dep := range deps {
				_ = fc.Create(context.TODO(), dep)
			}
			if br != nil {
				_ = fc.Create(context.TODO(), br)
			}
			ss, in := cs.getNetwork()
			for _, obj := range ss {
				_ = fc.Create(context.TODO(), obj)
			}
			for _, obj := range in {
				_ = fc.Create(context.TODO(), obj)
			}
			r := &RolloutReconciler{
				Client:                fc,
				Scheme:                scheme,
				Recorder:              record.NewFakeRecorder(10),
				finder:                util.NewControllerFinder(fc),
				trafficRoutingManager: trafficrouting.NewTrafficRoutingManager(fc),
			}
			r.canaryManager = &canaryReleaseManager{
				Client:                fc,
				trafficRoutingManager: r.trafficRoutingManager,
				recorder:              r.Recorder,
			}
			newStatus := rollout.Status.DeepCopy()
			_, err := r.reconcileRolloutProgressing(rollout, newStatus)
			if err != nil {
				t.Fatalf("reconcileRolloutProgressing failed: %s", err.Error())
			}
			// persist the computed status, then compare against the expectation
			_ = r.updateRolloutStatusInternal(rollout, *newStatus)
			checkRolloutEqual(fc, t, client.ObjectKey{Name: rollout.Name}, cs.expectStatus())
		})
	}
}
|
||||
|
||||
func checkRolloutEqual(c client.WithWatch, t *testing.T, key client.ObjectKey, expect *v1alpha1.RolloutStatus) {
|
||||
obj := &v1alpha1.Rollout{}
|
||||
err := c.Get(context.TODO(), key, obj)
|
||||
if err != nil {
|
||||
t.Fatalf("get object failed: %s", err.Error())
|
||||
}
|
||||
cStatus := obj.Status.DeepCopy()
|
||||
cStatus.Message = ""
|
||||
if cStatus.CanaryStatus != nil {
|
||||
cStatus.CanaryStatus.LastUpdateTime = nil
|
||||
}
|
||||
cond1 := util.GetRolloutCondition(*cStatus, v1alpha1.RolloutConditionProgressing)
|
||||
cond1.Message = ""
|
||||
util.SetRolloutCondition(cStatus, *cond1)
|
||||
cond2 := util.GetRolloutCondition(*cStatus, v1alpha1.RolloutConditionSucceeded)
|
||||
if cond2 != nil {
|
||||
util.RemoveRolloutCondition(cStatus, v1alpha1.RolloutConditionSucceeded)
|
||||
cond2.LastUpdateTime = metav1.Time{}
|
||||
cond2.LastTransitionTime = metav1.Time{}
|
||||
util.SetRolloutCondition(cStatus, *cond2)
|
||||
}
|
||||
if !reflect.DeepEqual(expect, cStatus) {
|
||||
t.Fatalf("expect(%s), but get(%s)", util.DumpJSON(expect), util.DumpJSON(cStatus))
|
||||
}
|
||||
}
|
||||
|
||||
func TestReCalculateCanaryStepIndex(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
getObj func() (*apps.Deployment, *apps.ReplicaSet)
|
||||
getRollout func() *v1alpha1.Rollout
|
||||
getBatchRelease func() *v1alpha1.BatchRelease
|
||||
expectStepIndex int32
|
||||
}{
|
||||
{
|
||||
name: "steps changed v1",
|
||||
getObj: func() (*apps.Deployment, *apps.ReplicaSet) {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
return obj, rsDemo.DeepCopy()
|
||||
},
|
||||
getRollout: func() *v1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(20),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(50),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(100),
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
getBatchRelease: func() *v1alpha1.BatchRelease {
|
||||
obj := batchDemo.DeepCopy()
|
||||
obj.Spec.ReleasePlan.Batches = []v1alpha1.ReleaseBatch{
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("40%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("60%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("100%"),
|
||||
},
|
||||
}
|
||||
obj.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
|
||||
return obj
|
||||
},
|
||||
expectStepIndex: 2,
|
||||
},
|
||||
{
|
||||
name: "steps changed v2",
|
||||
getObj: func() (*apps.Deployment, *apps.ReplicaSet) {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
return obj, rsDemo.DeepCopy()
|
||||
},
|
||||
getRollout: func() *v1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(20),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(40),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(100),
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
getBatchRelease: func() *v1alpha1.BatchRelease {
|
||||
obj := batchDemo.DeepCopy()
|
||||
obj.Spec.ReleasePlan.Batches = []v1alpha1.ReleaseBatch{
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("40%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("60%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("100%"),
|
||||
},
|
||||
}
|
||||
obj.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
|
||||
return obj
|
||||
},
|
||||
expectStepIndex: 2,
|
||||
},
|
||||
{
|
||||
name: "steps changed v3",
|
||||
getObj: func() (*apps.Deployment, *apps.ReplicaSet) {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
return obj, rsDemo.DeepCopy()
|
||||
},
|
||||
getRollout: func() *v1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(40),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(60),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(100),
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
getBatchRelease: func() *v1alpha1.BatchRelease {
|
||||
obj := batchDemo.DeepCopy()
|
||||
obj.Spec.ReleasePlan.Batches = []v1alpha1.ReleaseBatch{
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("20%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("40%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("100%"),
|
||||
},
|
||||
}
|
||||
obj.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(1)
|
||||
return obj
|
||||
},
|
||||
expectStepIndex: 1,
|
||||
},
|
||||
{
|
||||
name: "steps changed v4",
|
||||
getObj: func() (*apps.Deployment, *apps.ReplicaSet) {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
return obj, rsDemo.DeepCopy()
|
||||
},
|
||||
getRollout: func() *v1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(10),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(30),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(100),
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
getBatchRelease: func() *v1alpha1.BatchRelease {
|
||||
obj := batchDemo.DeepCopy()
|
||||
obj.Spec.ReleasePlan.Batches = []v1alpha1.ReleaseBatch{
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("20%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("40%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("100%"),
|
||||
},
|
||||
}
|
||||
obj.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
|
||||
return obj
|
||||
},
|
||||
expectStepIndex: 2,
|
||||
},
|
||||
{
|
||||
name: "steps changed v5",
|
||||
getObj: func() (*apps.Deployment, *apps.ReplicaSet) {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
return obj, rsDemo.DeepCopy()
|
||||
},
|
||||
getRollout: func() *v1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(2),
|
||||
Replicas: &intstr.IntOrString{
|
||||
Type: intstr.String,
|
||||
StrVal: "10%",
|
||||
},
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(3),
|
||||
Replicas: &intstr.IntOrString{
|
||||
Type: intstr.String,
|
||||
StrVal: "10%",
|
||||
},
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
getBatchRelease: func() *v1alpha1.BatchRelease {
|
||||
obj := batchDemo.DeepCopy()
|
||||
obj.Spec.ReleasePlan.Batches = []v1alpha1.ReleaseBatch{
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("10%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("20%"),
|
||||
},
|
||||
{
|
||||
CanaryReplicas: intstr.FromString("30%"),
|
||||
},
|
||||
}
|
||||
obj.Spec.ReleasePlan.BatchPartition = utilpointer.Int32(0)
|
||||
return obj
|
||||
},
|
||||
expectStepIndex: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, cs := range cases {
|
||||
t.Run(cs.name, func(t *testing.T) {
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).Build()
|
||||
client.Create(context.TODO(), cs.getBatchRelease())
|
||||
dep, rs := cs.getObj()
|
||||
client.Create(context.TODO(), dep)
|
||||
client.Create(context.TODO(), rs)
|
||||
client.Create(context.TODO(), cs.getRollout())
|
||||
reconciler := &RolloutReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
finder: util.NewControllerFinder(client),
|
||||
}
|
||||
reconciler.canaryManager = &canaryReleaseManager{
|
||||
Client: client,
|
||||
trafficRoutingManager: reconciler.trafficRoutingManager,
|
||||
recorder: reconciler.Recorder,
|
||||
}
|
||||
rollout := cs.getRollout()
|
||||
workload, err := reconciler.finder.GetWorkloadForRef(rollout.Namespace, rollout.Spec.ObjectRef.WorkloadRef)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
c := &util.RolloutContext{Rollout: rollout, Workload: workload}
|
||||
newStepIndex, err := reconciler.recalculateCanaryStep(c)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
if cs.expectStepIndex != newStepIndex {
|
||||
t.Fatalf("expect %d, but %d", cs.expectStepIndex, newStepIndex)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,245 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
// calculateRolloutStatus derives the next status for the given rollout from
// the referenced workload. It returns retry=true when the caller should
// requeue and re-evaluate later (the workload status is not yet consistent);
// newStatus is nil in that case and on error.
func (r *RolloutReconciler) calculateRolloutStatus(rollout *v1alpha1.Rollout) (retry bool, newStatus *v1alpha1.RolloutStatus, err error) {
	// hash rollout: keep the RolloutHashAnnotation in sync with the canary
	// step definitions so that later step changes can be detected.
	if err = r.calculateRolloutHash(rollout); err != nil {
		return false, nil, err
	}
	newStatus = rollout.Status.DeepCopy()
	newStatus.ObservedGeneration = rollout.GetGeneration()
	// delete rollout CRD: flip to the Terminating phase exactly once and
	// record the Terminating condition; further processing stops here.
	if !rollout.DeletionTimestamp.IsZero() {
		if newStatus.Phase != v1alpha1.RolloutPhaseTerminating {
			newStatus.Phase = v1alpha1.RolloutPhaseTerminating
			cond := util.NewRolloutCondition(v1alpha1.RolloutConditionTerminating, corev1.ConditionTrue, v1alpha1.TerminatingReasonInTerminating, "Rollout is in terminating")
			util.SetRolloutCondition(newStatus, *cond)
		}
		return false, newStatus, nil
	}
	if newStatus.Phase == "" {
		newStatus.Phase = v1alpha1.RolloutPhaseInitial
	}
	// get ref workload
	workload, err := r.finder.GetWorkloadForRef(rollout.Namespace, rollout.Spec.ObjectRef.WorkloadRef)
	if err != nil {
		klog.Errorf("rollout(%s/%s) get workload failed: %s", rollout.Namespace, rollout.Name, err.Error())
		return false, nil, err
	} else if workload == nil {
		// Workload vanished: reset to a fresh Initial status.
		newStatus = &v1alpha1.RolloutStatus{
			ObservedGeneration: rollout.Generation,
			Phase:              v1alpha1.RolloutPhaseInitial,
			Message:            "Workload Not Found",
		}
		klog.Infof("rollout(%s/%s) workload not found, and reset status be Initial", rollout.Namespace, rollout.Name)
		return false, newStatus, nil
	}
	// workload status generation is not equal to workload.generation
	if !workload.IsStatusConsistent {
		klog.Infof("rollout(%s/%s) workload status is inconsistent, then wait a moment", rollout.Namespace, rollout.Name)
		return true, nil, nil
	}

	// update workload generation to canaryStatus.ObservedWorkloadGeneration
	// rollout is a target ref bypass, so there needs to be a field to identify the rollout execution process or results,
	// which version of deployment is targeted, ObservedWorkloadGeneration that is to compare with the workload generation
	if newStatus.CanaryStatus != nil && newStatus.CanaryStatus.CanaryRevision != "" &&
		newStatus.CanaryStatus.CanaryRevision == workload.CanaryRevision {
		newStatus.CanaryStatus.ObservedRolloutID = getRolloutID(workload)
		newStatus.CanaryStatus.ObservedWorkloadGeneration = workload.Generation
	}

	switch newStatus.Phase {
	case v1alpha1.RolloutPhaseInitial:
		klog.Infof("rollout(%s/%s) status phase from(%s) -> to(%s)", rollout.Namespace, rollout.Name, v1alpha1.RolloutPhaseInitial, v1alpha1.RolloutPhaseHealthy)
		newStatus.Phase = v1alpha1.RolloutPhaseHealthy
		newStatus.Message = "rollout is healthy"
	case v1alpha1.RolloutPhaseHealthy:
		// workload released, entering the rollout progressing phase
		if workload.InRolloutProgressing {
			klog.Infof("rollout(%s/%s) status phase from(%s) -> to(%s)", rollout.Namespace, rollout.Name, v1alpha1.RolloutPhaseHealthy, v1alpha1.RolloutPhaseProgressing)
			newStatus.Phase = v1alpha1.RolloutPhaseProgressing
			cond := util.NewRolloutCondition(v1alpha1.RolloutConditionProgressing, corev1.ConditionTrue, v1alpha1.ProgressingReasonInitializing, "Rollout is in Progressing")
			util.SetRolloutCondition(newStatus, *cond)
			util.RemoveRolloutCondition(newStatus, v1alpha1.RolloutConditionSucceeded)
		} else if newStatus.CanaryStatus == nil {
			// The following logic is to make PaaS be able to judge whether the rollout is ready
			// at the first deployment of the Rollout/Workload. For example: generally, a PaaS
			// platform can use the following code to judge whether the rollout progression is completed:
			// ```
			// if getRolloutID(workload, rollout) == newStatus.CanaryStatus.ObservedRolloutID &&
			//    newStatus.CanaryStatus.CurrentStepState == "Completed" {
			//    // do something after rollout
			// }
			// ```
			// But at the first deployment of Rollout/Workload, CanaryStatus isn't set due to no rollout progression,
			// and PaaS platform cannot judge whether the deployment is completed base on the code above. So we have
			// to update the status just like the rollout was completed.

			newStatus.CanaryStatus = &v1alpha1.CanaryStatus{
				ObservedRolloutID:          getRolloutID(workload),
				ObservedWorkloadGeneration: workload.Generation,
				PodTemplateHash:            workload.PodTemplateHash,
				CanaryRevision:             workload.CanaryRevision,
				StableRevision:             workload.StableRevision,
				CurrentStepIndex:           int32(len(rollout.Spec.Strategy.Canary.Steps)),
				CurrentStepState:           v1alpha1.CanaryStepStateCompleted,
				RolloutHash:                rollout.Annotations[util.RolloutHashAnnotation],
			}
			newStatus.Message = "workload deployment is completed"
		}
	}
	return false, newStatus, nil
}
|
||||
|
||||
// calculateRolloutHash computes a hash over the rollout's canary strategy and
// stores it in the RolloutHashAnnotation, patching the object when the value
// changed. rolloutHash mainly records the step batch information; when the
// user's steps change, the current batch can be recalculated.
func (r *RolloutReconciler) calculateRolloutHash(rollout *v1alpha1.Rollout) error {
	canary := rollout.Spec.Strategy.Canary.DeepCopy()
	// FailureThreshold and each step's Pause are stripped before hashing so
	// that editing them does not look like a step/batch layout change.
	canary.FailureThreshold = nil
	canary.Steps = nil
	for i := range rollout.Spec.Strategy.Canary.Steps {
		step := rollout.Spec.Strategy.Canary.Steps[i].DeepCopy()
		step.Pause = v1alpha1.RolloutPause{}
		canary.Steps = append(canary.Steps, *step)
	}
	data := util.DumpJSON(canary)
	hash := rand.SafeEncodeString(util.EncodeHash(data))
	// Hash unchanged: nothing to patch.
	if rollout.Annotations[util.RolloutHashAnnotation] == hash {
		return nil
	}
	// update rollout hash in annotation
	cloneObj := rollout.DeepCopy()
	body := fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, util.RolloutHashAnnotation, hash)
	err := r.Patch(context.TODO(), cloneObj, client.RawPatch(types.MergePatchType, []byte(body)))
	if err != nil {
		klog.Errorf("rollout(%s/%s) patch(%s) failed: %s", rollout.Namespace, rollout.Name, body, err.Error())
		return err
	}
	if rollout.Annotations == nil {
		rollout.Annotations = map[string]string{}
	}
	// Log the old value before overwriting the in-memory annotation so the
	// from(...) -> to(...) transition is accurate.
	klog.Infof("rollout(%s/%s) patch hash from(%s) -> to(%s)", rollout.Namespace, rollout.Name, rollout.Annotations[util.RolloutHashAnnotation], hash)
	rollout.Annotations[util.RolloutHashAnnotation] = hash
	return nil
}
|
||||
|
||||
func (r *RolloutReconciler) updateRolloutStatusInternal(rollout *v1alpha1.Rollout, newStatus v1alpha1.RolloutStatus) error {
|
||||
if reflect.DeepEqual(rollout.Status, newStatus) {
|
||||
return nil
|
||||
}
|
||||
rolloutClone := rollout.DeepCopy()
|
||||
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
if err := r.Client.Get(context.TODO(), types.NamespacedName{Namespace: rollout.Namespace, Name: rollout.Name}, rolloutClone); err != nil {
|
||||
klog.Errorf("error getting updated rollout(%s/%s) from client", rollout.Namespace, rollout.Name)
|
||||
return err
|
||||
}
|
||||
rolloutClone.Status = newStatus
|
||||
return r.Client.Status().Update(context.TODO(), rolloutClone)
|
||||
}); err != nil {
|
||||
klog.Errorf("update rollout(%s/%s) status failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
rollout.Status = newStatus
|
||||
klog.Infof("rollout(%s/%s) status from(%s) -> to(%s) success", rollout.Namespace, rollout.Name, util.DumpJSON(rollout.Status), util.DumpJSON(newStatus))
|
||||
return nil
|
||||
}
|
||||
|
||||
// reconcileRolloutTerminating drives the finalising work for a rollout that is
// being deleted. It returns a recheck time when finalising is not yet done,
// and nil when everything has completed.
//
// NOTE(review): cond is dereferenced without a nil check — this relies on the
// Terminating condition having been set before this is called
// (calculateRolloutStatus sets it when the phase flips to Terminating);
// confirm no other call path reaches here without it.
func (r *RolloutReconciler) reconcileRolloutTerminating(rollout *v1alpha1.Rollout, newStatus *v1alpha1.RolloutStatus) (*time.Time, error) {
	cond := util.GetRolloutCondition(rollout.Status, v1alpha1.RolloutConditionTerminating)
	// Already finalised on a previous reconcile; nothing left to do.
	if cond.Reason == v1alpha1.TerminatingReasonCompleted {
		return nil, nil
	}
	workload, err := r.finder.GetWorkloadForRef(rollout.Namespace, rollout.Spec.ObjectRef.WorkloadRef)
	if err != nil {
		klog.Errorf("rollout(%s/%s) get workload failed: %s", rollout.Namespace, rollout.Name, err.Error())
		return nil, err
	}
	c := &util.RolloutContext{Rollout: rollout, NewStatus: newStatus, Workload: workload}
	done, err := r.doFinalising(c)
	if err != nil {
		return nil, err
	} else if done {
		// Finalising finished: mark the Terminating condition Completed.
		klog.Infof("rollout(%s/%s) is terminating, and state from(%s) -> to(%s)", rollout.Namespace, rollout.Name, cond.Reason, v1alpha1.TerminatingReasonCompleted)
		cond.Reason = v1alpha1.TerminatingReasonCompleted
		cond.Status = corev1.ConditionFalse
		util.SetRolloutCondition(newStatus, *cond)
	} else {
		// Incomplete, recheck
		expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
		c.RecheckTime = &expectedTime
		klog.Infof("rollout(%s/%s) terminating is incomplete, and recheck(%s)", rollout.Namespace, rollout.Name, expectedTime.String())
	}
	return c.RecheckTime, nil
}
|
||||
|
||||
// handle adding and handle finalizer logic, it turns if we should continue to reconcile
|
||||
func (r *RolloutReconciler) handleFinalizer(rollout *v1alpha1.Rollout) error {
|
||||
// delete rollout crd, remove finalizer
|
||||
if !rollout.DeletionTimestamp.IsZero() {
|
||||
cond := util.GetRolloutCondition(rollout.Status, v1alpha1.RolloutConditionTerminating)
|
||||
if cond != nil && cond.Reason == v1alpha1.TerminatingReasonCompleted {
|
||||
// Completed
|
||||
if controllerutil.ContainsFinalizer(rollout, util.KruiseRolloutFinalizer) {
|
||||
err := util.UpdateFinalizer(r.Client, rollout, util.RemoveFinalizerOpType, util.KruiseRolloutFinalizer)
|
||||
if err != nil {
|
||||
klog.Errorf("remove rollout(%s/%s) finalizer failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
klog.Infof("remove rollout(%s/%s) finalizer success", rollout.Namespace, rollout.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// create rollout crd, add finalizer
|
||||
if !controllerutil.ContainsFinalizer(rollout, util.KruiseRolloutFinalizer) {
|
||||
err := util.UpdateFinalizer(r.Client, rollout, util.AddFinalizerOpType, util.KruiseRolloutFinalizer)
|
||||
if err != nil {
|
||||
klog.Errorf("register rollout(%s/%s) finalizer failed: %s", rollout.Namespace, rollout.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
klog.Infof("register rollout(%s/%s) finalizer success", rollout.Namespace, rollout.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getRolloutID(workload *util.Workload) string {
|
||||
if workload != nil {
|
||||
return workload.Labels[v1alpha1.RolloutIDLabel]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
|
@ -0,0 +1,102 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/trafficrouting"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/tools/record"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
|
||||
func TestCalculateRolloutHash(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
getRollout func() *v1alpha1.Rollout
|
||||
expectHash func() string
|
||||
}{
|
||||
{
|
||||
name: "hash, test1",
|
||||
getRollout: func() *v1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
return obj
|
||||
},
|
||||
expectHash: func() string {
|
||||
return "626fd556c5d5v2d9b4f7c2xvbc9dxddxzd48xvb9w9wfcdvdz6v959fbzd84b57x"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "hash, test2",
|
||||
getRollout: func() *v1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Paused = true
|
||||
obj.Spec.Strategy.Canary.FailureThreshold = &intstr.IntOrString{Type: intstr.Int}
|
||||
obj.Spec.Strategy.Canary.Steps[0].Pause = v1alpha1.RolloutPause{Duration: utilpointer.Int32(10)}
|
||||
return obj
|
||||
},
|
||||
expectHash: func() string {
|
||||
return "626fd556c5d5v2d9b4f7c2xvbc9dxddxzd48xvb9w9wfcdvdz6v959fbzd84b57x"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "hash, test3",
|
||||
getRollout: func() *v1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
obj.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{
|
||||
{
|
||||
Weight: utilpointer.Int32(50),
|
||||
},
|
||||
{
|
||||
Weight: utilpointer.Int32(100),
|
||||
},
|
||||
}
|
||||
return obj
|
||||
},
|
||||
expectHash: func() string {
|
||||
return "8c449wxc46x8dd764x4v4wzvc7454f48478vd9db27fv8v9dw5cwbcb6b42b75dc"
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, cs := range cases {
|
||||
t.Run(cs.name, func(t *testing.T) {
|
||||
rollout := cs.getRollout()
|
||||
fc := fake.NewClientBuilder().WithScheme(scheme).WithObjects(rollout).Build()
|
||||
r := &RolloutReconciler{
|
||||
Client: fc,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(10),
|
||||
finder: util.NewControllerFinder(fc),
|
||||
trafficRoutingManager: trafficrouting.NewTrafficRoutingManager(fc),
|
||||
}
|
||||
r.canaryManager = &canaryReleaseManager{
|
||||
Client: fc,
|
||||
trafficRoutingManager: r.trafficRoutingManager,
|
||||
recorder: r.Recorder,
|
||||
}
|
||||
_ = r.calculateRolloutHash(rollout)
|
||||
if rollout.Annotations[util.RolloutHashAnnotation] != cs.expectHash() {
|
||||
t.Fatalf("expect(%s), but get(%s)", cs.expectHash(), rollout.Annotations[util.RolloutHashAnnotation])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,200 +0,0 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// updateRolloutStatus recomputes the rollout status from the referenced
// workload and persists it. done reports whether the caller may continue with
// the rest of reconciliation (false means "status not ready yet, retry").
//
// NOTE: the deferred function persists newStatus, refreshes the rollout hash
// annotation, and finally mirrors newStatus into rollout.Status. Because err
// is a named return, any error raised inside the defer replaces the
// function's original error value.
func (r *RolloutReconciler) updateRolloutStatus(rollout *rolloutv1alpha1.Rollout) (done bool, err error) {
	newStatus := *rollout.Status.DeepCopy()
	newStatus.ObservedGeneration = rollout.GetGeneration()
	defer func() {
		err = r.updateRolloutStatusInternal(rollout, newStatus)
		if err != nil {
			klog.Errorf("update rollout(%s/%s) status failed: %s", rollout.Namespace, rollout.Name, err.Error())
			return
		}
		err = r.calculateRolloutHash(rollout)
		if err != nil {
			return
		}
		// Mirror the persisted status into the in-memory object.
		rollout.Status = newStatus
	}()

	// delete rollout CRD
	if !rollout.DeletionTimestamp.IsZero() && newStatus.Phase != rolloutv1alpha1.RolloutPhaseTerminating {
		newStatus.Phase = rolloutv1alpha1.RolloutPhaseTerminating
		cond := util.NewRolloutCondition(rolloutv1alpha1.RolloutConditionTerminating, corev1.ConditionFalse, rolloutv1alpha1.TerminatingReasonInTerminating, "Rollout is in terminating")
		util.SetRolloutCondition(&newStatus, *cond)
	} else if newStatus.Phase == "" {
		newStatus.Phase = rolloutv1alpha1.RolloutPhaseInitial
	}
	// get ref workload
	workload, err := r.Finder.GetWorkloadForRef(rollout.Namespace, rollout.Spec.ObjectRef.WorkloadRef)
	if err != nil {
		klog.Errorf("rollout(%s/%s) get workload failed: %s", rollout.Namespace, rollout.Name, err.Error())
		return
	} else if workload == nil {
		// Workload vanished: start over from the Initial phase (only for
		// rollouts that are not being deleted).
		if rollout.DeletionTimestamp.IsZero() {
			resetStatus(&newStatus)
			klog.Infof("rollout(%s/%s) workload not found, and reset status be Initial", rollout.Namespace, rollout.Name)
		}
		done = true
		return
	}

	// workload status is not consistent
	if !workload.IsStatusConsistent {
		klog.Infof("rollout(%s/%s) workload status isn't consistent, then wait a moment", rollout.Namespace, rollout.Name)
		done = false
		return
	}
	newStatus.StableRevision = workload.StableRevision
	// update workload generation to canaryStatus.ObservedWorkloadGeneration
	// rollout is a target ref bypass, so there needs to be a field to identify the rollout execution process or results,
	// which version of deployment is targeted, ObservedWorkloadGeneration that is to compare with the workload generation
	if newStatus.CanaryStatus != nil && newStatus.CanaryStatus.CanaryRevision != "" &&
		newStatus.CanaryStatus.CanaryRevision == workload.CanaryRevision {
		newStatus.CanaryStatus.ObservedRolloutID = getRolloutID(workload, rollout)
		newStatus.CanaryStatus.ObservedWorkloadGeneration = workload.Generation
	}

	switch newStatus.Phase {
	case rolloutv1alpha1.RolloutPhaseInitial:
		klog.Infof("rollout(%s/%s) status phase from(%s) -> to(%s)", rollout.Namespace, rollout.Name, rolloutv1alpha1.RolloutPhaseInitial, rolloutv1alpha1.RolloutPhaseHealthy)
		newStatus.Phase = rolloutv1alpha1.RolloutPhaseHealthy
		newStatus.Message = "rollout is healthy"
	case rolloutv1alpha1.RolloutPhaseHealthy:
		if workload.InRolloutProgressing {
			// from healthy to progressing
			klog.Infof("rollout(%s/%s) status phase from(%s) -> to(%s)", rollout.Namespace, rollout.Name, rolloutv1alpha1.RolloutPhaseHealthy, rolloutv1alpha1.RolloutPhaseProgressing)
			newStatus.Phase = rolloutv1alpha1.RolloutPhaseProgressing
			cond := util.NewRolloutCondition(rolloutv1alpha1.RolloutConditionProgressing, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonInitializing, "Rollout is in Progressing")
			util.SetRolloutCondition(&newStatus, *cond)
			newStatus.CanaryStatus = &rolloutv1alpha1.CanaryStatus{}
			newStatus.Message = "Rollout is in Progressing"
		} else if newStatus.CanaryStatus == nil {
			// The following logic is to make PaaS be able to judge whether the rollout is ready
			// at the first deployment of the Rollout/Workload. For example: generally, a PaaS
			// platform can use the following code to judge whether the rollout progression is completed:
			// ```
			// if getRolloutID(workload, rollout) == rollout.Status.CanaryStatus.ObservedRolloutID &&
			//    rollout.Status.CanaryStatus.CurrentStepState == "Completed" {
			//    // do something after rollout
			// }
			// ```
			// But at the first deployment of Rollout/Workload, CanaryStatus isn't set due to no rollout progression,
			// and PaaS platform cannot judge whether the deployment is completed base on the code above. So we have
			// to update the status just like the rollout was completed.

			newStatus.CanaryStatus = &rolloutv1alpha1.CanaryStatus{
				CanaryReplicas:             workload.CanaryReplicas,
				CanaryReadyReplicas:        workload.CanaryReadyReplicas,
				ObservedRolloutID:          getRolloutID(workload, rollout),
				ObservedWorkloadGeneration: workload.Generation,
				PodTemplateHash:            workload.PodTemplateHash,
				CanaryRevision:             workload.CanaryRevision,
				CurrentStepIndex:           int32(len(rollout.Spec.Strategy.Canary.Steps)),
				CurrentStepState:           rolloutv1alpha1.CanaryStepStateCompleted,
			}
			newStatus.Message = "workload deployment is completed"
		}
	case rolloutv1alpha1.RolloutPhaseProgressing:
		// A finished or cancelled progression returns the rollout to Healthy.
		cond := util.GetRolloutCondition(newStatus, rolloutv1alpha1.RolloutConditionProgressing)
		if cond == nil || cond.Reason == rolloutv1alpha1.ProgressingReasonSucceeded || cond.Reason == rolloutv1alpha1.ProgressingReasonCanceled {
			newStatus.Phase = rolloutv1alpha1.RolloutPhaseHealthy
		}
	}
	done = true
	return
}
|
||||
|
||||
func (r *RolloutReconciler) updateRolloutStatusInternal(rollout *rolloutv1alpha1.Rollout, newStatus rolloutv1alpha1.RolloutStatus) error {
|
||||
if reflect.DeepEqual(rollout.Status, newStatus) {
|
||||
return nil
|
||||
}
|
||||
rolloutClone := rollout.DeepCopy()
|
||||
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
if err := r.Client.Get(context.TODO(), types.NamespacedName{Namespace: rollout.Namespace, Name: rollout.Name}, rolloutClone); err != nil {
|
||||
klog.Errorf("error getting updated rollout(%s/%s) from client", rollout.Namespace, rollout.Name)
|
||||
return err
|
||||
}
|
||||
rolloutClone.Status = newStatus
|
||||
if err := r.Client.Status().Update(context.TODO(), rolloutClone); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
oldBy, _ := json.Marshal(rollout.Status)
|
||||
newBy, _ := json.Marshal(newStatus)
|
||||
klog.Infof("rollout(%s/%s) status from(%s) -> to(%s)", rollout.Namespace, rollout.Name, string(oldBy), string(newBy))
|
||||
return nil
|
||||
}
|
||||
|
||||
// resetStatus resets the status of the rollout to start from the beginning;
// used when the referenced workload no longer exists.
func resetStatus(status *rolloutv1alpha1.RolloutStatus) {
	status.StableRevision = ""
	//util.RemoveRolloutCondition(status, rolloutv1alpha1.RolloutConditionProgressing)
	status.Phase = rolloutv1alpha1.RolloutPhaseInitial
	status.Message = "workload not found"
}
|
||||
|
||||
func (r *RolloutReconciler) calculateRolloutHash(rollout *rolloutv1alpha1.Rollout) error {
|
||||
spec := rollout.Spec.DeepCopy()
|
||||
// ignore paused filed
|
||||
spec.Strategy.Paused = false
|
||||
data := util.DumpJSON(spec)
|
||||
hash := rand.SafeEncodeString(hash(data))
|
||||
if rollout.Annotations[util.RolloutHashAnnotation] == hash {
|
||||
return nil
|
||||
}
|
||||
// update rollout hash in annotation
|
||||
cloneObj := rollout.DeepCopy()
|
||||
body := fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, util.RolloutHashAnnotation, hash)
|
||||
err := r.Patch(context.TODO(), cloneObj, client.RawPatch(types.MergePatchType, []byte(body)))
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) patch(%s) failed: %s", rollout.Namespace, rollout.Name, body, err.Error())
|
||||
return err
|
||||
}
|
||||
if rollout.Annotations == nil {
|
||||
rollout.Annotations = map[string]string{}
|
||||
}
|
||||
rollout.Annotations[util.RolloutHashAnnotation] = hash
|
||||
klog.Infof("rollout(%s/%s) patch annotation(%s=%s) success", rollout.Namespace, rollout.Name, util.RolloutHashAnnotation, hash)
|
||||
return nil
|
||||
}
|
||||
|
||||
// hash hashes `data` with sha256 and returns the hex string
|
||||
func hash(data string) string {
|
||||
return fmt.Sprintf("%x", sha256.Sum256([]byte(data)))
|
||||
}
|
||||
|
|
@ -1,285 +0,0 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rollout
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/trafficrouting"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/trafficrouting/gateway"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/trafficrouting/ingress"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func (r *rolloutContext) doCanaryTrafficRouting() (bool, error) {
|
||||
if len(r.rollout.Spec.Strategy.Canary.TrafficRoutings) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
// current only support one trafficRouting
|
||||
trafficRouting := r.rollout.Spec.Strategy.Canary.TrafficRoutings[0]
|
||||
if trafficRouting.GracePeriodSeconds <= 0 {
|
||||
trafficRouting.GracePeriodSeconds = defaultGracePeriodSeconds
|
||||
}
|
||||
canaryStatus := r.newStatus.CanaryStatus
|
||||
if r.newStatus.StableRevision == "" || canaryStatus.PodTemplateHash == "" {
|
||||
klog.Warningf("rollout(%s/%s) stableRevision or podTemplateHash can't be empty, and wait a moment", r.rollout.Namespace, r.rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
canaryStatus.CanaryService = r.canaryService
|
||||
//fetch stable service
|
||||
stableService := &corev1.Service{}
|
||||
err := r.Get(context.TODO(), client.ObjectKey{Namespace: r.rollout.Namespace, Name: r.stableService}, stableService)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) get stable service(%s) failed: %s", r.rollout.Namespace, r.rollout.Name, r.stableService, err.Error())
|
||||
// not found, wait a moment, retry
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
// fetch canary service
|
||||
// todo for the time being, we do not consider the scenario where the user only changes the stable service definition during rollout progressing
|
||||
canaryService := &corev1.Service{}
|
||||
err = r.Get(context.TODO(), client.ObjectKey{Namespace: r.rollout.Namespace, Name: r.canaryService}, canaryService)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
klog.Errorf("rollout(%s/%s) get canary service(%s) failed: %s", r.rollout.Namespace, r.rollout.Name, r.canaryService, err.Error())
|
||||
return false, err
|
||||
} else if errors.IsNotFound(err) {
|
||||
klog.Infof("rollout(%s/%s) canary service(%s) Not Found, and create it", r.rollout.Namespace, r.rollout.Name, r.canaryService)
|
||||
return false, r.createCanaryService(stableService)
|
||||
}
|
||||
|
||||
// update service selector
|
||||
// update service selector specific revision pods
|
||||
if canaryService.Spec.Selector[r.podRevisionLabelKey()] != canaryStatus.PodTemplateHash {
|
||||
cloneObj := canaryService.DeepCopy()
|
||||
body := fmt.Sprintf(`{"spec":{"selector":{"%s":"%s"}}}`, r.podRevisionLabelKey(), canaryStatus.PodTemplateHash)
|
||||
if err = r.Patch(context.TODO(), cloneObj, client.RawPatch(types.StrategicMergePatchType, []byte(body))); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) patch canary service(%s) failed: %s", r.rollout.Namespace, r.rollout.Name, r.canaryService, err.Error())
|
||||
return false, err
|
||||
}
|
||||
// update canary service time, and wait 3 seconds, just to be safe
|
||||
canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
klog.Infof("add rollout(%s/%s) canary service(%s) selector(%s=%s) success",
|
||||
r.rollout.Namespace, r.rollout.Name, r.canaryService, r.podRevisionLabelKey(), canaryStatus.PodTemplateHash)
|
||||
}
|
||||
if stableService.Spec.Selector[r.podRevisionLabelKey()] != r.newStatus.StableRevision {
|
||||
cloneObj := stableService.DeepCopy()
|
||||
body := fmt.Sprintf(`{"spec":{"selector":{"%s":"%s"}}}`, r.podRevisionLabelKey(), r.newStatus.StableRevision)
|
||||
if err = r.Patch(context.TODO(), cloneObj, client.RawPatch(types.StrategicMergePatchType, []byte(body))); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) patch stable service(%s) failed: %s", r.rollout.Namespace, r.rollout.Name, r.stableService, err.Error())
|
||||
return false, err
|
||||
}
|
||||
// update stable service time, and wait 3 seconds, just to be safe
|
||||
canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
klog.Infof("add rollout(%s/%s) stable service(%s) selector(%s=%s) success",
|
||||
r.rollout.Namespace, r.rollout.Name, r.stableService, r.podRevisionLabelKey(), r.newStatus.StableRevision)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// After restore stable service configuration, give the ingress provider 3 seconds to take effect
|
||||
if verifyTime := canaryStatus.LastUpdateTime.Add(time.Second * time.Duration(trafficRouting.GracePeriodSeconds)); verifyTime.After(time.Now()) {
|
||||
klog.Infof("update rollout(%s/%s) stable service(%s) done, and wait 3 seconds", r.rollout.Namespace, r.rollout.Name, r.stableService)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// route traffic configuration
|
||||
trController, err := r.newTrafficRoutingController(r)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) newTrafficRoutingController failed: %s", r.rollout.Namespace, r.rollout.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
currentStep := r.rollout.Spec.Strategy.Canary.Steps[canaryStatus.CurrentStepIndex-1]
|
||||
totalStep := len(r.rollout.Spec.Strategy.Canary.Steps)
|
||||
cond := util.GetRolloutCondition(*r.newStatus, rolloutv1alpha1.RolloutConditionProgressing)
|
||||
cond.Message = fmt.Sprintf("Rollout is in step(%d/%d), and route traffic (%d)", canaryStatus.CurrentStepIndex, totalStep, currentStep.Weight)
|
||||
verify, err := trController.EnsureRoutes(context.TODO(), currentStep.Weight, currentStep.Matches)
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else if !verify {
|
||||
klog.Infof("rollout(%s/%s) is doing step(%d) trafficRouting(%s)", r.rollout.Namespace, r.rollout.Name, canaryStatus.CurrentStepIndex, util.DumpJSON(currentStep))
|
||||
return false, nil
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) do step(%d) trafficRouting(%s) success", r.rollout.Namespace, r.rollout.Name, canaryStatus.CurrentStepIndex, util.DumpJSON(currentStep))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// restore stable service configuration, remove selector pod-template-hash
|
||||
func (r *rolloutContext) restoreStableService() (bool, error) {
|
||||
if len(r.rollout.Spec.Strategy.Canary.TrafficRoutings) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
// current only support one trafficRouting
|
||||
trafficRouting := r.rollout.Spec.Strategy.Canary.TrafficRoutings[0]
|
||||
if trafficRouting.GracePeriodSeconds <= 0 {
|
||||
trafficRouting.GracePeriodSeconds = defaultGracePeriodSeconds
|
||||
}
|
||||
//fetch stable service
|
||||
stableService := &corev1.Service{}
|
||||
err := r.Get(context.TODO(), client.ObjectKey{Namespace: r.rollout.Namespace, Name: r.stableService}, stableService)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
klog.Errorf("rollout(%s/%s) get stable service(%s) failed: %s", r.rollout.Namespace, r.rollout.Name, r.stableService, err.Error())
|
||||
return false, err
|
||||
}
|
||||
|
||||
if r.newStatus.CanaryStatus == nil {
|
||||
r.newStatus.CanaryStatus = &rolloutv1alpha1.CanaryStatus{}
|
||||
}
|
||||
//restore stable service configuration,remove hash revision selector
|
||||
if stableService.Spec.Selector != nil && stableService.Spec.Selector[r.podRevisionLabelKey()] != "" {
|
||||
cloneObj := stableService.DeepCopy()
|
||||
body := fmt.Sprintf(`{"spec":{"selector":{"%s":null}}}`, r.podRevisionLabelKey())
|
||||
if err = r.Patch(context.TODO(), cloneObj, client.RawPatch(types.StrategicMergePatchType, []byte(body))); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) patch stable service(%s) failed: %s", r.rollout.Namespace, r.rollout.Name, r.stableService, err.Error())
|
||||
return false, err
|
||||
}
|
||||
klog.Infof("remove rollout(%s/%s) stable service(%s) pod revision selector success, and retry later", r.rollout.Namespace, r.rollout.Name, r.stableService)
|
||||
r.newStatus.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
return false, nil
|
||||
}
|
||||
// After restore stable service configuration, give the ingress provider 3 seconds to take effect
|
||||
if r.newStatus.CanaryStatus.LastUpdateTime != nil {
|
||||
if verifyTime := r.newStatus.CanaryStatus.LastUpdateTime.Add(time.Second * time.Duration(trafficRouting.GracePeriodSeconds)); verifyTime.After(time.Now()) {
|
||||
klog.Infof("restore rollout(%s/%s) stable service(%s) done, and wait a moment", r.rollout.Namespace, r.rollout.Name, r.stableService)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) doFinalising restore stable service(%s) success", r.rollout.Namespace, r.rollout.Name, r.stableService)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// 1. restore ingress or gateway configuration
|
||||
// 2. remove canary service
|
||||
func (r *rolloutContext) doFinalisingTrafficRouting() (bool, error) {
|
||||
if len(r.rollout.Spec.Strategy.Canary.TrafficRoutings) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
// current only support one trafficRouting
|
||||
trafficRouting := r.rollout.Spec.Strategy.Canary.TrafficRoutings[0]
|
||||
klog.Infof("rollout(%s/%s) start finalising traffic routing", r.rollout.Namespace, r.rollout.Name)
|
||||
if trafficRouting.GracePeriodSeconds <= 0 {
|
||||
trafficRouting.GracePeriodSeconds = defaultGracePeriodSeconds
|
||||
}
|
||||
if r.newStatus.CanaryStatus == nil {
|
||||
r.newStatus.CanaryStatus = &rolloutv1alpha1.CanaryStatus{}
|
||||
}
|
||||
// 1. restore ingress and route traffic to stable service
|
||||
trController, err := r.newTrafficRoutingController(r)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) newTrafficRoutingController failed: %s", r.rollout.Namespace, r.rollout.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
verify, err := trController.Finalise(context.TODO())
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else if !verify {
|
||||
klog.Infof("rollout(%s/%s) do finalising: ensure canary routes(weight:0)", r.rollout.Namespace, r.rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// 2. remove canary service
|
||||
if r.newStatus.CanaryStatus.CanaryService == "" {
|
||||
return true, nil
|
||||
}
|
||||
cService := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: r.rollout.Namespace,
|
||||
Name: r.newStatus.CanaryStatus.CanaryService,
|
||||
},
|
||||
}
|
||||
err = r.Delete(context.TODO(), cService)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
klog.Errorf("rollout(%s/%s) remove canary service(%s) failed: %s", r.rollout.Namespace, r.rollout.Name, cService.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) remove canary service(%s) success", r.rollout.Namespace, r.rollout.Name, cService.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *rolloutContext) newTrafficRoutingController(roCtx *rolloutContext) (trafficrouting.Controller, error) {
|
||||
trafficRouting := roCtx.rollout.Spec.Strategy.Canary.TrafficRoutings[0]
|
||||
if trafficRouting.Ingress != nil {
|
||||
gvk := schema.GroupVersionKind{Group: rolloutv1alpha1.GroupVersion.Group, Version: rolloutv1alpha1.GroupVersion.Version, Kind: "Rollout"}
|
||||
return ingress.NewIngressTrafficRouting(r.Client, ingress.Config{
|
||||
RolloutName: r.rollout.Name,
|
||||
RolloutNs: r.rollout.Namespace,
|
||||
CanaryService: r.canaryService,
|
||||
StableService: r.stableService,
|
||||
TrafficConf: trafficRouting.Ingress,
|
||||
OwnerRef: *metav1.NewControllerRef(r.rollout, gvk),
|
||||
})
|
||||
}
|
||||
if trafficRouting.Gateway != nil {
|
||||
return gateway.NewGatewayTrafficRouting(r.Client, gateway.Config{
|
||||
RolloutName: r.rollout.Name,
|
||||
RolloutNs: r.rollout.Namespace,
|
||||
CanaryService: r.canaryService,
|
||||
StableService: r.stableService,
|
||||
TrafficConf: r.rollout.Spec.Strategy.Canary.TrafficRoutings[0].Gateway,
|
||||
})
|
||||
}
|
||||
return nil, fmt.Errorf("TrafficRouting only support Ingress or Gateway")
|
||||
}
|
||||
|
||||
func (r *rolloutContext) createCanaryService(stableService *corev1.Service) error {
|
||||
canaryService := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: r.rollout.Namespace,
|
||||
Name: r.canaryService,
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: r.rollout.APIVersion,
|
||||
Kind: r.rollout.Kind,
|
||||
Name: r.rollout.Name,
|
||||
UID: r.rollout.UID,
|
||||
Controller: utilpointer.BoolPtr(true),
|
||||
BlockOwnerDeletion: utilpointer.BoolPtr(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
Spec: *stableService.Spec.DeepCopy(),
|
||||
}
|
||||
// set field nil
|
||||
canaryService.Spec.ClusterIP = ""
|
||||
canaryService.Spec.ClusterIPs = nil
|
||||
canaryService.Spec.ExternalIPs = nil
|
||||
canaryService.Spec.IPFamilyPolicy = nil
|
||||
canaryService.Spec.IPFamilies = nil
|
||||
canaryService.Spec.LoadBalancerIP = ""
|
||||
canaryService.Spec.Selector[r.podRevisionLabelKey()] = r.newStatus.CanaryStatus.PodTemplateHash
|
||||
err := r.Create(context.TODO(), canaryService)
|
||||
if err != nil && !errors.IsAlreadyExists(err) {
|
||||
klog.Errorf("create rollout(%s/%s) canary service(%s) failed: %s", r.rollout.Namespace, r.rollout.Name, r.canaryService, err.Error())
|
||||
return err
|
||||
}
|
||||
klog.Infof("create rollout(%s/%s) canary service(%s) success", r.rollout.Namespace, r.rollout.Name, util.DumpJSON(canaryService))
|
||||
return nil
|
||||
}
|
||||
|
|
@ -26,7 +26,6 @@ import (
|
|||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/feature"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
utilfeature "github.com/openkruise/rollouts/pkg/util/feature"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
|
@ -359,7 +358,7 @@ func (r *RolloutHistoryReconciler) recordStatusCanarySteps(rollout *rolloutv1alp
|
|||
return err
|
||||
}
|
||||
// get extra labelSelector including rolloutBathID, rolloutID and workload selector
|
||||
lableSelectorString := fmt.Sprintf("%v=%v,%v=%v,%v", util.RolloutBatchIDLabel, len(rolloutHistory.Status.CanarySteps)+1, util.RolloutIDLabel, rolloutHistory.Spec.Rollout.RolloutID, selector.String())
|
||||
lableSelectorString := fmt.Sprintf("%v=%v,%v=%v,%v", rolloutv1alpha1.RolloutBatchIDLabel, len(rolloutHistory.Status.CanarySteps)+1, rolloutv1alpha1.RolloutIDLabel, rolloutHistory.Spec.Rollout.RolloutID, selector.String())
|
||||
extraSelector, err = labels.Parse(lableSelectorString)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
|||
|
|
@ -37,7 +37,6 @@ import (
|
|||
"github.com/openkruise/kruise-api/apps/pub"
|
||||
kruisev1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
@ -133,7 +132,7 @@ var (
|
|||
Name: "workload-demo",
|
||||
},
|
||||
},
|
||||
RolloutID: "1",
|
||||
DeprecatedRolloutID: "1",
|
||||
Strategy: rolloutv1alpha1.RolloutStrategy{
|
||||
Canary: &rolloutv1alpha1.CanaryStrategy{
|
||||
Steps: []rolloutv1alpha1.CanaryStep{
|
||||
|
|
@ -321,8 +320,8 @@ var (
|
|||
Name: "pod-demo",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
util.RolloutBatchIDLabel: "1",
|
||||
util.RolloutIDLabel: "1",
|
||||
rolloutv1alpha1.RolloutBatchIDLabel: "1",
|
||||
rolloutv1alpha1.RolloutIDLabel: "1",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
|
|
@ -409,8 +408,8 @@ func TestReconcile(t *testing.T) {
|
|||
PodIP: "1.2.3.1",
|
||||
}
|
||||
pod1.Labels = map[string]string{
|
||||
util.RolloutBatchIDLabel: "1",
|
||||
util.RolloutIDLabel: "2",
|
||||
rolloutv1alpha1.RolloutBatchIDLabel: "1",
|
||||
rolloutv1alpha1.RolloutIDLabel: "2",
|
||||
"app": "echoserver",
|
||||
}
|
||||
|
||||
|
|
@ -421,8 +420,8 @@ func TestReconcile(t *testing.T) {
|
|||
PodIP: "1.2.3.2",
|
||||
}
|
||||
pod2.Labels = map[string]string{
|
||||
util.RolloutBatchIDLabel: "2",
|
||||
util.RolloutIDLabel: "2",
|
||||
rolloutv1alpha1.RolloutBatchIDLabel: "2",
|
||||
rolloutv1alpha1.RolloutIDLabel: "2",
|
||||
"app": "echoserver",
|
||||
}
|
||||
|
||||
|
|
@ -433,8 +432,8 @@ func TestReconcile(t *testing.T) {
|
|||
PodIP: "1.2.3.3",
|
||||
}
|
||||
pod3.Labels = map[string]string{
|
||||
util.RolloutBatchIDLabel: "3",
|
||||
util.RolloutIDLabel: "2",
|
||||
rolloutv1alpha1.RolloutBatchIDLabel: "3",
|
||||
rolloutv1alpha1.RolloutIDLabel: "2",
|
||||
"app": "echoserver",
|
||||
}
|
||||
|
||||
|
|
@ -445,8 +444,8 @@ func TestReconcile(t *testing.T) {
|
|||
PodIP: "1.2.3.4",
|
||||
}
|
||||
pod4.Labels = map[string]string{
|
||||
util.RolloutBatchIDLabel: "3",
|
||||
util.RolloutIDLabel: "2",
|
||||
rolloutv1alpha1.RolloutBatchIDLabel: "3",
|
||||
rolloutv1alpha1.RolloutIDLabel: "2",
|
||||
"app": "echoserver",
|
||||
}
|
||||
|
||||
|
|
@ -457,8 +456,8 @@ func TestReconcile(t *testing.T) {
|
|||
PodIP: "1.2.3.5",
|
||||
}
|
||||
pod5.Labels = map[string]string{
|
||||
util.RolloutBatchIDLabel: "3",
|
||||
util.RolloutIDLabel: "2",
|
||||
rolloutv1alpha1.RolloutBatchIDLabel: "3",
|
||||
rolloutv1alpha1.RolloutIDLabel: "2",
|
||||
"app": "echoserver",
|
||||
}
|
||||
|
||||
|
|
@ -482,7 +481,7 @@ func TestReconcile(t *testing.T) {
|
|||
},
|
||||
getRollout: func() []*rolloutv1alpha1.Rollout {
|
||||
rollout := rolloutDemo1.DeepCopy()
|
||||
rollout.Spec.RolloutID = "2"
|
||||
rollout.Spec.DeprecatedRolloutID = "2"
|
||||
rollout.Status = rolloutv1alpha1.RolloutStatus{
|
||||
CanaryStatus: &rolloutv1alpha1.CanaryStatus{
|
||||
ObservedRolloutID: "2",
|
||||
|
|
@ -615,7 +614,7 @@ func TestReconcile(t *testing.T) {
|
|||
},
|
||||
getRollout: func() []*rolloutv1alpha1.Rollout {
|
||||
rollout := rolloutDemo1.DeepCopy()
|
||||
rollout.Spec.RolloutID = ""
|
||||
rollout.Spec.DeprecatedRolloutID = ""
|
||||
rollout.Status = rolloutv1alpha1.RolloutStatus{
|
||||
CanaryStatus: &rolloutv1alpha1.CanaryStatus{
|
||||
ObservedRolloutID: "",
|
||||
|
|
@ -663,7 +662,7 @@ func TestReconcile(t *testing.T) {
|
|||
},
|
||||
getRollout: func() []*rolloutv1alpha1.Rollout {
|
||||
rollout := rolloutDemo1.DeepCopy()
|
||||
rollout.Spec.RolloutID = "4"
|
||||
rollout.Spec.DeprecatedRolloutID = "4"
|
||||
rollout.Status = rolloutv1alpha1.RolloutStatus{
|
||||
CanaryStatus: &rolloutv1alpha1.CanaryStatus{
|
||||
ObservedRolloutID: "4",
|
||||
|
|
@ -869,7 +868,7 @@ func TestReconcile(t *testing.T) {
|
|||
},
|
||||
getRollout: func() []*rolloutv1alpha1.Rollout {
|
||||
rollout := rolloutDemo1.DeepCopy()
|
||||
rollout.Spec.RolloutID = "5"
|
||||
rollout.Spec.DeprecatedRolloutID = "5"
|
||||
rollout.Status = rolloutv1alpha1.RolloutStatus{
|
||||
CanaryStatus: &rolloutv1alpha1.CanaryStatus{
|
||||
ObservedRolloutID: "5",
|
||||
|
|
|
|||
|
|
@ -0,0 +1,310 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package trafficrouting
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/trafficrouting/network"
|
||||
"github.com/openkruise/rollouts/pkg/trafficrouting/network/gateway"
|
||||
"github.com/openkruise/rollouts/pkg/trafficrouting/network/ingress"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultGracePeriodSeconds int32 = 3
|
||||
rolloutControllerKind = v1alpha1.SchemeGroupVersion.WithKind("Rollout")
|
||||
)
|
||||
|
||||
// Manager responsible for adjusting network resources
|
||||
// such as Service, Ingress, Gateway API, etc., to achieve traffic grayscale.
|
||||
type Manager struct {
|
||||
client.Client
|
||||
}
|
||||
|
||||
func NewTrafficRoutingManager(c client.Client) *Manager {
|
||||
return &Manager{c}
|
||||
}
|
||||
|
||||
// InitializeTrafficRouting determine if the network resources(service & ingress & gateway api) exist.
|
||||
// If it is Ingress, init method will create the canary ingress resources, and set weight=0.
|
||||
func (m *Manager) InitializeTrafficRouting(c *util.RolloutContext) error {
|
||||
if len(c.Rollout.Spec.Strategy.Canary.TrafficRoutings) == 0 {
|
||||
return nil
|
||||
}
|
||||
sService := c.Rollout.Spec.Strategy.Canary.TrafficRoutings[0].Service
|
||||
// check service
|
||||
service := &corev1.Service{}
|
||||
if err := m.Get(context.TODO(), types.NamespacedName{Namespace: c.Rollout.Namespace, Name: sService}, service); err != nil {
|
||||
return err
|
||||
}
|
||||
cService := fmt.Sprintf("%s-canary", sService)
|
||||
// new network provider, ingress or gateway
|
||||
trController, err := newNetworkProvider(m.Client, c.Rollout, c.NewStatus, sService, cService)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) newNetworkProvider failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
return trController.Initialize(context.TODO())
|
||||
}
|
||||
|
||||
func (m *Manager) DoTrafficRouting(c *util.RolloutContext) (bool, error) {
|
||||
if len(c.Rollout.Spec.Strategy.Canary.TrafficRoutings) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
trafficRouting := c.Rollout.Spec.Strategy.Canary.TrafficRoutings[0]
|
||||
if trafficRouting.GracePeriodSeconds <= 0 {
|
||||
trafficRouting.GracePeriodSeconds = defaultGracePeriodSeconds
|
||||
}
|
||||
canaryStatus := c.NewStatus.CanaryStatus
|
||||
if canaryStatus.StableRevision == "" || canaryStatus.PodTemplateHash == "" {
|
||||
klog.Warningf("rollout(%s/%s) stableRevision or podTemplateHash can not be empty, and wait a moment", c.Rollout.Namespace, c.Rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
//fetch stable service
|
||||
stableService := &corev1.Service{}
|
||||
err := m.Get(context.TODO(), client.ObjectKey{Namespace: c.Rollout.Namespace, Name: trafficRouting.Service}, stableService)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) get stable service(%s) failed: %s", c.Rollout.Namespace, c.Rollout.Name, trafficRouting.Service, err.Error())
|
||||
// not found, wait a moment, retry
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
// canary service name
|
||||
canaryServiceName := fmt.Sprintf("%s-canary", trafficRouting.Service)
|
||||
// fetch canary service
|
||||
canaryService := &corev1.Service{}
|
||||
err = m.Get(context.TODO(), client.ObjectKey{Namespace: c.Rollout.Namespace, Name: canaryServiceName}, canaryService)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
klog.Errorf("rollout(%s/%s) get canary service(%s) failed: %s", c.Rollout.Namespace, c.Rollout.Name, canaryServiceName, err.Error())
|
||||
return false, err
|
||||
} else if errors.IsNotFound(err) {
|
||||
canaryService, err = m.createCanaryService(c, canaryServiceName, *stableService.Spec.DeepCopy())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// patch canary service only selector the canary pods
|
||||
if canaryService.Spec.Selector[c.Workload.RevisionLabelKey] != canaryStatus.PodTemplateHash {
|
||||
body := fmt.Sprintf(`{"spec":{"selector":{"%s":"%s"}}}`, c.Workload.RevisionLabelKey, canaryStatus.PodTemplateHash)
|
||||
if err = m.Patch(context.TODO(), canaryService, client.RawPatch(types.StrategicMergePatchType, []byte(body))); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) patch canary service(%s) selector failed: %s", c.Rollout.Namespace, c.Rollout.Name, canaryService.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
// update canary service time, and wait 3 seconds, just to be safe
|
||||
canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
klog.Infof("rollout(%s/%s) patch canary service(%s) selector(%s=%s) success",
|
||||
c.Rollout.Namespace, c.Rollout.Name, canaryService.Name, c.Workload.RevisionLabelKey, canaryStatus.PodTemplateHash)
|
||||
}
|
||||
// patch stable service only selector the stable pods
|
||||
if stableService.Spec.Selector[c.Workload.RevisionLabelKey] != canaryStatus.StableRevision {
|
||||
body := fmt.Sprintf(`{"spec":{"selector":{"%s":"%s"}}}`, c.Workload.RevisionLabelKey, canaryStatus.StableRevision)
|
||||
if err = m.Patch(context.TODO(), stableService, client.RawPatch(types.StrategicMergePatchType, []byte(body))); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) patch stable service(%s) selector failed: %s", c.Rollout.Namespace, c.Rollout.Name, stableService.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
// update stable service time, and wait 3 seconds, just to be safe
|
||||
canaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
klog.Infof("add rollout(%s/%s) stable service(%s) selector(%s=%s) success",
|
||||
c.Rollout.Namespace, c.Rollout.Name, stableService.Name, c.Workload.RevisionLabelKey, canaryStatus.StableRevision)
|
||||
return false, nil
|
||||
}
|
||||
// After modify stable service configuration, give the network provider 3 seconds to react
|
||||
if verifyTime := canaryStatus.LastUpdateTime.Add(time.Second * time.Duration(trafficRouting.GracePeriodSeconds)); verifyTime.After(time.Now()) {
|
||||
klog.Infof("rollout(%s/%s) update service selector, and wait 3 seconds", c.Rollout.Namespace, c.Rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// new network provider, ingress or gateway
|
||||
trController, err := newNetworkProvider(m.Client, c.Rollout, c.NewStatus, stableService.Name, canaryService.Name)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) newNetworkProvider failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
cStep := c.Rollout.Spec.Strategy.Canary.Steps[canaryStatus.CurrentStepIndex-1]
|
||||
steps := len(c.Rollout.Spec.Strategy.Canary.Steps)
|
||||
cond := util.GetRolloutCondition(*c.NewStatus, v1alpha1.RolloutConditionProgressing)
|
||||
cond.Message = fmt.Sprintf("Rollout is in step(%d/%d), and doing traffic routing", canaryStatus.CurrentStepIndex, steps)
|
||||
verify, err := trController.EnsureRoutes(context.TODO(), cStep.Weight, cStep.Matches)
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else if !verify {
|
||||
klog.Infof("rollout(%s/%s) is doing step(%d) trafficRouting(%s)", c.Rollout.Namespace, c.Rollout.Name, canaryStatus.CurrentStepIndex, util.DumpJSON(cStep))
|
||||
return false, nil
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) do step(%d) trafficRouting(%s) success", c.Rollout.Namespace, c.Rollout.Name, canaryStatus.CurrentStepIndex, util.DumpJSON(cStep))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (m *Manager) FinalisingTrafficRouting(c *util.RolloutContext, onlyRestoreStableService bool) (bool, error) {
|
||||
if len(c.Rollout.Spec.Strategy.Canary.TrafficRoutings) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
trafficRouting := c.Rollout.Spec.Strategy.Canary.TrafficRoutings[0]
|
||||
if trafficRouting.GracePeriodSeconds <= 0 {
|
||||
trafficRouting.GracePeriodSeconds = defaultGracePeriodSeconds
|
||||
}
|
||||
if c.NewStatus.CanaryStatus == nil {
|
||||
c.NewStatus.CanaryStatus = &v1alpha1.CanaryStatus{}
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) start finalising traffic routing", c.Rollout.Namespace, c.Rollout.Name)
|
||||
// remove stable service the pod revision selector, so stable service will be selector all version pods.
|
||||
verify, err := m.restoreStableService(c)
|
||||
if err != nil || !verify {
|
||||
return false, err
|
||||
} else if onlyRestoreStableService {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
cServiceName := fmt.Sprintf("%s-canary", trafficRouting.Service)
|
||||
trController, err := newNetworkProvider(m.Client, c.Rollout, c.NewStatus, trafficRouting.Service, cServiceName)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) newTrafficRoutingController failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
// First route 100% traffic to stable service
|
||||
verify, err = trController.EnsureRoutes(context.TODO(), utilpointer.Int32(0), nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else if !verify {
|
||||
c.NewStatus.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
|
||||
return false, nil
|
||||
}
|
||||
if c.NewStatus.CanaryStatus.LastUpdateTime != nil {
|
||||
// After restore the stable service configuration, give network provider 3 seconds to react
|
||||
if verifyTime := c.NewStatus.CanaryStatus.LastUpdateTime.Add(time.Second * time.Duration(trafficRouting.GracePeriodSeconds)); verifyTime.After(time.Now()) {
|
||||
klog.Infof("rollout(%s/%s) route 100% traffic to stable service, and wait a moment", c.Rollout.Namespace, c.Rollout.Name)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// modify network(ingress & gateway api) configuration, route all traffic to stable service
|
||||
if err = trController.Finalise(context.TODO()); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// remove canary service
|
||||
cService := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: c.Rollout.Namespace, Name: cServiceName}}
|
||||
err = m.Delete(context.TODO(), cService)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
klog.Errorf("rollout(%s/%s) remove canary service(%s) failed: %s", c.Rollout.Namespace, c.Rollout.Name, cService.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) remove canary service(%s) success", c.Rollout.Namespace, c.Rollout.Name, cService.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func newNetworkProvider(c client.Client, rollout *v1alpha1.Rollout, newStatus *v1alpha1.RolloutStatus, sService, cService string) (network.NetworkProvider, error) {
|
||||
trafficRouting := rollout.Spec.Strategy.Canary.TrafficRoutings[0]
|
||||
if trafficRouting.Ingress != nil {
|
||||
return ingress.NewIngressTrafficRouting(c, ingress.Config{
|
||||
RolloutName: rollout.Name,
|
||||
RolloutNs: rollout.Namespace,
|
||||
CanaryService: cService,
|
||||
StableService: sService,
|
||||
TrafficConf: trafficRouting.Ingress,
|
||||
OwnerRef: *metav1.NewControllerRef(rollout, rolloutControllerKind),
|
||||
})
|
||||
}
|
||||
if trafficRouting.Gateway != nil {
|
||||
return gateway.NewGatewayTrafficRouting(c, gateway.Config{
|
||||
RolloutName: rollout.Name,
|
||||
RolloutNs: rollout.Namespace,
|
||||
CanaryService: cService,
|
||||
StableService: sService,
|
||||
TrafficConf: trafficRouting.Gateway,
|
||||
})
|
||||
}
|
||||
return nil, fmt.Errorf("TrafficRouting current only support Ingress or Gateway API")
|
||||
}
|
||||
|
||||
func (m *Manager) createCanaryService(c *util.RolloutContext, cService string, spec corev1.ServiceSpec) (*corev1.Service, error) {
|
||||
canaryService := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: c.Rollout.Namespace,
|
||||
Name: cService,
|
||||
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(c.Rollout, rolloutControllerKind)},
|
||||
},
|
||||
Spec: spec,
|
||||
}
|
||||
|
||||
// set field nil
|
||||
canaryService.Spec.ClusterIP = ""
|
||||
canaryService.Spec.ClusterIPs = nil
|
||||
canaryService.Spec.ExternalIPs = nil
|
||||
canaryService.Spec.IPFamilyPolicy = nil
|
||||
canaryService.Spec.IPFamilies = nil
|
||||
canaryService.Spec.LoadBalancerIP = ""
|
||||
canaryService.Spec.Selector[c.Workload.RevisionLabelKey] = c.NewStatus.CanaryStatus.PodTemplateHash
|
||||
err := m.Create(context.TODO(), canaryService)
|
||||
if err != nil && !errors.IsAlreadyExists(err) {
|
||||
klog.Errorf("rollout(%s/%s) create canary service(%s) failed: %s", c.Rollout.Namespace, c.Rollout.Name, cService, err.Error())
|
||||
return nil, err
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) create canary service(%s) success", c.Rollout.Namespace, c.Rollout.Name, util.DumpJSON(canaryService))
|
||||
return canaryService, nil
|
||||
}
|
||||
|
||||
// restoreStableService removes the pod revision selector from the stable
// service, so the stable service selects pods of every revision again.
// It returns (true, nil) when the selector is gone and the grace period has
// elapsed; (false, nil) when the caller should retry later (the patch was
// just issued, or the network provider is still being given time to react).
func (m *Manager) restoreStableService(c *util.RolloutContext) (bool, error) {
	// Without a workload there is no revision selector to remove.
	if c.Workload == nil {
		return true, nil
	}
	trafficRouting := c.Rollout.Spec.Strategy.Canary.TrafficRoutings[0]
	//fetch stable service
	stableService := &corev1.Service{}
	err := m.Get(context.TODO(), client.ObjectKey{Namespace: c.Rollout.Namespace, Name: trafficRouting.Service}, stableService)
	if err != nil {
		// A missing stable service means there is nothing left to restore.
		if errors.IsNotFound(err) {
			return true, nil
		}
		klog.Errorf("rollout(%s/%s) get stable service(%s) failed: %s", c.Rollout.Namespace, c.Rollout.Name, trafficRouting.Service, err.Error())
		return false, err
	}
	if stableService.Spec.Selector[c.Workload.RevisionLabelKey] != "" {
		// Delete only the revision key from the selector; in a strategic
		// merge patch a "null" value removes the key.
		body := fmt.Sprintf(`{"spec":{"selector":{"%s":null}}}`, c.Workload.RevisionLabelKey)
		if err = m.Patch(context.TODO(), stableService, client.RawPatch(types.StrategicMergePatchType, []byte(body))); err != nil {
			klog.Errorf("rollout(%s/%s) patch stable service(%s) failed: %s", c.Rollout.Namespace, c.Rollout.Name, trafficRouting.Service, err.Error())
			return false, err
		}
		klog.Infof("remove rollout(%s/%s) stable service(%s) pod revision selector, and wait a moment", c.Rollout.Namespace, c.Rollout.Name, trafficRouting.Service)
		// Record when the patch happened; the grace period below counts from here.
		c.NewStatus.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
		return false, nil
	}
	// No recorded change means the selector was never present: nothing to wait for.
	if c.NewStatus.CanaryStatus.LastUpdateTime == nil {
		return true, nil
	}
	// After restoring the stable service configuration, give the network
	// provider GracePeriodSeconds to react before declaring success.
	if verifyTime := c.NewStatus.CanaryStatus.LastUpdateTime.Add(time.Second * time.Duration(trafficRouting.GracePeriodSeconds)); verifyTime.After(time.Now()) {
		klog.Infof("rollout(%s/%s) restoring stable service(%s), and wait a moment", c.Rollout.Namespace, c.Rollout.Name, trafficRouting.Service)
		return false, nil
	}
	klog.Infof("rollout(%s/%s) doFinalising stable service(%s) success", c.Rollout.Namespace, c.Rollout.Name, trafficRouting.Service)
	return true, nil
}
|
||||
|
|
@ -0,0 +1,677 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package trafficrouting
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
kruisev1aplphal "github.com/openkruise/kruise-api/apps/v1alpha1"
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
"github.com/openkruise/rollouts/pkg/util/configuration"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
netv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
|
||||
var (
	scheme *runtime.Scheme
	// Annotation prefix used by the nginx ingress controller for canary rules.
	nginxIngressAnnotationDefaultPrefix = "nginx.ingress.kubernetes.io"

	// demoService is the stable service fixture; tests DeepCopy and mutate it.
	demoService = corev1.Service{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Service",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "echoserver",
		},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{
				{
					Name:       "http",
					Port:       80,
					TargetPort: intstr.FromInt(8080),
				},
			},
			Selector: map[string]string{
				"app": "echoserver",
			},
		},
	}

	// demoIngress is the stable nginx ingress fixture routing to demoService.
	demoIngress = netv1.Ingress{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "networking.k8s.io/v1",
			Kind:       "Ingress",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "echoserver",
			Annotations: map[string]string{
				"kubernetes.io/ingress.class": "nginx",
			},
		},
		Spec: netv1.IngressSpec{
			Rules: []netv1.IngressRule{
				{
					Host: "echoserver.example.com",
					IngressRuleValue: netv1.IngressRuleValue{
						HTTP: &netv1.HTTPIngressRuleValue{
							Paths: []netv1.HTTPIngressPath{
								{
									Path: "/apis/echo",
									Backend: netv1.IngressBackend{
										Service: &netv1.IngressServiceBackend{
											Name: "echoserver",
											Port: netv1.ServiceBackendPort{
												Name: "http",
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	// demoRollout is a four-step canary rollout fixture that is mid-progress
	// (step 1, state TrafficRouting) with ingress-based traffic routing.
	demoRollout = &v1alpha1.Rollout{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "rollout-demo",
			Labels: map[string]string{},
			Annotations: map[string]string{
				util.RolloutHashAnnotation: "rollout-hash-v1",
			},
		},
		Spec: v1alpha1.RolloutSpec{
			ObjectRef: v1alpha1.ObjectRef{
				WorkloadRef: &v1alpha1.WorkloadRef{
					APIVersion: "apps/v1",
					Kind:       "Deployment",
					Name:       "echoserver",
				},
			},
			Strategy: v1alpha1.RolloutStrategy{
				Canary: &v1alpha1.CanaryStrategy{
					Steps: []v1alpha1.CanaryStep{
						{
							Weight:   utilpointer.Int32(5),
							Replicas: &intstr.IntOrString{IntVal: 1},
						},
						{
							Weight:   utilpointer.Int32(20),
							Replicas: &intstr.IntOrString{IntVal: 2},
						},
						{
							Weight:   utilpointer.Int32(60),
							Replicas: &intstr.IntOrString{IntVal: 6},
						},
						{
							Weight:   utilpointer.Int32(100),
							Replicas: &intstr.IntOrString{IntVal: 10},
						},
					},
					TrafficRoutings: []*v1alpha1.TrafficRouting{
						{
							Service: "echoserver",
							Ingress: &v1alpha1.IngressTrafficRouting{
								Name: "echoserver",
							},
						},
					},
				},
			},
		},
		Status: v1alpha1.RolloutStatus{
			Phase: v1alpha1.RolloutPhaseProgressing,
			CanaryStatus: &v1alpha1.CanaryStatus{
				ObservedWorkloadGeneration: 1,
				RolloutHash:                "rollout-hash-v1",
				ObservedRolloutID:          "rollout-id-1",
				StableRevision:             "podtemplatehash-v1",
				CanaryRevision:             "revision-v2",
				CurrentStepIndex:           1,
				CurrentStepState:           v1alpha1.CanaryStepStateTrafficRouting,
				PodTemplateHash:            "podtemplatehash-v2",
				LastUpdateTime:             &metav1.Time{Time: time.Now()},
			},
			Conditions: []v1alpha1.RolloutCondition{
				{
					Type:   v1alpha1.RolloutConditionProgressing,
					Reason: v1alpha1.ProgressingReasonInRolling,
					Status: corev1.ConditionFalse,
				},
			},
		},
	}

	// demoConf is the rollout configuration ConfigMap holding the Lua script
	// that translates canary state into nginx ingress annotations.
	// The script body is runtime data and must stay byte-identical.
	demoConf = corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      configuration.RolloutConfigurationName,
			Namespace: util.GetRolloutNamespace(),
		},
		Data: map[string]string{
			fmt.Sprintf("%s.nginx", configuration.LuaTrafficRoutingIngressTypePrefix): `
annotations = obj.annotations
annotations["nginx.ingress.kubernetes.io/canary"] = "true"
annotations["nginx.ingress.kubernetes.io/canary-by-cookie"] = nil
annotations["nginx.ingress.kubernetes.io/canary-by-header"] = nil
annotations["nginx.ingress.kubernetes.io/canary-by-header-pattern"] = nil
annotations["nginx.ingress.kubernetes.io/canary-by-header-value"] = nil
annotations["nginx.ingress.kubernetes.io/canary-weight"] = nil
if ( obj.weight ~= "-1" )
then
annotations["nginx.ingress.kubernetes.io/canary-weight"] = obj.weight
end
if ( not obj.matches )
then
return annotations
end
for _,match in ipairs(obj.matches) do
header = match.headers[1]
if ( header.name == "canary-by-cookie" )
then
annotations["nginx.ingress.kubernetes.io/canary-by-cookie"] = header.value
else
annotations["nginx.ingress.kubernetes.io/canary-by-header"] = header.name
if ( header.type == "RegularExpression" )
then
annotations["nginx.ingress.kubernetes.io/canary-by-header-pattern"] = header.value
else
annotations["nginx.ingress.kubernetes.io/canary-by-header-value"] = header.value
end
end
end
return annotations
`,
		},
	}
)
|
||||
|
||||
func init() {
|
||||
scheme = runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
_ = kruisev1aplphal.AddToScheme(scheme)
|
||||
_ = v1alpha1.AddToScheme(scheme)
|
||||
}
|
||||
|
||||
// TestDoTrafficRouting drives DoTrafficRouting through its intermediate
// states with a fake client: each case seeds services/ingresses, runs one
// reconcile step, and compares the resulting objects and done flag.
func TestDoTrafficRouting(t *testing.T) {
	cases := []struct {
		name       string
		getObj     func() ([]*corev1.Service, []*netv1.Ingress)
		getRollout func() (*v1alpha1.Rollout, *util.Workload)
		expectObj  func() ([]*corev1.Service, []*netv1.Ingress)
		expectDone bool
	}{
		{
			// Fresh state: expects the canary service to be created and both
			// service selectors to be pinned to their pod-template hashes.
			name: "DoTrafficRouting test1",
			getObj: func() ([]*corev1.Service, []*netv1.Ingress) {
				return []*corev1.Service{demoService.DeepCopy()}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *util.Workload) {
				return demoRollout.DeepCopy(), &util.Workload{RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey}
			},
			expectObj: func() ([]*corev1.Service, []*netv1.Ingress) {
				s1 := demoService.DeepCopy()
				s1.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v1"
				s2 := demoService.DeepCopy()
				s2.Name = "echoserver-canary"
				s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
				return []*corev1.Service{s1, s2}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			expectDone: false,
		},
		{
			// Services already prepared, LastUpdateTime just set: still inside
			// the grace period, so no canary ingress is created yet.
			name: "DoTrafficRouting test2",
			getObj: func() ([]*corev1.Service, []*netv1.Ingress) {
				s1 := demoService.DeepCopy()
				s1.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v1"
				s2 := demoService.DeepCopy()
				s2.Name = "echoserver-canary"
				s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
				return []*corev1.Service{s1, s2}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *util.Workload) {
				obj := demoRollout.DeepCopy()
				obj.Status.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
				return obj, &util.Workload{RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey}
			},
			expectObj: func() ([]*corev1.Service, []*netv1.Ingress) {
				s1 := demoService.DeepCopy()
				s1.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v1"
				s2 := demoService.DeepCopy()
				s2.Name = "echoserver-canary"
				s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
				c1 := demoIngress.DeepCopy()
				return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1}
			},
			expectDone: false,
		},
		{
			// Grace period elapsed (LastUpdateTime 10s ago): expects the
			// canary ingress to be created with step-1 weight "5".
			name: "DoTrafficRouting test3",
			getObj: func() ([]*corev1.Service, []*netv1.Ingress) {
				s1 := demoService.DeepCopy()
				s1.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v1"
				s2 := demoService.DeepCopy()
				s2.Name = "echoserver-canary"
				s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
				return []*corev1.Service{s1, s2}, []*netv1.Ingress{demoIngress.DeepCopy()}
			},
			getRollout: func() (*v1alpha1.Rollout, *util.Workload) {
				obj := demoRollout.DeepCopy()
				obj.Status.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now().Add(-10 * time.Second)}
				return obj, &util.Workload{RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey}
			},
			expectObj: func() ([]*corev1.Service, []*netv1.Ingress) {
				s1 := demoService.DeepCopy()
				s1.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v1"
				s2 := demoService.DeepCopy()
				s2.Name = "echoserver-canary"
				s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
				c1 := demoIngress.DeepCopy()
				c2 := demoIngress.DeepCopy()
				c2.Name = "echoserver-canary"
				c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
				c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "5"
				c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
				return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
			},
			expectDone: false,
		},
		{
			// Everything already in the desired state for step 1: the call
			// verifies the routes and reports done.
			name: "DoTrafficRouting test4",
			getObj: func() ([]*corev1.Service, []*netv1.Ingress) {
				s1 := demoService.DeepCopy()
				s1.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v1"
				s2 := demoService.DeepCopy()
				s2.Name = "echoserver-canary"
				s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
				c1 := demoIngress.DeepCopy()
				c2 := demoIngress.DeepCopy()
				c2.Name = "echoserver-canary"
				c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
				c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "5"
				c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
				return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
			},
			getRollout: func() (*v1alpha1.Rollout, *util.Workload) {
				obj := demoRollout.DeepCopy()
				obj.Status.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now().Add(-10 * time.Second)}
				return obj, &util.Workload{RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey}
			},
			expectObj: func() ([]*corev1.Service, []*netv1.Ingress) {
				s1 := demoService.DeepCopy()
				s1.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v1"
				s2 := demoService.DeepCopy()
				s2.Name = "echoserver-canary"
				s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
				c1 := demoIngress.DeepCopy()
				c2 := demoIngress.DeepCopy()
				c2.Name = "echoserver-canary"
				c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
				c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "5"
				c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
				return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
			},
			expectDone: true,
		},
		{
			// Step advanced to 2: canary weight should be updated to "20",
			// and the call is not done until the new routes are verified.
			name: "DoTrafficRouting test5",
			getObj: func() ([]*corev1.Service, []*netv1.Ingress) {
				s1 := demoService.DeepCopy()
				s1.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v1"
				s2 := demoService.DeepCopy()
				s2.Name = "echoserver-canary"
				s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
				c1 := demoIngress.DeepCopy()
				c2 := demoIngress.DeepCopy()
				c2.Name = "echoserver-canary"
				c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
				c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "5"
				c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
				return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
			},
			getRollout: func() (*v1alpha1.Rollout, *util.Workload) {
				obj := demoRollout.DeepCopy()
				obj.Status.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now().Add(-10 * time.Second)}
				obj.Status.CanaryStatus.CurrentStepIndex = 2
				return obj, &util.Workload{RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey}
			},
			expectObj: func() ([]*corev1.Service, []*netv1.Ingress) {
				s1 := demoService.DeepCopy()
				s1.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v1"
				s2 := demoService.DeepCopy()
				s2.Name = "echoserver-canary"
				s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
				c1 := demoIngress.DeepCopy()
				c2 := demoIngress.DeepCopy()
				c2.Name = "echoserver-canary"
				c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
				c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "20"
				c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
				return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
			},
			expectDone: false,
		},
	}

	for _, cs := range cases {
		t.Run(cs.name, func(t *testing.T) {
			// Seed the fake client with the first service/ingress plus the
			// Lua configuration; extra objects are created afterwards.
			ss, ig := cs.getObj()
			client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(ig[0], ss[0], demoConf.DeepCopy()).Build()
			if len(ss) == 2 {
				_ = client.Create(context.TODO(), ss[1])
			}
			if len(ig) == 2 {
				_ = client.Create(context.TODO(), ig[1])
			}
			c := &util.RolloutContext{}
			c.Rollout, c.Workload = cs.getRollout()
			c.NewStatus = c.Rollout.Status.DeepCopy()
			manager := NewTrafficRoutingManager(client)
			err := manager.InitializeTrafficRouting(c)
			if err != nil {
				t.Fatalf("InitializeTrafficRouting failed: %s", err)
			}
			done, err := manager.DoTrafficRouting(c)
			if err != nil {
				t.Fatalf("DoTrafficRouting failed: %s", err)
			}
			if cs.expectDone != done {
				t.Fatalf("DoTrafficRouting expect(%v), but get(%v)", cs.expectDone, done)
			}
			// Compare every expected object against the cluster state.
			ss, ig = cs.expectObj()
			for _, obj := range ss {
				checkObjEqual(client, t, obj)
			}
			for _, obj := range ig {
				checkObjEqual(client, t, obj)
			}
		})
	}
}
|
||||
|
||||
func TestFinalisingTrafficRouting(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
getObj func() ([]*corev1.Service, []*netv1.Ingress)
|
||||
getRollout func() (*v1alpha1.Rollout, *util.Workload)
|
||||
onlyRestoreStableService bool
|
||||
expectObj func() ([]*corev1.Service, []*netv1.Ingress)
|
||||
expectDone bool
|
||||
}{
|
||||
{
|
||||
name: "FinalisingTrafficRouting test1",
|
||||
getObj: func() ([]*corev1.Service, []*netv1.Ingress) {
|
||||
s1 := demoService.DeepCopy()
|
||||
s1.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v1"
|
||||
s2 := demoService.DeepCopy()
|
||||
s2.Name = "echoserver-canary"
|
||||
s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
|
||||
c1 := demoIngress.DeepCopy()
|
||||
c2 := demoIngress.DeepCopy()
|
||||
c2.Name = "echoserver-canary"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "100"
|
||||
c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
|
||||
},
|
||||
getRollout: func() (*v1alpha1.Rollout, *util.Workload) {
|
||||
obj := demoRollout.DeepCopy()
|
||||
obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
|
||||
obj.Status.CanaryStatus.CurrentStepIndex = 4
|
||||
return obj, &util.Workload{RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey}
|
||||
},
|
||||
onlyRestoreStableService: true,
|
||||
expectObj: func() ([]*corev1.Service, []*netv1.Ingress) {
|
||||
s1 := demoService.DeepCopy()
|
||||
s2 := demoService.DeepCopy()
|
||||
s2.Name = "echoserver-canary"
|
||||
s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
|
||||
c1 := demoIngress.DeepCopy()
|
||||
c2 := demoIngress.DeepCopy()
|
||||
c2.Name = "echoserver-canary"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "100"
|
||||
c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
|
||||
},
|
||||
expectDone: false,
|
||||
},
|
||||
{
|
||||
name: "FinalisingTrafficRouting test2",
|
||||
getObj: func() ([]*corev1.Service, []*netv1.Ingress) {
|
||||
s1 := demoService.DeepCopy()
|
||||
s2 := demoService.DeepCopy()
|
||||
s2.Name = "echoserver-canary"
|
||||
s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
|
||||
c1 := demoIngress.DeepCopy()
|
||||
c2 := demoIngress.DeepCopy()
|
||||
c2.Name = "echoserver-canary"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "100"
|
||||
c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
|
||||
},
|
||||
getRollout: func() (*v1alpha1.Rollout, *util.Workload) {
|
||||
obj := demoRollout.DeepCopy()
|
||||
obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
|
||||
obj.Status.CanaryStatus.CurrentStepIndex = 4
|
||||
return obj, &util.Workload{RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey}
|
||||
},
|
||||
onlyRestoreStableService: true,
|
||||
expectObj: func() ([]*corev1.Service, []*netv1.Ingress) {
|
||||
s1 := demoService.DeepCopy()
|
||||
s2 := demoService.DeepCopy()
|
||||
s2.Name = "echoserver-canary"
|
||||
s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
|
||||
c1 := demoIngress.DeepCopy()
|
||||
c2 := demoIngress.DeepCopy()
|
||||
c2.Name = "echoserver-canary"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "100"
|
||||
c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
|
||||
},
|
||||
expectDone: false,
|
||||
},
|
||||
{
|
||||
name: "FinalisingTrafficRouting test3",
|
||||
getObj: func() ([]*corev1.Service, []*netv1.Ingress) {
|
||||
s1 := demoService.DeepCopy()
|
||||
s2 := demoService.DeepCopy()
|
||||
s2.Name = "echoserver-canary"
|
||||
s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
|
||||
c1 := demoIngress.DeepCopy()
|
||||
c2 := demoIngress.DeepCopy()
|
||||
c2.Name = "echoserver-canary"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "100"
|
||||
c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
|
||||
},
|
||||
getRollout: func() (*v1alpha1.Rollout, *util.Workload) {
|
||||
obj := demoRollout.DeepCopy()
|
||||
obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
|
||||
obj.Status.CanaryStatus.CurrentStepIndex = 4
|
||||
obj.Status.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now().Add(-10 * time.Second)}
|
||||
return obj, &util.Workload{RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey}
|
||||
},
|
||||
onlyRestoreStableService: true,
|
||||
expectObj: func() ([]*corev1.Service, []*netv1.Ingress) {
|
||||
s1 := demoService.DeepCopy()
|
||||
s2 := demoService.DeepCopy()
|
||||
s2.Name = "echoserver-canary"
|
||||
s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
|
||||
c1 := demoIngress.DeepCopy()
|
||||
c2 := demoIngress.DeepCopy()
|
||||
c2.Name = "echoserver-canary"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "100"
|
||||
c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
|
||||
},
|
||||
expectDone: true,
|
||||
},
|
||||
{
|
||||
name: "FinalisingTrafficRouting test4",
|
||||
getObj: func() ([]*corev1.Service, []*netv1.Ingress) {
|
||||
s1 := demoService.DeepCopy()
|
||||
s2 := demoService.DeepCopy()
|
||||
s2.Name = "echoserver-canary"
|
||||
s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
|
||||
c1 := demoIngress.DeepCopy()
|
||||
c2 := demoIngress.DeepCopy()
|
||||
c2.Name = "echoserver-canary"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "100"
|
||||
c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
|
||||
},
|
||||
getRollout: func() (*v1alpha1.Rollout, *util.Workload) {
|
||||
obj := demoRollout.DeepCopy()
|
||||
obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
|
||||
obj.Status.CanaryStatus.CurrentStepIndex = 4
|
||||
obj.Status.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now().Add(-3 * time.Second)}
|
||||
return obj, &util.Workload{RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey}
|
||||
},
|
||||
onlyRestoreStableService: false,
|
||||
expectObj: func() ([]*corev1.Service, []*netv1.Ingress) {
|
||||
s1 := demoService.DeepCopy()
|
||||
s2 := demoService.DeepCopy()
|
||||
s2.Name = "echoserver-canary"
|
||||
s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
|
||||
c1 := demoIngress.DeepCopy()
|
||||
c2 := demoIngress.DeepCopy()
|
||||
c2.Name = "echoserver-canary"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "0"
|
||||
c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
|
||||
},
|
||||
expectDone: false,
|
||||
},
|
||||
{
|
||||
name: "FinalisingTrafficRouting test5",
|
||||
getObj: func() ([]*corev1.Service, []*netv1.Ingress) {
|
||||
s1 := demoService.DeepCopy()
|
||||
s2 := demoService.DeepCopy()
|
||||
s2.Name = "echoserver-canary"
|
||||
s2.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey] = "podtemplatehash-v2"
|
||||
c1 := demoIngress.DeepCopy()
|
||||
c2 := demoIngress.DeepCopy()
|
||||
c2.Name = "echoserver-canary"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)] = "true"
|
||||
c2.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)] = "0"
|
||||
c2.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return []*corev1.Service{s1, s2}, []*netv1.Ingress{c1, c2}
|
||||
},
|
||||
getRollout: func() (*v1alpha1.Rollout, *util.Workload) {
|
||||
obj := demoRollout.DeepCopy()
|
||||
obj.Status.CanaryStatus.CurrentStepState = v1alpha1.CanaryStepStateCompleted
|
||||
obj.Status.CanaryStatus.CurrentStepIndex = 4
|
||||
obj.Status.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now().Add(-3 * time.Second)}
|
||||
return obj, &util.Workload{RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey}
|
||||
},
|
||||
onlyRestoreStableService: false,
|
||||
expectObj: func() ([]*corev1.Service, []*netv1.Ingress) {
|
||||
s1 := demoService.DeepCopy()
|
||||
c1 := demoIngress.DeepCopy()
|
||||
return []*corev1.Service{s1}, []*netv1.Ingress{c1}
|
||||
},
|
||||
expectDone: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, cs := range cases {
|
||||
t.Run(cs.name, func(t *testing.T) {
|
||||
ss, ig := cs.getObj()
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(ig[0], ss[0], demoConf.DeepCopy()).Build()
|
||||
if len(ss) == 2 {
|
||||
_ = client.Create(context.TODO(), ss[1])
|
||||
}
|
||||
if len(ig) == 2 {
|
||||
_ = client.Create(context.TODO(), ig[1])
|
||||
}
|
||||
c := &util.RolloutContext{}
|
||||
c.Rollout, c.Workload = cs.getRollout()
|
||||
c.NewStatus = c.Rollout.Status.DeepCopy()
|
||||
manager := NewTrafficRoutingManager(client)
|
||||
done, err := manager.FinalisingTrafficRouting(c, cs.onlyRestoreStableService)
|
||||
if err != nil {
|
||||
t.Fatalf("DoTrafficRouting failed: %s", err)
|
||||
}
|
||||
if cs.expectDone != done {
|
||||
t.Fatalf("DoTrafficRouting expect(%v), but get(%v)", cs.expectDone, done)
|
||||
}
|
||||
ss, ig = cs.expectObj()
|
||||
for _, obj := range ss {
|
||||
checkObjEqual(client, t, obj)
|
||||
}
|
||||
for _, obj := range ig {
|
||||
checkObjEqual(client, t, obj)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func checkObjEqual(c client.WithWatch, t *testing.T, expect client.Object) {
|
||||
gvk := expect.GetObjectKind().GroupVersionKind()
|
||||
obj := getEmptyObject(gvk)
|
||||
err := c.Get(context.TODO(), client.ObjectKey{Namespace: expect.GetNamespace(), Name: expect.GetName()}, obj)
|
||||
if err != nil {
|
||||
t.Fatalf("get object failed: %s", err.Error())
|
||||
}
|
||||
switch gvk.Kind {
|
||||
case "Service":
|
||||
s1 := obj.(*corev1.Service)
|
||||
s2 := expect.(*corev1.Service)
|
||||
if !reflect.DeepEqual(s1.Spec, s2.Spec) {
|
||||
t.Fatalf("expect(%s), but get object(%s)", util.DumpJSON(s2.Spec), util.DumpJSON(s1.Spec))
|
||||
}
|
||||
case "Ingress":
|
||||
s1 := obj.(*netv1.Ingress)
|
||||
s2 := expect.(*netv1.Ingress)
|
||||
if !reflect.DeepEqual(s1.Spec, s2.Spec) || !reflect.DeepEqual(s1.Annotations, s2.Annotations) {
|
||||
t.Fatalf("expect(%s), but get object(%s)", util.DumpJSON(s2), util.DumpJSON(s1))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getEmptyObject(gvk schema.GroupVersionKind) client.Object {
|
||||
switch gvk.Kind {
|
||||
case "Service":
|
||||
return &corev1.Service{}
|
||||
case "Ingress":
|
||||
return &netv1.Ingress{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,12 +1,9 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -21,7 +18,7 @@ import (
|
|||
"reflect"
|
||||
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/trafficrouting"
|
||||
"github.com/openkruise/rollouts/pkg/trafficrouting/network"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/retry"
|
||||
|
|
@ -45,7 +42,7 @@ type gatewayController struct {
|
|||
}
|
||||
|
||||
// NewGatewayTrafficRouting The Gateway API is a part of the SIG Network.
|
||||
func NewGatewayTrafficRouting(client client.Client, conf Config) (trafficrouting.Controller, error) {
|
||||
func NewGatewayTrafficRouting(client client.Client, conf Config) (network.NetworkProvider, error) {
|
||||
r := &gatewayController{
|
||||
Client: client,
|
||||
conf: conf,
|
||||
|
|
@ -86,20 +83,20 @@ func (r *gatewayController) EnsureRoutes(ctx context.Context, weight *int32, mat
|
|||
return false, nil
|
||||
}
|
||||
|
||||
func (r *gatewayController) Finalise(ctx context.Context) (bool, error) {
|
||||
func (r *gatewayController) Finalise(ctx context.Context) error {
|
||||
httpRoute := &gatewayv1alpha2.HTTPRoute{}
|
||||
err := r.Get(ctx, types.NamespacedName{Namespace: r.conf.RolloutNs, Name: *r.conf.TrafficConf.HTTPRouteName}, httpRoute)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
return nil
|
||||
}
|
||||
klog.Errorf("rollout(%s/%s) get HTTPRoute failed: %s", r.conf.RolloutNs, r.conf.RolloutName, err.Error())
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
// desired rule
|
||||
desiredRule := r.buildDesiredHTTPRoute(httpRoute.Spec.Rules, utilpointer.Int32(-1), nil)
|
||||
if reflect.DeepEqual(httpRoute.Spec.Rules, desiredRule) {
|
||||
return true, nil
|
||||
return nil
|
||||
}
|
||||
routeClone := &gatewayv1alpha2.HTTPRoute{}
|
||||
if err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
|
|
@ -111,10 +108,10 @@ func (r *gatewayController) Finalise(ctx context.Context) (bool, error) {
|
|||
return r.Client.Update(context.TODO(), routeClone)
|
||||
}); err != nil {
|
||||
klog.Errorf("update rollout(%s/%s) httpRoute(%s) failed: %s", r.conf.RolloutNs, r.conf.RolloutName, httpRoute.Name, err.Error())
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) TrafficRouting Finalise success", r.conf.RolloutNs, r.conf.RolloutName)
|
||||
return false, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *gatewayController) buildDesiredHTTPRoute(rules []gatewayv1alpha2.HTTPRouteRule, weight *int32, matches []rolloutv1alpha1.HttpRouteMatch) []gatewayv1alpha2.HTTPRouteRule {
|
||||
|
|
@ -1,12 +1,9 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -24,7 +24,7 @@ import (
|
|||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/pkg/controller/rollout/trafficrouting"
|
||||
"github.com/openkruise/rollouts/pkg/trafficrouting/network"
|
||||
"github.com/openkruise/rollouts/pkg/util"
|
||||
"github.com/openkruise/rollouts/pkg/util/configuration"
|
||||
"github.com/openkruise/rollouts/pkg/util/luamanager"
|
||||
|
|
@ -58,7 +58,7 @@ type Config struct {
|
|||
OwnerRef metav1.OwnerReference
|
||||
}
|
||||
|
||||
func NewIngressTrafficRouting(client client.Client, conf Config) (trafficrouting.Controller, error) {
|
||||
func NewIngressTrafficRouting(client client.Client, conf Config) (network.NetworkProvider, error) {
|
||||
r := &ingressController{
|
||||
Client: client,
|
||||
conf: conf,
|
||||
|
|
@ -108,6 +108,9 @@ func (r *ingressController) EnsureRoutes(ctx context.Context, weight *int32, mat
|
|||
canaryIngress := &netv1.Ingress{}
|
||||
err := r.Get(ctx, types.NamespacedName{Namespace: r.conf.RolloutNs, Name: defaultCanaryIngressName(r.conf.TrafficConf.Name)}, canaryIngress)
|
||||
if err != nil {
|
||||
if weight != nil && *weight == 0 && errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
klog.Errorf("rollout(%s/%s) get canary ingress failed: %s", r.conf.RolloutNs, r.conf.RolloutName, err.Error())
|
||||
return false, err
|
||||
}
|
||||
|
|
@ -135,48 +138,23 @@ func (r *ingressController) EnsureRoutes(ctx context.Context, weight *int32, mat
|
|||
return false, nil
|
||||
}
|
||||
|
||||
func (r *ingressController) Finalise(ctx context.Context) (bool, error) {
|
||||
func (r *ingressController) Finalise(ctx context.Context) error {
|
||||
canaryIngress := &netv1.Ingress{}
|
||||
err := r.Get(ctx, types.NamespacedName{Namespace: r.conf.RolloutNs, Name: r.canaryIngressName}, canaryIngress)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
klog.Errorf("rollout(%s/%s) get canary ingress(%s) failed: %s", r.conf.RolloutNs, r.conf.RolloutName, r.canaryIngressName, err.Error())
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
} else if !canaryIngress.DeletionTimestamp.IsZero() {
|
||||
return false, nil
|
||||
if errors.IsNotFound(err) || !canaryIngress.DeletionTimestamp.IsZero() {
|
||||
return nil
|
||||
}
|
||||
// First, set canary route 0 weight.
|
||||
newAnnotations, err := r.executeLuaForCanary(canaryIngress.Annotations, utilpointer.Int32(0), nil)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) execute lua failed: %s", r.conf.RolloutNs, r.conf.RolloutName, err.Error())
|
||||
return false, err
|
||||
}
|
||||
if !reflect.DeepEqual(canaryIngress.Annotations, newAnnotations) {
|
||||
byte1, _ := json.Marshal(metav1.ObjectMeta{Annotations: canaryIngress.Annotations})
|
||||
byte2, _ := json.Marshal(metav1.ObjectMeta{Annotations: newAnnotations})
|
||||
patch, err := jsonpatch.CreateMergePatch(byte1, byte2)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout(%s/%s) create merge patch failed: %s", r.conf.RolloutNs, r.conf.RolloutName, err.Error())
|
||||
return false, err
|
||||
}
|
||||
body := fmt.Sprintf(`{"metadata":%s}`, string(patch))
|
||||
if err = r.Patch(ctx, canaryIngress, client.RawPatch(types.MergePatchType, []byte(body))); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) set canary ingress(%s) failed: %s", r.conf.RolloutNs, r.conf.RolloutName, canaryIngress.Name, err.Error())
|
||||
return false, err
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) set canary ingress annotations(%s) success", r.conf.RolloutNs, r.conf.RolloutName, util.DumpJSON(newAnnotations))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Second, delete canary ingress
|
||||
// immediate delete canary ingress
|
||||
if err = r.Delete(ctx, canaryIngress); err != nil {
|
||||
klog.Errorf("rollout(%s/%s) remove canary ingress(%s) failed: %s", r.conf.RolloutNs, r.conf.RolloutName, canaryIngress.Name, err.Error())
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
klog.Infof("rollout(%s/%s) remove canary ingress(%s) success", r.conf.RolloutNs, r.conf.RolloutName, canaryIngress.Name)
|
||||
return false, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ingressController) buildCanaryIngress(stableIngress *netv1.Ingress) *netv1.Ingress {
|
||||
|
|
@ -1,12 +1,9 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -85,7 +82,6 @@ var (
|
|||
string.gsub(input, '[^' .. delimiter ..']+', function(w) table.insert(arr, w) end)
|
||||
return arr
|
||||
end
|
||||
|
||||
annotations = obj.annotations
|
||||
annotations["alb.ingress.kubernetes.io/canary"] = "true"
|
||||
annotations["alb.ingress.kubernetes.io/canary-by-cookie"] = nil
|
||||
|
|
@ -103,6 +99,16 @@ var (
|
|||
then
|
||||
return annotations
|
||||
end
|
||||
if ( annotations["alb.ingress.kubernetes.io/backend-svcs-protocols"] )
|
||||
then
|
||||
protocolobj = json.decode(annotations["alb.ingress.kubernetes.io/backend-svcs-protocols"])
|
||||
newprotocolobj = {}
|
||||
for _, v in pairs(protocolobj) do
|
||||
newprotocolobj[obj.canaryService] = v
|
||||
end
|
||||
annotations["alb.ingress.kubernetes.io/backend-svcs-protocols"] = json.encode(newprotocolobj)
|
||||
end
|
||||
|
||||
conditions = {}
|
||||
match = obj.matches[1]
|
||||
for _,header in ipairs(match.headers) do
|
||||
|
|
@ -445,6 +451,7 @@ func TestEnsureRoutes(t *testing.T) {
|
|||
canary.Name = "echoserver-canary"
|
||||
canary.Annotations["alb.ingress.kubernetes.io/canary"] = "true"
|
||||
canary.Annotations["alb.ingress.kubernetes.io/canary-weight"] = "0"
|
||||
canary.Annotations["alb.ingress.kubernetes.io/backend-svcs-protocols"] = `{"echoserver":"http"}`
|
||||
canary.Spec.Rules[0].HTTP.Paths = canary.Spec.Rules[0].HTTP.Paths[:1]
|
||||
canary.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
canary.Spec.Rules[1].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
|
|
@ -475,6 +482,7 @@ func TestEnsureRoutes(t *testing.T) {
|
|||
expect := demoIngress.DeepCopy()
|
||||
expect.Name = "echoserver-canary"
|
||||
expect.Annotations["alb.ingress.kubernetes.io/canary"] = "true"
|
||||
expect.Annotations["alb.ingress.kubernetes.io/backend-svcs-protocols"] = `{"echoserver-canary":"http"}`
|
||||
expect.Annotations["alb.ingress.kubernetes.io/conditions.echoserver-canary"] = `[{"cookieConfig":{"values":[{"key":"demo1","value":"value1"},{"key":"demo2","value":"value2"}]},"type":"Cookie"},{"sourceIpConfig":{"values":["192.168.0.0/16","172.16.0.0/16"]},"type":"SourceIp"},{"headerConfig":{"key":"headername","values":["headervalue1","headervalue2"]},"type":"Header"}]`
|
||||
expect.Spec.Rules[0].HTTP.Paths = expect.Spec.Rules[0].HTTP.Paths[:1]
|
||||
expect.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
|
|
@ -538,34 +546,6 @@ func TestFinalise(t *testing.T) {
|
|||
getConfigmap: func() *corev1.ConfigMap {
|
||||
return demoConf.DeepCopy()
|
||||
},
|
||||
getIngress: func() []*netv1.Ingress {
|
||||
canary := demoIngress.DeepCopy()
|
||||
canary.Name = "echoserver-canary"
|
||||
canary.Annotations["nginx.ingress.kubernetes.io/canary"] = "true"
|
||||
canary.Annotations["nginx.ingress.kubernetes.io/canary-by-cookie"] = "demo"
|
||||
canary.Annotations["nginx.ingress.kubernetes.io/canary-by-header"] = "user_id"
|
||||
canary.Annotations["nginx.ingress.kubernetes.io/canary-by-header-value"] = "123456"
|
||||
canary.Spec.Rules[0].HTTP.Paths = canary.Spec.Rules[0].HTTP.Paths[:1]
|
||||
canary.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
canary.Spec.Rules[1].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return []*netv1.Ingress{demoIngress.DeepCopy(), canary}
|
||||
},
|
||||
expectIngress: func() *netv1.Ingress {
|
||||
expect := demoIngress.DeepCopy()
|
||||
expect.Name = "echoserver-canary"
|
||||
expect.Annotations["nginx.ingress.kubernetes.io/canary"] = "true"
|
||||
expect.Annotations["nginx.ingress.kubernetes.io/canary-weight"] = "0"
|
||||
expect.Spec.Rules[0].HTTP.Paths = expect.Spec.Rules[0].HTTP.Paths[:1]
|
||||
expect.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
expect.Spec.Rules[1].HTTP.Paths[0].Backend.Service.Name = "echoserver-canary"
|
||||
return expect
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "finalise test2",
|
||||
getConfigmap: func() *corev1.ConfigMap {
|
||||
return demoConf.DeepCopy()
|
||||
},
|
||||
getIngress: func() []*netv1.Ingress {
|
||||
canary := demoIngress.DeepCopy()
|
||||
canary.Name = "echoserver-canary"
|
||||
|
|
@ -602,7 +582,7 @@ func TestFinalise(t *testing.T) {
|
|||
t.Fatalf("NewIngressTrafficRouting failed: %s", err.Error())
|
||||
return
|
||||
}
|
||||
_, err = controller.Finalise(context.TODO())
|
||||
err = controller.Finalise(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatalf("EnsureRoutes failed: %s", err.Error())
|
||||
return
|
||||
|
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package trafficrouting
|
||||
package network
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
|
@ -22,11 +22,10 @@ import (
|
|||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
)
|
||||
|
||||
// Controller common function across all TrafficRouting implementation
|
||||
type Controller interface {
|
||||
// Initialize will validate the traffic routing resource
|
||||
// 1. Ingress type, verify the existence of the ingress resource and generate the canary ingress[weight=0%]
|
||||
// 2. Gateway type, verify the existence of the gateway resource
|
||||
// NetworkProvider common function across all TrafficRouting implementation
|
||||
type NetworkProvider interface {
|
||||
// Initialize determine if the network resources(ingress & gateway api) exist.
|
||||
// If it is Ingress, init method will create the canary ingress resources, and set weight=0.
|
||||
Initialize(ctx context.Context) error
|
||||
// EnsureRoutes check and set canary weight and matches.
|
||||
// weight indicates percentage of traffic to canary service, and range of values[0,100]
|
||||
|
|
@ -37,7 +36,5 @@ type Controller interface {
|
|||
EnsureRoutes(ctx context.Context, weight *int32, matches []rolloutv1alpha1.HttpRouteMatch) (bool, error)
|
||||
// Finalise will do some cleanup work after the canary rollout complete, such as delete canary ingress.
|
||||
// Finalise is called with a 3-second delay after completing the canary.
|
||||
// bool indicates whether function Finalise is complete,
|
||||
// for example, when ingress type, only canary ingress Not Found is considered function finalise complete
|
||||
Finalise(ctx context.Context) (bool, error)
|
||||
Finalise(ctx context.Context) error
|
||||
}
|
||||
|
|
@ -17,14 +17,14 @@ limitations under the License.
|
|||
package util
|
||||
|
||||
import (
|
||||
rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// NewRolloutCondition creates a new rollout condition.
|
||||
func NewRolloutCondition(condType rolloutv1alpha1.RolloutConditionType, status corev1.ConditionStatus, reason, message string) *rolloutv1alpha1.RolloutCondition {
|
||||
return &rolloutv1alpha1.RolloutCondition{
|
||||
func NewRolloutCondition(condType v1alpha1.RolloutConditionType, status corev1.ConditionStatus, reason, message string) *v1alpha1.RolloutCondition {
|
||||
return &v1alpha1.RolloutCondition{
|
||||
Type: condType,
|
||||
Status: status,
|
||||
LastUpdateTime: metav1.Now(),
|
||||
|
|
@ -35,7 +35,7 @@ func NewRolloutCondition(condType rolloutv1alpha1.RolloutConditionType, status c
|
|||
}
|
||||
|
||||
// GetRolloutCondition returns the condition with the provided type.
|
||||
func GetRolloutCondition(status rolloutv1alpha1.RolloutStatus, condType rolloutv1alpha1.RolloutConditionType) *rolloutv1alpha1.RolloutCondition {
|
||||
func GetRolloutCondition(status v1alpha1.RolloutStatus, condType v1alpha1.RolloutConditionType) *v1alpha1.RolloutCondition {
|
||||
for i := range status.Conditions {
|
||||
c := status.Conditions[i]
|
||||
if c.Type == condType {
|
||||
|
|
@ -48,9 +48,10 @@ func GetRolloutCondition(status rolloutv1alpha1.RolloutStatus, condType rolloutv
|
|||
// SetRolloutCondition updates the rollout to include the provided condition. If the condition that
|
||||
// we are about to add already exists and has the same status and reason, then we are not going to update
|
||||
// by returning false. Returns true if the condition was updated
|
||||
func SetRolloutCondition(status *rolloutv1alpha1.RolloutStatus, condition rolloutv1alpha1.RolloutCondition) bool {
|
||||
func SetRolloutCondition(status *v1alpha1.RolloutStatus, condition v1alpha1.RolloutCondition) bool {
|
||||
currentCond := GetRolloutCondition(*status, condition.Type)
|
||||
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
|
||||
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason &&
|
||||
currentCond.Message == condition.Message {
|
||||
return false
|
||||
}
|
||||
// Do not update lastTransitionTime if the status of the condition doesn't change.
|
||||
|
|
@ -62,14 +63,9 @@ func SetRolloutCondition(status *rolloutv1alpha1.RolloutStatus, condition rollou
|
|||
return true
|
||||
}
|
||||
|
||||
// RemoveRolloutCondition removes the rollout condition with the provided type.
|
||||
func RemoveRolloutCondition(status *rolloutv1alpha1.RolloutStatus, condType rolloutv1alpha1.RolloutConditionType) {
|
||||
status.Conditions = filterOutCondition(status.Conditions, condType)
|
||||
}
|
||||
|
||||
// filterOutCondition returns a new slice of rollout conditions without conditions with the provided type.
|
||||
func filterOutCondition(conditions []rolloutv1alpha1.RolloutCondition, condType rolloutv1alpha1.RolloutConditionType) []rolloutv1alpha1.RolloutCondition {
|
||||
var newConditions []rolloutv1alpha1.RolloutCondition
|
||||
func filterOutCondition(conditions []v1alpha1.RolloutCondition, condType v1alpha1.RolloutConditionType) []v1alpha1.RolloutCondition {
|
||||
var newConditions []v1alpha1.RolloutCondition
|
||||
for _, c := range conditions {
|
||||
if c.Type == condType {
|
||||
continue
|
||||
|
|
@ -78,3 +74,7 @@ func filterOutCondition(conditions []rolloutv1alpha1.RolloutCondition, condType
|
|||
}
|
||||
return newConditions
|
||||
}
|
||||
|
||||
func RemoveRolloutCondition(status *v1alpha1.RolloutStatus, condType v1alpha1.RolloutConditionType) {
|
||||
status.Conditions = filterOutCondition(status.Conditions, condType)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,8 +25,6 @@ const (
|
|||
InRolloutProgressingAnnotation = "rollouts.kruise.io/in-progressing"
|
||||
// RolloutHashAnnotation record observed rollout spec hash
|
||||
RolloutHashAnnotation = "rollouts.kruise.io/hash"
|
||||
// RollbackInBatchAnnotation allow use disable quick rollback, and will roll back in batch style.
|
||||
RollbackInBatchAnnotation = "rollouts.kruise.io/rollback-in-batch"
|
||||
)
|
||||
|
||||
// For Workloads
|
||||
|
|
@ -43,13 +41,6 @@ const (
|
|||
|
||||
// For Pods
|
||||
const (
|
||||
// RolloutIDLabel is designed to distinguish each workload revision publications.
|
||||
// The value of RolloutIDLabel corresponds Rollout.Spec.RolloutID.
|
||||
RolloutIDLabel = "rollouts.kruise.io/rollout-id"
|
||||
// RolloutBatchIDLabel is the label key of batch id that will be patched to pods during rollout.
|
||||
// Only when RolloutIDLabel is set, RolloutBatchIDLabel will be patched.
|
||||
// Users can use RolloutIDLabel and RolloutBatchIDLabel to select the pods that are upgraded in some certain batch and release.
|
||||
RolloutBatchIDLabel = "rollouts.kruise.io/rollout-batch-id"
|
||||
// NoNeedUpdatePodLabel will be patched to pod when rollback in batches if the pods no need to rollback
|
||||
NoNeedUpdatePodLabel = "rollouts.kruise.io/no-need-update"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,34 @@
|
|||
/*
|
||||
Copyright 2022 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/openkruise/rollouts/api/v1alpha1"
|
||||
)
|
||||
|
||||
type RolloutContext struct {
|
||||
Rollout *v1alpha1.Rollout
|
||||
NewStatus *v1alpha1.RolloutStatus
|
||||
// related workload
|
||||
Workload *Workload
|
||||
// reconcile RequeueAfter recheckTime
|
||||
RecheckTime *time.Time
|
||||
// wait stable workload pods ready
|
||||
WaitReady bool
|
||||
}
|
||||
|
|
@ -47,10 +47,6 @@ type Workload struct {
|
|||
CanaryRevision string
|
||||
// pod template hash is used as service selector hash
|
||||
PodTemplateHash string
|
||||
// canary replicas
|
||||
CanaryReplicas int32
|
||||
// canary ready replicas
|
||||
CanaryReadyReplicas int32
|
||||
// Revision hash key
|
||||
RevisionLabelKey string
|
||||
|
||||
|
|
@ -133,8 +129,6 @@ func (r *ControllerFinder) getKruiseCloneSet(namespace string, ref *rolloutv1alp
|
|||
RevisionLabelKey: apps.DefaultDeploymentUniqueLabelKey,
|
||||
StableRevision: cloneSet.Status.CurrentRevision[strings.LastIndex(cloneSet.Status.CurrentRevision, "-")+1:],
|
||||
CanaryRevision: cloneSet.Status.UpdateRevision[strings.LastIndex(cloneSet.Status.UpdateRevision, "-")+1:],
|
||||
CanaryReplicas: cloneSet.Status.UpdatedReplicas,
|
||||
CanaryReadyReplicas: cloneSet.Status.UpdatedReadyReplicas,
|
||||
ObjectMeta: cloneSet.ObjectMeta,
|
||||
Replicas: *cloneSet.Spec.Replicas,
|
||||
PodTemplateHash: cloneSet.Status.UpdateRevision[strings.LastIndex(cloneSet.Status.UpdateRevision, "-")+1:],
|
||||
|
|
@ -145,7 +139,6 @@ func (r *ControllerFinder) getKruiseCloneSet(namespace string, ref *rolloutv1alp
|
|||
if _, ok = workload.Annotations[InRolloutProgressingAnnotation]; !ok {
|
||||
return workload, nil
|
||||
}
|
||||
|
||||
// in rollout progressing
|
||||
workload.InRolloutProgressing = true
|
||||
// Is it in rollback phase
|
||||
|
|
@ -193,7 +186,6 @@ func (r *ControllerFinder) getDeployment(namespace string, ref *rolloutv1alpha1.
|
|||
if _, ok = workload.Annotations[InRolloutProgressingAnnotation]; !ok {
|
||||
return workload, nil
|
||||
}
|
||||
|
||||
// in rollout progressing
|
||||
workload.InRolloutProgressing = true
|
||||
// workload is continuous release, indicates rollback(v1 -> v2 -> v1)
|
||||
|
|
@ -203,14 +195,11 @@ func (r *ControllerFinder) getDeployment(namespace string, ref *rolloutv1alpha1.
|
|||
workload.IsInRollback = true
|
||||
return workload, nil
|
||||
}
|
||||
|
||||
// canary deployment
|
||||
canary, err := r.getLatestCanaryDeployment(stable)
|
||||
if err != nil || canary == nil {
|
||||
return workload, err
|
||||
}
|
||||
workload.CanaryReplicas = canary.Status.Replicas
|
||||
workload.CanaryReadyReplicas = canary.Status.ReadyReplicas
|
||||
canaryRs, err := r.getDeploymentStableRs(canary)
|
||||
if err != nil || canaryRs == nil {
|
||||
return workload, err
|
||||
|
|
@ -246,8 +235,6 @@ func (r *ControllerFinder) getStatefulSetLikeWorkload(namespace string, ref *rol
|
|||
RevisionLabelKey: apps.ControllerRevisionHashLabelKey,
|
||||
StableRevision: workloadInfo.Status.StableRevision,
|
||||
CanaryRevision: workloadInfo.Status.UpdateRevision,
|
||||
CanaryReplicas: workloadInfo.Status.UpdatedReplicas,
|
||||
CanaryReadyReplicas: workloadInfo.Status.UpdatedReadyReplicas,
|
||||
ObjectMeta: workloadInfo.ObjectMeta,
|
||||
Replicas: workloadInfo.Replicas,
|
||||
PodTemplateHash: workloadInfo.Status.UpdateRevision,
|
||||
|
|
@ -258,14 +245,11 @@ func (r *ControllerFinder) getStatefulSetLikeWorkload(namespace string, ref *rol
|
|||
if _, ok := workload.Annotations[InRolloutProgressingAnnotation]; !ok {
|
||||
return workload, nil
|
||||
}
|
||||
|
||||
// in rollout progressing
|
||||
workload.InRolloutProgressing = true
|
||||
|
||||
if workloadInfo.Status.UpdateRevision == workloadInfo.Status.StableRevision && workloadInfo.Status.UpdatedReplicas != workloadInfo.Status.Replicas {
|
||||
workload.IsInRollback = true
|
||||
}
|
||||
|
||||
return workload, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ import (
|
|||
)
|
||||
|
||||
// patch -> file.Content,
|
||||
// for example: lua_configuration/trafficrouting_ingress/nginx.lua -> nginx.lua content
|
||||
// for example: lua_configuration/trafficrouting_ingress/ingress.lua -> ingress.lua content
|
||||
var luaConfigurationList map[string]string
|
||||
|
||||
func init() {
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ func ParseWorkload(object client.Object) *WorkloadInfo {
|
|||
gvk := object.GetObjectKind().GroupVersionKind()
|
||||
return &WorkloadInfo{
|
||||
LogKey: fmt.Sprintf("%s (%s)", key, gvk),
|
||||
ObjectMeta: *getMetadata(object),
|
||||
ObjectMeta: *GetMetadata(object),
|
||||
Replicas: GetReplicas(object),
|
||||
Status: *ParseWorkloadStatus(object),
|
||||
}
|
||||
|
|
@ -294,8 +294,8 @@ func getSelector(object client.Object) (labels.Selector, error) {
|
|||
}
|
||||
}
|
||||
|
||||
// getMetadata can parse the whole metadata field from client workload object
|
||||
func getMetadata(object client.Object) *metav1.ObjectMeta {
|
||||
// GetMetadata can parse the whole metadata field from client workload object
|
||||
func GetMetadata(object client.Object) *metav1.ObjectMeta {
|
||||
switch o := object.(type) {
|
||||
case *apps.Deployment:
|
||||
return &o.ObjectMeta
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ import (
|
|||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
|
@ -43,16 +44,6 @@ type RolloutState struct {
|
|||
RolloutName string `json:"rolloutName"`
|
||||
}
|
||||
|
||||
func GetRolloutState(annotations map[string]string) (*RolloutState, error) {
|
||||
value, ok := annotations[InRolloutProgressingAnnotation]
|
||||
if !ok || value == "" {
|
||||
return nil, nil
|
||||
}
|
||||
var obj *RolloutState
|
||||
err := json.Unmarshal([]byte(value), &obj)
|
||||
return obj, err
|
||||
}
|
||||
|
||||
func IsRollbackInBatchPolicy(rollout *rolloutv1alpha1.Rollout, labels map[string]string) bool {
|
||||
// currently, only support the case of no traffic routing
|
||||
if len(rollout.Spec.Strategy.Canary.TrafficRoutings) > 0 {
|
||||
|
|
@ -63,8 +54,7 @@ func IsRollbackInBatchPolicy(rollout *rolloutv1alpha1.Rollout, labels map[string
|
|||
if workloadRef.Kind == ControllerKindSts.Kind ||
|
||||
workloadRef.Kind == ControllerKruiseKindCS.Kind ||
|
||||
strings.EqualFold(labels[WorkloadTypeLabel], ControllerKindSts.Kind) {
|
||||
value, ok := rollout.Annotations[RollbackInBatchAnnotation]
|
||||
if ok && value == "true" {
|
||||
if rollout.Annotations[rolloutv1alpha1.RollbackInBatchAnnotation] == "true" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
|
@ -161,3 +151,8 @@ func DumpJSON(o interface{}) string {
|
|||
by, _ := json.Marshal(o)
|
||||
return string(by)
|
||||
}
|
||||
|
||||
// hash hashes `data` with sha256 and returns the hex string
|
||||
func EncodeHash(data string) string {
|
||||
return fmt.Sprintf("%x", sha256.Sum256([]byte(data)))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ import (
|
|||
"encoding/json"
|
||||
"math"
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
|
||||
appsv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
|
||||
|
|
@ -53,17 +54,24 @@ type WorkloadHandler struct {
|
|||
var _ admission.Handler = &WorkloadHandler{}
|
||||
|
||||
// Handle handles admission requests.
|
||||
// TODO
|
||||
// Currently there is an implicit condition for rollout: the workload must be currently in a stable version (only one version of Pods),
|
||||
// if not, it will not enter the rollout process. There is an additional problem here, the user may not be aware of this.
|
||||
// when user does a release and thinks it enters the rollout process, but due to the implicit condition above,
|
||||
// it actually goes through the normal release process. No good idea to solve this problem has been found yet.
|
||||
func (h *WorkloadHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
|
||||
// if subResources, then ignore
|
||||
if req.Operation != admissionv1.Update || req.SubResource != "" {
|
||||
return admission.Allowed("")
|
||||
}
|
||||
|
||||
// Because kruise Rollout is a bypassed approach, needs to be determined in the webhook if the workload meet to enter the rollout progressing:
|
||||
// 1. Traffic Routing, all the following conditions must be met
|
||||
// a. PodTemplateSpec is changed
|
||||
// b. Workload must only contain one version of Pods
|
||||
// 2. No Traffic Routing, Only Release in batches
|
||||
// a. No RolloutId
|
||||
// - PodTemplateSpec is changed
|
||||
// b. Configure RolloutId
|
||||
// - RolloutId and PodTemplateSpec change, enter the rollout progressing.
|
||||
// - RolloutId changes and PodTemplateSpec no change, enter the rollout progressing
|
||||
// - RolloutId no change and PodTemplateSpec change, do not enter the rollout progressing
|
||||
|
||||
switch req.Kind.Group {
|
||||
// kruise cloneSet
|
||||
case kruiseappsv1alpha1.GroupVersion.Group:
|
||||
|
|
@ -156,40 +164,31 @@ func (h *WorkloadHandler) Handle(ctx context.Context, req admission.Request) adm
|
|||
}
|
||||
}
|
||||
|
||||
func (h *WorkloadHandler) handleStatefulSetLikeWorkload(newObj, oldObj *unstructured.Unstructured) (changed bool, err error) {
|
||||
func (h *WorkloadHandler) handleStatefulSetLikeWorkload(newObj, oldObj *unstructured.Unstructured) (bool, error) {
|
||||
// indicate whether the workload can enter the rollout process
|
||||
// 1. replicas > 0
|
||||
replicas := util.GetReplicas(newObj)
|
||||
if replicas == 0 {
|
||||
if util.GetReplicas(newObj) == 0 || !util.IsStatefulSetRollingUpdate(newObj) {
|
||||
return false, nil
|
||||
}
|
||||
oldTemplate := util.GetTemplate(oldObj)
|
||||
if oldTemplate == nil {
|
||||
oldTemplate, newTemplate := util.GetTemplate(oldObj), util.GetTemplate(newObj)
|
||||
if oldTemplate == nil || newTemplate == nil {
|
||||
return false, nil
|
||||
}
|
||||
newTemplate := util.GetTemplate(newObj)
|
||||
if newTemplate == nil {
|
||||
oldMetadata, newMetadata := util.GetMetadata(oldObj), util.GetMetadata(newObj)
|
||||
if newMetadata.Annotations[appsv1alpha1.RolloutIDLabel] != "" &&
|
||||
oldMetadata.Annotations[appsv1alpha1.RolloutIDLabel] == newMetadata.Annotations[appsv1alpha1.RolloutIDLabel] {
|
||||
return false, nil
|
||||
} else if newMetadata.Annotations[appsv1alpha1.RolloutIDLabel] == "" && util.EqualIgnoreHash(oldTemplate, newTemplate) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// 2. statefulset.spec.template is changed
|
||||
if util.EqualIgnoreHash(oldTemplate, newTemplate) {
|
||||
return
|
||||
}
|
||||
// 3. have matched rollout crd
|
||||
rollout, err := h.fetchMatchedRollout(newObj)
|
||||
if err != nil {
|
||||
return
|
||||
} else if rollout == nil {
|
||||
return
|
||||
return false, err
|
||||
} else if rollout == nil || rollout.Spec.Strategy.Canary == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
klog.Infof("StatefulSet-Like Workload(%s/%s) will be in rollout progressing, and paused", newObj.GetNamespace(), newObj.GetName())
|
||||
if !util.IsStatefulSetRollingUpdate(newObj) {
|
||||
return
|
||||
}
|
||||
|
||||
changed = true
|
||||
util.SetStatefulSetPartition(newObj, math.MaxInt16)
|
||||
state := &util.RolloutState{RolloutName: rollout.Name}
|
||||
by, _ := json.Marshal(state)
|
||||
|
|
@ -199,53 +198,50 @@ func (h *WorkloadHandler) handleStatefulSetLikeWorkload(newObj, oldObj *unstruct
|
|||
}
|
||||
annotation[util.InRolloutProgressingAnnotation] = string(by)
|
||||
newObj.SetAnnotations(annotation)
|
||||
return
|
||||
klog.Infof("StatefulSet(%s/%s) will be released incrementally based on Rollout(%s)", newMetadata.Namespace, newMetadata.Name, rollout.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (h *WorkloadHandler) handleDeployment(newObj, oldObj *apps.Deployment) (changed bool, err error) {
|
||||
func (h *WorkloadHandler) handleDeployment(newObj, oldObj *apps.Deployment) (bool, error) {
|
||||
// in rollout progressing
|
||||
if state, _ := util.GetRolloutState(newObj.Annotations); state != nil {
|
||||
// deployment paused=false is not allowed until the rollout is completed
|
||||
if newObj.Spec.Paused == false {
|
||||
changed = true
|
||||
if newObj.Annotations[util.InRolloutProgressingAnnotation] != "" {
|
||||
if !newObj.Spec.Paused || !reflect.DeepEqual(newObj.Spec.Strategy, oldObj.Spec.Strategy) {
|
||||
newObj.Spec.Paused = true
|
||||
klog.Warningf("deployment(%s/%s) is in rollout(%s) progressing, and set paused=true", newObj.Namespace, newObj.Name, state.RolloutName)
|
||||
newObj.Spec.Strategy = oldObj.Spec.Strategy
|
||||
klog.Warningf("deployment(%s/%s) is in rollout progressing, and do not modify strategy", newObj.Namespace, newObj.Name)
|
||||
return true, nil
|
||||
}
|
||||
return
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// indicate whether the workload can enter the rollout process
|
||||
// 1. replicas > 0
|
||||
// replicas > 0
|
||||
if newObj.Spec.Replicas != nil && *newObj.Spec.Replicas == 0 {
|
||||
return
|
||||
return false, nil
|
||||
}
|
||||
// 2. deployment.spec.strategy.type must be RollingUpdate
|
||||
if newObj.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
|
||||
klog.Warningf("deployment(%s/%s) strategy type is 'Recreate', rollout will not work on it", newObj.Namespace, newObj.Name)
|
||||
return
|
||||
if newObj.Annotations[appsv1alpha1.RolloutIDLabel] != "" &&
|
||||
oldObj.Annotations[appsv1alpha1.RolloutIDLabel] == newObj.Annotations[appsv1alpha1.RolloutIDLabel] {
|
||||
return false, nil
|
||||
} else if newObj.Annotations[appsv1alpha1.RolloutIDLabel] == "" && util.EqualIgnoreHash(&oldObj.Spec.Template, &newObj.Spec.Template) {
|
||||
return false, nil
|
||||
}
|
||||
// 3. deployment.spec.PodTemplate not change
|
||||
if util.EqualIgnoreHash(&oldObj.Spec.Template, &newObj.Spec.Template) {
|
||||
return
|
||||
}
|
||||
// 4. the deployment must be in a stable version (only one version of rs)
|
||||
rss, err := h.Finder.GetReplicaSetsForDeployment(newObj)
|
||||
if err != nil {
|
||||
return
|
||||
} else if len(rss) != 1 {
|
||||
klog.Warningf("deployment(%s/%s) contains len(%d) replicaSet, can't in rollout progressing", newObj.Namespace, newObj.Name, len(rss))
|
||||
return
|
||||
}
|
||||
// 5. have matched rollout crd
|
||||
|
||||
rollout, err := h.fetchMatchedRollout(newObj)
|
||||
if err != nil {
|
||||
return
|
||||
} else if rollout == nil {
|
||||
return
|
||||
return false, err
|
||||
} else if rollout == nil || rollout.Spec.Strategy.Canary == nil {
|
||||
return false, nil
|
||||
}
|
||||
// if traffic routing, workload must only be one version of Pods
|
||||
if len(rollout.Spec.Strategy.Canary.TrafficRoutings) > 0 {
|
||||
if rss, err := h.Finder.GetReplicaSetsForDeployment(newObj); err != nil {
|
||||
return false, nil
|
||||
} else if len(rss) != 1 {
|
||||
klog.Warningf("Because deployment(%s/%s) have multiple versions of Pods, so can not enter rollout progressing", newObj.Namespace, newObj.Name)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
klog.Infof("deployment(%s/%s) will be in rollout progressing, and set paused=true", newObj.Namespace, newObj.Name)
|
||||
|
||||
changed = true
|
||||
// need set workload paused = true
|
||||
newObj.Spec.Paused = true
|
||||
state := &util.RolloutState{RolloutName: rollout.Name}
|
||||
|
|
@ -254,30 +250,35 @@ func (h *WorkloadHandler) handleDeployment(newObj, oldObj *apps.Deployment) (cha
|
|||
newObj.Annotations = map[string]string{}
|
||||
}
|
||||
newObj.Annotations[util.InRolloutProgressingAnnotation] = string(by)
|
||||
return
|
||||
klog.Infof("Deployment(%s/%s) will be released incrementally based on Rollout(%s)", newObj.Namespace, newObj.Name, rollout.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (h *WorkloadHandler) handleCloneSet(newObj, oldObj *kruiseappsv1alpha1.CloneSet) (changed bool, err error) {
|
||||
func (h *WorkloadHandler) handleCloneSet(newObj, oldObj *kruiseappsv1alpha1.CloneSet) (bool, error) {
|
||||
// indicate whether the workload can enter the rollout process
|
||||
// 1. replicas > 0
|
||||
// when cloneSet don't contain any pods, no need to enter rollout progressing
|
||||
if newObj.Spec.Replicas != nil && *newObj.Spec.Replicas == 0 {
|
||||
return
|
||||
return false, nil
|
||||
}
|
||||
// 2. cloneSet.spec.PodTemplate is changed
|
||||
if util.EqualIgnoreHash(&oldObj.Spec.Template, &newObj.Spec.Template) {
|
||||
return
|
||||
}
|
||||
// 3. have matched rollout crd
|
||||
rollout, err := h.fetchMatchedRollout(newObj)
|
||||
if err != nil {
|
||||
return
|
||||
} else if rollout == nil {
|
||||
return
|
||||
if newObj.Annotations[appsv1alpha1.RolloutIDLabel] != "" &&
|
||||
oldObj.Annotations[appsv1alpha1.RolloutIDLabel] == newObj.Annotations[appsv1alpha1.RolloutIDLabel] {
|
||||
return false, nil
|
||||
} else if newObj.Annotations[appsv1alpha1.RolloutIDLabel] == "" && util.EqualIgnoreHash(&oldObj.Spec.Template, &newObj.Spec.Template) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
rollout, err := h.fetchMatchedRollout(newObj)
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else if rollout == nil || rollout.Spec.Strategy.Canary == nil {
|
||||
return false, nil
|
||||
}
|
||||
// if traffic routing, there must only be one version of Pods
|
||||
if len(rollout.Spec.Strategy.Canary.TrafficRoutings) > 0 && newObj.Status.Replicas != newObj.Status.UpdatedReplicas {
|
||||
klog.Warningf("Because cloneSet(%s/%s) have multiple versions of Pods, so can not enter rollout progressing", newObj.Namespace, newObj.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
klog.Infof("cloneSet(%s/%s) will be in rollout progressing, and paused", newObj.Namespace, newObj.Name)
|
||||
changed = true
|
||||
// need set workload partition = 100%
|
||||
newObj.Spec.UpdateStrategy.Partition = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"}
|
||||
state := &util.RolloutState{RolloutName: rollout.Name}
|
||||
by, _ := json.Marshal(state)
|
||||
|
|
@ -285,7 +286,8 @@ func (h *WorkloadHandler) handleCloneSet(newObj, oldObj *kruiseappsv1alpha1.Clon
|
|||
newObj.Annotations = map[string]string{}
|
||||
}
|
||||
newObj.Annotations[util.InRolloutProgressingAnnotation] = string(by)
|
||||
return
|
||||
klog.Infof("CloneSet(%s/%s) will be released incrementally based on Rollout(%s)", newObj.Namespace, newObj.Name, rollout.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (h *WorkloadHandler) fetchMatchedRollout(obj client.Object) (*appsv1alpha1.Rollout, error) {
|
||||
|
|
|
|||
|
|
@ -221,6 +221,9 @@ var (
|
|||
Name: "echoserver",
|
||||
},
|
||||
},
|
||||
Strategy: appsv1alpha1.RolloutStrategy{
|
||||
Canary: &appsv1alpha1.CanaryStrategy{},
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
|
@ -313,7 +316,18 @@ func TestHandlerDeployment(t *testing.T) {
|
|||
return []*apps.ReplicaSet{rs1, rs2}
|
||||
},
|
||||
getRollout: func() *appsv1alpha1.Rollout {
|
||||
return rolloutDemo.DeepCopy()
|
||||
demo := rolloutDemo.DeepCopy()
|
||||
demo.Spec.Strategy.Canary = &appsv1alpha1.CanaryStrategy{
|
||||
TrafficRoutings: []*appsv1alpha1.TrafficRouting{
|
||||
{
|
||||
Service: "echoserver",
|
||||
Ingress: &appsv1alpha1.IngressTrafficRouting{
|
||||
Name: "echoserver",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return demo
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -373,6 +387,81 @@ func TestHandlerDeployment(t *testing.T) {
|
|||
return obj
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "rolloutId and podTemplateSpec changed",
|
||||
getObjs: func() (*apps.Deployment, *apps.Deployment) {
|
||||
oldObj := deploymentDemo.DeepCopy()
|
||||
newObj := deploymentDemo.DeepCopy()
|
||||
newObj.Annotations[appsv1alpha1.RolloutIDLabel] = "v2"
|
||||
newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
|
||||
return oldObj, newObj
|
||||
},
|
||||
expectObj: func() *apps.Deployment {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
|
||||
obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
|
||||
obj.Spec.Paused = true
|
||||
obj.Annotations[appsv1alpha1.RolloutIDLabel] = "v2"
|
||||
return obj
|
||||
},
|
||||
getRs: func() []*apps.ReplicaSet {
|
||||
rs := rsDemo.DeepCopy()
|
||||
return []*apps.ReplicaSet{rs}
|
||||
},
|
||||
getRollout: func() *appsv1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
return obj
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "rolloutId change, and podTemplateSpec no change",
|
||||
getObjs: func() (*apps.Deployment, *apps.Deployment) {
|
||||
oldObj := deploymentDemo.DeepCopy()
|
||||
newObj := deploymentDemo.DeepCopy()
|
||||
newObj.Annotations[appsv1alpha1.RolloutIDLabel] = "v1-alpha1"
|
||||
return oldObj, newObj
|
||||
},
|
||||
expectObj: func() *apps.Deployment {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
|
||||
obj.Spec.Paused = true
|
||||
obj.Annotations[appsv1alpha1.RolloutIDLabel] = "v1-alpha1"
|
||||
return obj
|
||||
},
|
||||
getRs: func() []*apps.ReplicaSet {
|
||||
rs := rsDemo.DeepCopy()
|
||||
return []*apps.ReplicaSet{rs}
|
||||
},
|
||||
getRollout: func() *appsv1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
return obj
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "rolloutId no change, and podTemplateSpec change",
|
||||
getObjs: func() (*apps.Deployment, *apps.Deployment) {
|
||||
oldObj := deploymentDemo.DeepCopy()
|
||||
oldObj.Annotations[appsv1alpha1.RolloutIDLabel] = "v1"
|
||||
newObj := deploymentDemo.DeepCopy()
|
||||
newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
|
||||
newObj.Annotations[appsv1alpha1.RolloutIDLabel] = "v1"
|
||||
return oldObj, newObj
|
||||
},
|
||||
expectObj: func() *apps.Deployment {
|
||||
obj := deploymentDemo.DeepCopy()
|
||||
obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
|
||||
obj.Annotations[appsv1alpha1.RolloutIDLabel] = "v1"
|
||||
return obj
|
||||
},
|
||||
getRs: func() []*apps.ReplicaSet {
|
||||
rs := rsDemo.DeepCopy()
|
||||
return []*apps.ReplicaSet{rs}
|
||||
},
|
||||
getRollout: func() *appsv1alpha1.Rollout {
|
||||
obj := rolloutDemo.DeepCopy()
|
||||
return obj
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
decoder, _ := admission.NewDecoder(scheme)
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue