support bluegreen release: support Deployment and CloneSet workloads (#238)

* support bluegreen release: Deployment and CloneSet

Signed-off-by: yunbo <yunbo10124scut@gmail.com>

* support bluegreen release: webhook update

Signed-off-by: yunbo <yunbo10124scut@gmail.com>

* add unit test & split workload mutating webhook

Signed-off-by: yunbo <yunbo10124scut@gmail.com>

* fix a bug caused by a previously merged PR

Signed-off-by: yunbo <yunbo10124scut@gmail.com>

* improve some log information

Signed-off-by: yunbo <yunbo10124scut@gmail.com>

* fix kruise version problem

Signed-off-by: yunbo <yunbo10124scut@gmail.com>

---------

Signed-off-by: yunbo <yunbo10124scut@gmail.com>
Co-authored-by: yunbo <yunbo10124scut@gmail.com>
myname4423 2024-12-24 19:38:52 +08:00 committed by GitHub
parent 056c77dbd2
commit efbd8ba8f9
59 changed files with 6379 additions and 558 deletions

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

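The one-line change above, repeated in each E2E workflow below, pins the OpenKruise chart to 1.7.0 instead of installing whatever the latest release happens to be. To check locally that a pinned chart version is actually published (commands against the repo added above; the version list will vary):

helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm search repo openkruise/kruise --versions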
View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -0,0 +1,146 @@
name: E2E-V1Beta1-BlueGreen-1.19
on:
push:
branches:
- master
- release-*
pull_request: {}
workflow_dispatch: {}
env:
# Common versions
GO_VERSION: '1.19'
KIND_IMAGE: 'kindest/node:v1.19.16'
KIND_CLUSTER_NAME: 'ci-testing'
jobs:
rollout:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
with:
submodules: true
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
- name: Setup Kind Cluster
uses: helm/kind-action@v1.2.0
with:
node_image: ${{ env.KIND_IMAGE }}
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
config: ./test/kind-conf.yaml
- name: Build image
run: |
export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}"
docker build --pull --no-cache . -t $IMAGE
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
- name: Install Kruise
run: |
set -ex
kubectl cluster-info
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
set -e
if [ "$PODS" -eq "2" ]; then
break
fi
sleep 3
done
set +e
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
set -e
if [ "$PODS" -eq "2" ]; then
echo "Wait for kruise-manager ready successfully"
else
echo "Timeout to wait for kruise-manager ready"
exit 1
fi
- name: Install Kruise Rollout
run: |
set -ex
kubectl cluster-info
IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
for ((i=1;i<10;i++));
do
set +e
PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
set -e
if [ "$PODS" -eq "1" ]; then
break
fi
sleep 3
done
set +e
PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
kubectl get node -o yaml
kubectl get all -n kruise-rollout -o yaml
set -e
if [ "$PODS" -eq "1" ]; then
echo "Wait for kruise-rollout ready successfully"
else
echo "Timeout to wait for kruise-rollout ready"
exit 1
fi
- name: Bluegreen Release Disable HPA
run: |
export KUBECONFIG=/home/runner/.kube/config
make ginkgo
set +e
./bin/ginkgo -timeout 60m -v --focus='bluegreen disable hpa test case - autoscaling/v1 for v1.19' test/e2e
retVal=$?
# kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
if [ "${restartCount}" -eq "0" ];then
echo "Kruise-rollout has not restarted"
else
kubectl get pod -n kruise-rollout --no-headers
echo "Kruise-rollout has restarted, abort!!!"
kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
exit 1
fi
exit $retVal
- name: Deployment Bluegreen Release
run: |
export KUBECONFIG=/home/runner/.kube/config
make ginkgo
set +e
./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Deployment - Ingress' test/e2e
retVal=$?
# kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
if [ "${restartCount}" -eq "0" ];then
echo "Kruise-rollout has not restarted"
else
kubectl get pod -n kruise-rollout --no-headers
echo "Kruise-rollout has restarted, abort!!!"
kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
exit 1
fi
exit $retVal
- name: CloneSet Bluegreen Release
run: |
export KUBECONFIG=/home/runner/.kube/config
make ginkgo
set +e
./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Cloneset - Ingress' test/e2e
retVal=$?
# kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
if [ "${restartCount}" -eq "0" ];then
echo "Kruise-rollout has not restarted"
else
kubectl get pod -n kruise-rollout --no-headers
echo "Kruise-rollout has restarted, abort!!!"
kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
exit 1
fi
exit $retVal
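As an aside, the grep/wc polling loops in this workflow could likely be condensed with kubectl wait; a minimal sketch, assuming it is acceptable to wait on every pod in the namespace rather than counting exactly how many report 1/1:

kubectl wait --for=condition=Ready pod --all -n kruise-rollout --timeout=120s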

View File

@@ -0,0 +1,146 @@
name: E2E-V1Beta1-BlueGreen-1.23
on:
push:
branches:
- master
- release-*
pull_request: {}
workflow_dispatch: {}
env:
# Common versions
GO_VERSION: '1.19'
KIND_IMAGE: 'kindest/node:v1.23.3'
KIND_CLUSTER_NAME: 'ci-testing'
jobs:
rollout:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
with:
submodules: true
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
- name: Setup Kind Cluster
uses: helm/kind-action@v1.2.0
with:
node_image: ${{ env.KIND_IMAGE }}
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
config: ./test/kind-conf.yaml
- name: Build image
run: |
export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}"
docker build --pull --no-cache . -t $IMAGE
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
- name: Install Kruise
run: |
set -ex
kubectl cluster-info
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
set -e
if [ "$PODS" -eq "2" ]; then
break
fi
sleep 3
done
set +e
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
set -e
if [ "$PODS" -eq "2" ]; then
echo "Wait for kruise-manager ready successfully"
else
echo "Timeout to wait for kruise-manager ready"
exit 1
fi
- name: Install Kruise Rollout
run: |
set -ex
kubectl cluster-info
IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
for ((i=1;i<10;i++));
do
set +e
PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
set -e
if [ "$PODS" -eq "1" ]; then
break
fi
sleep 3
done
set +e
PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
kubectl get node -o yaml
kubectl get all -n kruise-rollout -o yaml
set -e
if [ "$PODS" -eq "1" ]; then
echo "Wait for kruise-rollout ready successfully"
else
echo "Timeout to wait for kruise-rollout ready"
exit 1
fi
- name: Bluegreen Release Disable HPA
run: |
export KUBECONFIG=/home/runner/.kube/config
make ginkgo
set +e
./bin/ginkgo -timeout 60m -v --focus='bluegreen delete rollout case - autoscaling/v2 for v1.23' test/e2e
retVal=$?
# kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
if [ "${restartCount}" -eq "0" ];then
echo "Kruise-rollout has not restarted"
else
kubectl get pod -n kruise-rollout --no-headers
echo "Kruise-rollout has restarted, abort!!!"
kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
exit 1
fi
exit $retVal
- name: Deployment Bluegreen Release
run: |
export KUBECONFIG=/home/runner/.kube/config
make ginkgo
set +e
./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Deployment - Ingress' test/e2e
retVal=$?
# kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
if [ "${restartCount}" -eq "0" ];then
echo "Kruise-rollout has not restarted"
else
kubectl get pod -n kruise-rollout --no-headers
echo "Kruise-rollout has restarted, abort!!!"
kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
exit 1
fi
exit $retVal
- name: CloneSet Bluegreen Release
run: |
export KUBECONFIG=/home/runner/.kube/config
make ginkgo
set +e
./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Cloneset - Ingress' test/e2e
retVal=$?
# kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
if [ "${restartCount}" -eq "0" ];then
echo "Kruise-rollout has not restarted"
else
kubectl get pod -n kruise-rollout --no-headers
echo "Kruise-rollout has restarted, abort!!!"
kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
exit 1
fi
exit $retVal

View File

@@ -1,4 +1,4 @@
name: E2E-V1Beta1-1.19
name: E2E-V1Beta1-JUMP-1.19
on:
push:
@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -1,4 +1,4 @@
name: E2E-V1Beta1-1.23
name: E2E-V1Beta1-JUMP-1.23
on:
push:
@@ -44,7 +44,7 @@ jobs:
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise
helm install kruise openkruise/kruise --version 1.7.0
for ((i=1;i<10;i++));
do
set +e

View File

@@ -172,7 +172,9 @@ func (dst *Rollout) ConvertFrom(src conversion.Hub) error {
srcV1beta1 := src.(*v1beta1.Rollout)
dst.ObjectMeta = srcV1beta1.ObjectMeta
if !srcV1beta1.Spec.Strategy.IsCanaryStragegy() {
return fmt.Errorf("v1beta1 Rollout with %s strategy cannot be converted to v1alpha1", srcV1beta1.Spec.Strategy.GetRollingStyle())
// only v1beta1 supports bluegreen strategy
// Don't log the message because it will print too often
return nil
}
// spec
dst.Spec = RolloutSpec{

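The behavioral change: converting a v1beta1 Rollout that uses the blue-green strategy down to v1alpha1 no longer returns an error; the conversion copies ObjectMeta, silently skips the spec, and returns nil, since only v1beta1 can represent blue-green. A hypothetical caller illustrating the new contract (object contents are placeholders):

// fragment; assumes the v1alpha1/v1beta1 packages from this repo
src := &v1beta1.Rollout{ /* Spec.Strategy configured for blue-green */ }
dst := &v1alpha1.Rollout{}
err := dst.ConvertFrom(src) // previously an error for blue-green; now nil, with only ObjectMeta copied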
View File

@@ -117,6 +117,8 @@ type BatchReleaseStatus struct {
// Phase is the release plan phase, which indicates the current state of release
// plan state machine in BatchRelease controller.
Phase RolloutPhase `json:"phase,omitempty"`
// Message provides details on why the rollout is in its current phase
Message string `json:"message,omitempty"`
}
type BatchReleaseCanaryStatus struct {

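For illustration, once progressingStateTransition (added to the BatchRelease executor later in this diff) populates the field, a BatchRelease status might render roughly as follows (values are hypothetical):

status:
  phase: Progressing
  message: "cannot upgrade batch, because cloneset default/demo doesn't satisfy conditions: ..."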
View File

@@ -62,31 +62,6 @@ type DeploymentStrategy struct {
Partition intstr.IntOrString `json:"partition,omitempty"`
}
// OriginalDeploymentStrategy stores part of the fields of a workload,
// so that it can be restored when finalizing.
// It is only used for BlueGreen Release.
// Similar to DeploymentStrategy, it is stored as an annotation on the workload.
// However, unlike DeploymentStrategy, it is only used to store and restore the user's strategy.
type OriginalDeploymentStrategy struct {
// The deployment strategy to use to replace existing pods with new ones.
// +optional
// +patchStrategy=retainKeys
Strategy *apps.DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
// The maximum time in seconds for a deployment to make progress before it
// is considered to be failed. The deployment controller will continue to
// process failed deployments and a condition with a ProgressDeadlineExceeded
// reason will be surfaced in the deployment status. Note that progress will
// not be estimated during the time a deployment is paused. Defaults to 600s.
ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
}
type RollingStyleType string
const (
@@ -138,44 +113,3 @@ func SetDefaultDeploymentStrategy(strategy *DeploymentStrategy) {
}
}
}
func SetDefaultSetting(setting *OriginalDeploymentStrategy) {
if setting.ProgressDeadlineSeconds == nil {
setting.ProgressDeadlineSeconds = new(int32)
*setting.ProgressDeadlineSeconds = 600
}
if setting.Strategy == nil {
setting.Strategy = &apps.DeploymentStrategy{}
}
if setting.Strategy.Type == "" {
setting.Strategy.Type = apps.RollingUpdateDeploymentStrategyType
}
if setting.Strategy.Type == apps.RecreateDeploymentStrategyType {
return
}
strategy := setting.Strategy
if strategy.RollingUpdate == nil {
strategy.RollingUpdate = &apps.RollingUpdateDeployment{}
}
if strategy.RollingUpdate.MaxUnavailable == nil {
// Set MaxUnavailable as 25% by default
maxUnavailable := intstr.FromString("25%")
strategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
if strategy.RollingUpdate.MaxSurge == nil {
// Set MaxSurge as 25% by default
maxSurge := intstr.FromString("25%")
strategy.RollingUpdate.MaxUnavailable = &maxSurge
}
// Cannot allow maxSurge==0 && MaxUnavailable==0, otherwise, no pod can be updated when rolling update.
maxSurge, _ := intstr.GetScaledValueFromIntOrPercent(strategy.RollingUpdate.MaxSurge, 100, true)
maxUnavailable, _ := intstr.GetScaledValueFromIntOrPercent(strategy.RollingUpdate.MaxUnavailable, 100, true)
if maxSurge == 0 && maxUnavailable == 0 {
strategy.RollingUpdate = &apps.RollingUpdateDeployment{
MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: 0},
MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
}
}
}

View File

@@ -579,6 +579,8 @@ const (
FinalisingStepReleaseWorkloadControl FinalisingStepType = "ReleaseWorkloadControl"
// All needed work done
FinalisingStepTypeEnd FinalisingStepType = "END"
// Only for debugging use
FinalisingStepWaitEndless FinalisingStepType = "WaitEndless"
)
// +genclient

View File

@@ -422,31 +422,6 @@ func (in *ObjectRef) DeepCopy() *ObjectRef {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OriginalDeploymentStrategy) DeepCopyInto(out *OriginalDeploymentStrategy) {
*out = *in
if in.Strategy != nil {
in, out := &in.Strategy, &out.Strategy
*out = new(v1.DeploymentStrategy)
(*in).DeepCopyInto(*out)
}
if in.ProgressDeadlineSeconds != nil {
in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginalDeploymentStrategy.
func (in *OriginalDeploymentStrategy) DeepCopy() *OriginalDeploymentStrategy {
if in == nil {
return nil
}
out := new(OriginalDeploymentStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PatchPodTemplateMetadata) DeepCopyInto(out *PatchPodTemplateMetadata) {
*out = *in

View File

@@ -507,6 +507,10 @@ spec:
- type
type: object
type: array
message:
description: Message provides details on why the rollout is in its
current phase
type: string
observedGeneration:
description: ObservedGeneration is the most recent generation observed
for this BatchRelease. It corresponds to this BatchRelease's generation,

View File

@@ -161,6 +161,16 @@ rules:
- get
- patch
- update
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:

View File

@@ -65,48 +65,6 @@ webhooks:
resources:
- deployments
sideEffects: None
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
path: /mutate-apps-v1-statefulset
failurePolicy: Fail
name: mstatefulset.kb.io
rules:
- apiGroups:
- apps
apiVersions:
- v1
operations:
- UPDATE
resources:
- statefulsets
sideEffects: None
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
path: /mutate-apps-kruise-io-statefulset
failurePolicy: Fail
name: madvancedstatefulset.kb.io
rules:
- apiGroups:
- apps.kruise.io
apiVersions:
- v1alpha1
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- statefulsets
sideEffects: None
- admissionReviewVersions:
- v1
- v1beta1

View File

@@ -18,16 +18,16 @@ webhooks:
matchExpressions:
- key: rollouts.kruise.io/workload-type
operator: Exists
- name: mstatefulset.kb.io
objectSelector:
matchExpressions:
- key: rollouts.kruise.io/workload-type
operator: Exists
- name: madvancedstatefulset.kb.io
objectSelector:
matchExpressions:
- key: rollouts.kruise.io/workload-type
operator: Exists
# - name: mstatefulset.kb.io
# objectSelector:
# matchExpressions:
# - key: rollouts.kruise.io/workload-type
# operator: Exists
# - name: madvancedstatefulset.kb.io
# objectSelector:
# matchExpressions:
# - key: rollouts.kruise.io/workload-type
# operator: Exists
- name: mdeployment.kb.io
objectSelector:
matchExpressions:

View File

@@ -148,6 +148,7 @@ type BatchReleaseReconciler struct {
// +kubebuilder:rbac:groups=apps.kruise.io,resources=statefulsets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps.kruise.io,resources=daemonsets,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=apps.kruise.io,resources=daemonsets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;update;patch
// Reconcile reads that state of the cluster for a Rollout object and makes changes based on the state read
// and what is in the Rollout.Spec

View File

@@ -24,6 +24,9 @@ import (
appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
"github.com/openkruise/rollouts/api/v1beta1"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle"
bgcloneset "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/cloneset"
bgdeplopyment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/deployment"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/canarystyle"
canarydeployment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/canarystyle/deployment"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle"
@@ -32,6 +35,7 @@ import (
partitiondeployment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle/deployment"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle/statefulset"
"github.com/openkruise/rollouts/pkg/util"
"github.com/openkruise/rollouts/pkg/util/errors"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -145,7 +149,11 @@ func (r *Executor) progressBatches(release *v1beta1.BatchRelease, newStatus *v1b
switch {
case err == nil:
result = reconcile.Result{RequeueAfter: DefaultDuration}
removeProgressingCondition(newStatus)
newStatus.CanaryStatus.CurrentBatchState = v1beta1.VerifyingBatchState
case errors.IsBadRequest(err):
progressingStateTransition(newStatus, v1.ConditionTrue, v1beta1.ProgressingReasonInRolling, err.Error())
fallthrough
default:
klog.Warningf("Failed to upgrade %v, err %v", klog.KObj(release), err)
}
@@ -204,14 +212,14 @@ func (r *Executor) getReleaseController(release *v1beta1.BatchRelease, newStatus
klog.Infof("BatchRelease(%v) using %s-style release controller for this batch release", klog.KObj(release), rollingStyle)
switch rollingStyle {
case v1beta1.BlueGreenRollingStyle:
// if targetRef.APIVersion == appsv1alpha1.GroupVersion.String() && targetRef.Kind == reflect.TypeOf(appsv1alpha1.CloneSet{}).Name() {
// klog.InfoS("Using CloneSet bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace)
// return partitionstyle.NewControlPlane(cloneset.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil
// }
// if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() {
// klog.InfoS("Using Deployment bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace)
// return bluegreenstyle.NewControlPlane(deployment.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil
// }
if targetRef.APIVersion == appsv1alpha1.GroupVersion.String() && targetRef.Kind == reflect.TypeOf(appsv1alpha1.CloneSet{}).Name() {
klog.InfoS("Using CloneSet bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace)
return bluegreenstyle.NewControlPlane(bgcloneset.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil
}
if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() {
klog.InfoS("Using Deployment bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace)
return bluegreenstyle.NewControlPlane(bgdeplopyment.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil
}
case v1beta1.CanaryRollingStyle:
if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() {
@@ -257,3 +265,23 @@ func isPartitioned(release *v1beta1.BatchRelease) bool {
return release.Spec.ReleasePlan.BatchPartition != nil &&
*release.Spec.ReleasePlan.BatchPartition <= release.Status.CanaryStatus.CurrentBatch
}
func progressingStateTransition(status *v1beta1.BatchReleaseStatus, condStatus v1.ConditionStatus, reason, message string) {
cond := util.GetBatchReleaseCondition(*status, v1beta1.RolloutConditionProgressing)
if cond == nil {
cond = util.NewRolloutCondition(v1beta1.RolloutConditionProgressing, condStatus, reason, message)
} else {
cond.Status = condStatus
cond.Reason = reason
if message != "" {
cond.Message = message
}
}
util.SetBatchReleaseCondition(status, *cond)
status.Message = cond.Message
}
func removeProgressingCondition(status *v1beta1.BatchReleaseStatus) {
util.RemoveBatchReleaseCondition(status, v1beta1.RolloutConditionProgressing)
status.Message = ""
}
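The pkg/util/errors helpers used here and in the controllers below (NewBadRequestError, IsBadRequest, NewRetryError) are not included in this diff; a minimal sketch of the typed-error pattern their call sites imply, offered as an assumption about the package's shape rather than its actual code:

package errors

// badRequest marks an error as a user/spec problem rather than a transient failure.
type badRequest struct{ err error }

func (e badRequest) Error() string { return e.err.Error() }

// NewBadRequestError wraps err so callers can classify it with IsBadRequest.
func NewBadRequestError(err error) error { return badRequest{err: err} }

// IsBadRequest reports whether err was produced by NewBadRequestError.
func IsBadRequest(err error) bool {
	_, ok := err.(badRequest)
	return ok
}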

View File

@@ -61,6 +61,9 @@ type BatchContext struct {
Pods []*corev1.Pod `json:"-"`
// filter or sort pods before patch label
FilterFunc FilterFuncType `json:"-"`
// the next two fields are only used for bluegreen style
CurrentSurge intstr.IntOrString `json:"currentSurge,omitempty"`
DesiredSurge intstr.IntOrString `json:"desiredSurge,omitempty"`
}
type FilterFuncType func(pods []*corev1.Pod, ctx *BatchContext) []*corev1.Pod
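CurrentSurge and DesiredSurge drive the surge arithmetic in the bluegreen controllers below: both are resolved against the workload's replica count via intstr.GetScaledValueFromIntOrPercent, and a batch is only pushed forward when desired exceeds current. A small runnable example whose numbers mirror the unit tests later in this diff:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	desiredSurge := intstr.FromString("50%") // batch 0 of the release plan
	currentSurge := intstr.FromInt(0)        // nothing surged yet
	replicas := 10
	desired, _ := intstr.GetScaledValueFromIntOrPercent(&desiredSurge, replicas, true)
	current, _ := intstr.GetScaledValueFromIntOrPercent(&currentSurge, replicas, true)
	fmt.Println(current, desired) // 0 5 -> controller patches maxSurge up to "50%"
}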

View File

@@ -0,0 +1,42 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package control
import "k8s.io/apimachinery/pkg/util/intstr"
// OriginalDeploymentStrategy stores part of the fields of a workload,
// so that it can be restored when finalizing.
// It is only used for BlueGreen Release.
// Similar to DeploymentStrategy, it is stored as an annotation on the workload.
// However, unlike DeploymentStrategy, it is only used to store and restore the user's strategy.
type OriginalDeploymentStrategy struct {
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
// +optional
MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
// The maximum time in seconds for a deployment to make progress before it
// is considered to be failed. The deployment controller will continue to
// process failed deployments and a condition with a ProgressDeadlineExceeded
// reason will be surfaced in the deployment status. Note that progress will
// not be estimated during the time a deployment is paused. Defaults to 600s.
ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
}
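Because the struct is serialized into a workload annotation with util.DumpJSON (see the CloneSet controller below), what ends up on the workload is a compact JSON snapshot; for illustration, with hypothetical values:

// fragment; assumes intstr from k8s.io/apimachinery and util.DumpJSON from this repo
setting := OriginalDeploymentStrategy{
	MaxUnavailable:  &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
	MaxSurge:        &intstr.IntOrString{Type: intstr.String, StrVal: "25%"},
	MinReadySeconds: 30,
}
// util.DumpJSON(&setting) => {"maxUnavailable":1,"maxSurge":"25%","minReadySeconds":30}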

View File

@@ -0,0 +1,225 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloneset
import (
"context"
"fmt"
kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
"github.com/openkruise/rollouts/api/v1beta1"
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/hpa"
"github.com/openkruise/rollouts/pkg/util"
"github.com/openkruise/rollouts/pkg/util/errors"
"github.com/openkruise/rollouts/pkg/util/patch"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
type realController struct {
*util.WorkloadInfo
client client.Client
pods []*corev1.Pod
key types.NamespacedName
object *kruiseappsv1alpha1.CloneSet
}
func NewController(cli client.Client, key types.NamespacedName, _ schema.GroupVersionKind) bluegreenstyle.Interface {
return &realController{
key: key,
client: cli,
}
}
func (rc *realController) GetWorkloadInfo() *util.WorkloadInfo {
return rc.WorkloadInfo
}
func (rc *realController) BuildController() (bluegreenstyle.Interface, error) {
if rc.object != nil {
return rc, nil
}
object := &kruiseappsv1alpha1.CloneSet{}
if err := rc.client.Get(context.TODO(), rc.key, object); err != nil {
return rc, err
}
rc.object = object
rc.WorkloadInfo = util.ParseWorkload(object)
return rc, nil
}
func (rc *realController) ListOwnedPods() ([]*corev1.Pod, error) {
if rc.pods != nil {
return rc.pods, nil
}
var err error
rc.pods, err = util.ListOwnedPods(rc.client, rc.object)
return rc.pods, err
}
func (rc *realController) Initialize(release *v1beta1.BatchRelease) error {
if rc.object == nil || control.IsControlledByBatchRelease(release, rc.object) {
return nil
}
// disable the hpa
if err := hpa.DisableHPA(rc.client, rc.object); err != nil {
return err
}
klog.InfoS("Initialize: disable hpa for cloneset successfully", "cloneset", klog.KObj(rc.object))
// patch the cloneset
setting, err := control.GetOriginalSetting(rc.object)
if err != nil {
return errors.NewBadRequestError(fmt.Errorf("cannot get original setting for cloneset %v: %s from annotation", klog.KObj(rc.object), err.Error()))
}
control.InitOriginalSetting(&setting, rc.object)
patchData := patch.NewClonesetPatch()
patchData.InsertAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation, util.DumpJSON(&setting))
patchData.InsertAnnotation(util.BatchReleaseControlAnnotation, util.DumpJSON(metav1.NewControllerRef(
release, release.GetObjectKind().GroupVersionKind())))
// we use partition = 100% to function as "paused" instead of setting the paused field to true,
// mainly to keep consistency with partition style (partition is already set to 100% in the webhook)
patchData.UpdatePaused(false)
maxSurge := intstr.FromInt(1) // select the minimum positive number as initial value
maxUnavailable := intstr.FromInt(0)
patchData.UpdateMaxSurge(&maxSurge)
patchData.UpdateMaxUnavailable(&maxUnavailable)
patchData.UpdateMinReadySeconds(v1beta1.MaxReadySeconds)
klog.InfoS("Initialize: try to update cloneset", "cloneset", klog.KObj(rc.object), "patchData", patchData.String())
return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
}
func (rc *realController) UpgradeBatch(ctx *batchcontext.BatchContext) error {
if err := control.ValidateReadyForBlueGreenRelease(rc.object); err != nil {
return errors.NewBadRequestError(fmt.Errorf("cannot upgrade batch, because cloneset %v doesn't satisfy conditions: %s", klog.KObj(rc.object), err.Error()))
}
desired, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.DesiredSurge, int(ctx.Replicas), true)
current, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.CurrentSurge, int(ctx.Replicas), true)
if current >= desired {
klog.InfoS("No need to upgrade batch, because current >= desired", "cloneset", klog.KObj(rc.object), "current", current, "desired", desired)
return nil
} else {
klog.InfoS("Will update batch for cloneset, because current < desired", "cloneset", klog.KObj(rc.object), "current", current, "desired", desired)
}
patchData := patch.NewClonesetPatch()
// avoid interference from partition
patchData.UpdatePartiton(nil)
patchData.UpdateMaxSurge(&ctx.DesiredSurge)
return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
}
func (rc *realController) Finalize(release *v1beta1.BatchRelease) error {
if release.Spec.ReleasePlan.BatchPartition != nil {
// continuous release (not supported yet)
/*
patchData := patch.NewClonesetPatch()
patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation)
return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
*/
klog.Warningf("continuous release is not supported yet for bluegreen style release")
return nil
}
// restore the original setting and remove annotation
if !rc.restored() {
c := util.GetEmptyObjectWithKey(rc.object)
setting, err := control.GetOriginalSetting(rc.object)
if err != nil {
return err
}
patchData := patch.NewClonesetPatch()
patchData.UpdateMinReadySeconds(setting.MinReadySeconds)
patchData.UpdateMaxSurge(setting.MaxSurge)
patchData.UpdateMaxUnavailable(setting.MaxUnavailable)
patchData.DeleteAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation)
patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation)
if err := rc.client.Patch(context.TODO(), c, patchData); err != nil {
return err
}
klog.InfoS("Finalize: cloneset bluegreen release: wait all pods updated and ready", "cloneset", klog.KObj(rc.object))
}
// wait all pods updated and ready
if rc.object.Status.ReadyReplicas != rc.object.Status.UpdatedReadyReplicas {
return errors.NewRetryError(fmt.Errorf("cloneset %v finalize not done, readyReplicas %d != updatedReadyReplicas %d, current policy %s",
klog.KObj(rc.object), rc.object.Status.ReadyReplicas, rc.object.Status.UpdatedReadyReplicas, release.Spec.ReleasePlan.FinalizingPolicy))
}
klog.InfoS("Finalize: cloneset bluegreen release: all pods updated and ready", "cloneset", klog.KObj(rc.object))
// restore the hpa
return hpa.RestoreHPA(rc.client, rc.object)
}
func (rc *realController) restored() bool {
if rc.object == nil || rc.object.DeletionTimestamp != nil {
return true
}
if rc.object.Annotations == nil || len(rc.object.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]) == 0 {
return true
}
return false
}
// bluegreen doesn't support rollback in batch, because:
// - bluegreen supports traffic rollback instead, so rollback in batch is not necessary
// - it's difficult for both Deployment and CloneSet to support rollback in batch, given the "minReadySeconds" implementation
func (rc *realController) CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error) {
// current batch index
currentBatch := release.Status.CanaryStatus.CurrentBatch
// the number of expected updated pods
desiredSurge := release.Spec.ReleasePlan.Batches[currentBatch].CanaryReplicas
// the number of current updated pods
currentSurge := intstr.FromInt(0)
if rc.object.Spec.UpdateStrategy.MaxSurge != nil {
currentSurge = *rc.object.Spec.UpdateStrategy.MaxSurge
if currentSurge == intstr.FromInt(1) {
// currentSurge == intstr.FromInt(1) means that currentSurge is the initial value
// if the value is indeed set by user, setting it to 0 still does no harm
currentSurge = intstr.FromInt(0)
}
}
desired, _ := intstr.GetScaledValueFromIntOrPercent(&desiredSurge, int(rc.Replicas), true)
batchContext := &batchcontext.BatchContext{
Pods: rc.pods,
RolloutID: release.Spec.ReleasePlan.RolloutID,
CurrentBatch: currentBatch,
UpdateRevision: release.Status.UpdateRevision,
DesiredSurge: desiredSurge,
CurrentSurge: currentSurge,
// the following fields are used to check if the batch is ready
Replicas: rc.Replicas,
UpdatedReplicas: rc.Status.UpdatedReplicas,
UpdatedReadyReplicas: rc.Status.UpdatedReadyReplicas,
DesiredUpdatedReplicas: int32(desired),
PlannedUpdatedReplicas: int32(desired),
}
// the number of no-need-update pods marked before the rollout
// if noNeedUpdate := release.Status.CanaryStatus.NoNeedUpdateReplicas; noNeedUpdate != nil {
// batchContext.FilterFunc = labelpatch.FilterPodsForUnorderedUpdate
// }
return batchContext, nil
}
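Initialize starts by calling hpa.DisableHPA and Finalize ends with hpa.RestoreHPA; the hpa package itself is not part of this diff, but the unit tests below expect the HPA's scaleTargetRef.Name to be rewritten to <workload-name>-DisableByRollout while the release holds control. A sketch of that disable/restore trick under those assumptions (function bodies are inferred from the tests, not copied from the real package):

package hpa

import (
	"context"
	"strings"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const disableSuffix = "-DisableByRollout"

// disable points the HPA at a non-existent target name, which effectively pauses it.
func disable(cli client.Client, obj *autoscalingv1.HorizontalPodAutoscaler) error {
	if strings.HasSuffix(obj.Spec.ScaleTargetRef.Name, disableSuffix) {
		return nil // already disabled
	}
	patched := obj.DeepCopy()
	patched.Spec.ScaleTargetRef.Name += disableSuffix
	return cli.Patch(context.TODO(), patched, client.MergeFrom(obj))
}

// restore strips the suffix so the HPA resumes managing the workload.
func restore(cli client.Client, obj *autoscalingv1.HorizontalPodAutoscaler) error {
	if !strings.HasSuffix(obj.Spec.ScaleTargetRef.Name, disableSuffix) {
		return nil
	}
	patched := obj.DeepCopy()
	patched.Spec.ScaleTargetRef.Name = strings.TrimSuffix(obj.Spec.ScaleTargetRef.Name, disableSuffix)
	return cli.Patch(context.TODO(), patched, client.MergeFrom(obj))
}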

View File

@@ -0,0 +1,548 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloneset
import (
"context"
"encoding/json"
"reflect"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
rolloutapi "github.com/openkruise/rollouts/api"
"github.com/openkruise/rollouts/api/v1beta1"
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
control "github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/util"
apps "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
var (
scheme = runtime.NewScheme()
cloneKey = types.NamespacedName{
Namespace: "default",
Name: "cloneset",
}
cloneDemo = &kruiseappsv1alpha1.CloneSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps.kruise.io/v1alpha1",
Kind: "CloneSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: cloneKey.Name,
Namespace: cloneKey.Namespace,
Generation: 1,
Labels: map[string]string{
"app": "busybox",
},
Annotations: map[string]string{
"type": "unit-test",
},
},
Spec: kruiseappsv1alpha1.CloneSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "busybox",
},
},
Replicas: pointer.Int32(10),
UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
Paused: true,
Partition: &intstr.IntOrString{Type: intstr.String, StrVal: "0%"},
MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "busybox",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "busybox",
Image: "busybox:latest",
},
},
},
},
},
Status: kruiseappsv1alpha1.CloneSetStatus{
Replicas: 10,
UpdatedReplicas: 0,
ReadyReplicas: 10,
AvailableReplicas: 10,
UpdatedReadyReplicas: 0,
UpdateRevision: "version-2",
CurrentRevision: "version-1",
ObservedGeneration: 1,
CollisionCount: pointer.Int32(1),
},
}
releaseDemo = &v1beta1.BatchRelease{
TypeMeta: metav1.TypeMeta{
APIVersion: "rollouts.kruise.io/v1alpha1",
Kind: "BatchRelease",
},
ObjectMeta: metav1.ObjectMeta{
Name: "release",
Namespace: cloneKey.Namespace,
UID: uuid.NewUUID(),
},
Spec: v1beta1.BatchReleaseSpec{
ReleasePlan: v1beta1.ReleasePlan{
Batches: []v1beta1.ReleaseBatch{
{
CanaryReplicas: intstr.FromString("10%"),
},
{
CanaryReplicas: intstr.FromString("50%"),
},
{
CanaryReplicas: intstr.FromString("100%"),
},
},
},
WorkloadRef: v1beta1.ObjectRef{
APIVersion: cloneDemo.APIVersion,
Kind: cloneDemo.Kind,
Name: cloneDemo.Name,
},
},
Status: v1beta1.BatchReleaseStatus{
CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
CurrentBatch: 0,
},
},
}
hpaDemo = &autoscalingv1.HorizontalPodAutoscaler{
TypeMeta: metav1.TypeMeta{
APIVersion: "autoscaling/v1",
Kind: "HorizontalPodAutoscaler",
},
ObjectMeta: metav1.ObjectMeta{
Name: "hpa",
Namespace: cloneKey.Namespace,
},
Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
APIVersion: "apps.kruise.io/v1alpha1",
Kind: "CloneSet",
Name: cloneDemo.Name,
},
MinReplicas: pointer.Int32(1),
MaxReplicas: 10,
},
}
)
func init() {
apps.AddToScheme(scheme)
rolloutapi.AddToScheme(scheme)
kruiseappsv1alpha1.AddToScheme(scheme)
autoscalingv1.AddToScheme(scheme)
}
func TestControlPackage(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "CloneSet Control Package Suite")
}
var _ = Describe("CloneSet Control", func() {
var (
c client.Client
rc *realController
cloneset *kruiseappsv1alpha1.CloneSet
release *v1beta1.BatchRelease
hpa *autoscalingv1.HorizontalPodAutoscaler
)
BeforeEach(func() {
cloneset = cloneDemo.DeepCopy()
release = releaseDemo.DeepCopy()
hpa = hpaDemo.DeepCopy()
c = fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(cloneset, release, hpa).
Build()
rc = &realController{
key: types.NamespacedName{Namespace: cloneset.Namespace, Name: cloneset.Name},
client: c,
}
})
It("should initialize cloneset successfully", func() {
// build controller
_, err := rc.BuildController()
Expect(err).NotTo(HaveOccurred())
// call Initialize method
err = retryFunction(3, func() error {
return rc.Initialize(release)
})
Expect(err).NotTo(HaveOccurred())
// inspect if HPA is disabled
disabledHPA := &autoscalingv1.HorizontalPodAutoscaler{}
err = c.Get(context.TODO(), types.NamespacedName{Namespace: hpa.Namespace, Name: hpa.Name}, disabledHPA)
Expect(err).NotTo(HaveOccurred())
Expect(disabledHPA.Spec.ScaleTargetRef.Name).To(Equal(cloneset.Name + "-DisableByRollout"))
// inspect if Cloneset is patched properly
updatedCloneset := &kruiseappsv1alpha1.CloneSet{}
err = c.Get(context.TODO(), client.ObjectKeyFromObject(cloneset), updatedCloneset)
Expect(err).NotTo(HaveOccurred())
// inspect if annotations are added
Expect(updatedCloneset.Annotations).To(HaveKey(v1beta1.OriginalDeploymentStrategyAnnotation))
Expect(updatedCloneset.Annotations).To(HaveKey(util.BatchReleaseControlAnnotation))
Expect(updatedCloneset.Annotations[util.BatchReleaseControlAnnotation]).To(Equal(getControlInfo(release)))
// inspect if strategy is updated
Expect(updatedCloneset.Spec.UpdateStrategy.Paused).To(BeFalse())
Expect(updatedCloneset.Spec.UpdateStrategy.MaxSurge.IntVal).To(Equal(int32(1)))
Expect(updatedCloneset.Spec.UpdateStrategy.MaxUnavailable.IntVal).To(Equal(int32(0)))
Expect(updatedCloneset.Spec.MinReadySeconds).To(Equal(int32(v1beta1.MaxReadySeconds)))
})
It("should finalize CloneSet successfully", func() {
// hack to patch cloneset status
cloneset.Status.UpdatedReadyReplicas = 10
err := c.Status().Update(context.TODO(), cloneset)
Expect(err).NotTo(HaveOccurred())
// build controller
rc.object = nil
_, err = rc.BuildController()
Expect(err).NotTo(HaveOccurred())
// call Finalize method
err = retryFunction(3, func() error {
return rc.Finalize(release)
})
Expect(err).NotTo(HaveOccurred())
// inspect if CloneSet is patched properly
updatedCloneset := &kruiseappsv1alpha1.CloneSet{}
err = c.Get(context.TODO(), client.ObjectKeyFromObject(cloneset), updatedCloneset)
Expect(err).NotTo(HaveOccurred())
// inspect if annotations are removed
Expect(updatedCloneset.Annotations).NotTo(HaveKey(v1beta1.OriginalDeploymentStrategyAnnotation))
Expect(updatedCloneset.Annotations).NotTo(HaveKey(util.BatchReleaseControlAnnotation))
// inspect if strategy is restored
Expect(updatedCloneset.Spec.UpdateStrategy.MaxSurge).To(BeNil())
Expect(*updatedCloneset.Spec.UpdateStrategy.MaxUnavailable).To(Equal(intstr.IntOrString{Type: intstr.Int, IntVal: 1}))
Expect(updatedCloneset.Spec.MinReadySeconds).To(Equal(int32(0)))
// inspect if HPA is restored
restoredHPA := &autoscalingv1.HorizontalPodAutoscaler{}
err = c.Get(context.TODO(), types.NamespacedName{Namespace: hpa.Namespace, Name: hpa.Name}, restoredHPA)
Expect(err).NotTo(HaveOccurred())
Expect(restoredHPA.Spec.ScaleTargetRef.Name).To(Equal(cloneset.Name))
})
It("should upgradBatch for CloneSet successfully", func() {
// call Initialize method
_, err := rc.BuildController()
Expect(err).NotTo(HaveOccurred())
err = retryFunction(3, func() error {
return rc.Initialize(release)
})
Expect(err).NotTo(HaveOccurred())
// call UpgradeBatch method
rc.object = nil
_, err = rc.BuildController()
Expect(err).NotTo(HaveOccurred())
batchContext, err := rc.CalculateBatchContext(release)
Expect(err).NotTo(HaveOccurred())
err = rc.UpgradeBatch(batchContext)
Expect(err).NotTo(HaveOccurred())
// inspect if CloneSet is patched properly
updatedCloneset := &kruiseappsv1alpha1.CloneSet{}
err = c.Get(context.TODO(), client.ObjectKeyFromObject(cloneset), updatedCloneset)
Expect(err).NotTo(HaveOccurred())
Expect(*updatedCloneset.Spec.UpdateStrategy.MaxSurge).To(Equal(intstr.IntOrString{Type: intstr.String, StrVal: "10%"}))
Expect(*updatedCloneset.Spec.UpdateStrategy.MaxUnavailable).To(Equal(intstr.IntOrString{Type: intstr.Int, IntVal: 0}))
})
})
func TestCalculateBatchContext(t *testing.T) {
RegisterFailHandler(Fail)
cases := map[string]struct {
workload func() *kruiseappsv1alpha1.CloneSet
release func() *v1beta1.BatchRelease
result *batchcontext.BatchContext
}{
"normal case batch0": {
workload: func() *kruiseappsv1alpha1.CloneSet {
return &kruiseappsv1alpha1.CloneSet{
Spec: kruiseappsv1alpha1.CloneSetSpec{
Replicas: pointer.Int32Ptr(10),
UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
MaxSurge: func() *intstr.IntOrString { p := intstr.FromInt(1); return &p }(),
},
},
Status: kruiseappsv1alpha1.CloneSetStatus{
Replicas: 10,
UpdatedReplicas: 0,
UpdatedReadyReplicas: 0,
AvailableReplicas: 10,
},
}
},
release: func() *v1beta1.BatchRelease {
r := &v1beta1.BatchRelease{
Spec: v1beta1.BatchReleaseSpec{
ReleasePlan: v1beta1.ReleasePlan{
Batches: []v1beta1.ReleaseBatch{
{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}},
{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
},
},
},
Status: v1beta1.BatchReleaseStatus{
CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
CurrentBatch: 0,
},
UpdateRevision: "update-version",
},
}
return r
},
result: &batchcontext.BatchContext{
CurrentBatch: 0,
DesiredSurge: intstr.FromString("50%"),
CurrentSurge: intstr.FromInt(0),
Replicas: 10,
UpdatedReplicas: 0,
UpdatedReadyReplicas: 0,
UpdateRevision: "update-version",
PlannedUpdatedReplicas: 5,
DesiredUpdatedReplicas: 5,
},
},
"normal case batch1": {
workload: func() *kruiseappsv1alpha1.CloneSet {
return &kruiseappsv1alpha1.CloneSet{
Spec: kruiseappsv1alpha1.CloneSetSpec{
Replicas: pointer.Int32(10),
UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
MaxSurge: func() *intstr.IntOrString { p := intstr.FromString("50%"); return &p }(),
},
},
Status: kruiseappsv1alpha1.CloneSetStatus{
Replicas: 15,
UpdatedReplicas: 5,
UpdatedReadyReplicas: 5,
AvailableReplicas: 10,
},
}
},
release: func() *v1beta1.BatchRelease {
r := &v1beta1.BatchRelease{
Spec: v1beta1.BatchReleaseSpec{
ReleasePlan: v1beta1.ReleasePlan{
Batches: []v1beta1.ReleaseBatch{
{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}},
{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
},
},
},
Status: v1beta1.BatchReleaseStatus{
CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
CurrentBatch: 1,
},
UpdateRevision: "update-version",
},
}
return r
},
result: &batchcontext.BatchContext{
CurrentBatch: 1,
DesiredSurge: intstr.FromString("100%"),
CurrentSurge: intstr.FromString("50%"),
Replicas: 10,
UpdatedReplicas: 5,
UpdatedReadyReplicas: 5,
UpdateRevision: "update-version",
PlannedUpdatedReplicas: 10,
DesiredUpdatedReplicas: 10,
},
},
"normal case batch2": {
workload: func() *kruiseappsv1alpha1.CloneSet {
return &kruiseappsv1alpha1.CloneSet{
Spec: kruiseappsv1alpha1.CloneSetSpec{
Replicas: pointer.Int32Ptr(10),
UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
MaxSurge: func() *intstr.IntOrString { p := intstr.FromString("100%"); return &p }(),
},
},
Status: kruiseappsv1alpha1.CloneSetStatus{
Replicas: 20,
UpdatedReplicas: 10,
UpdatedReadyReplicas: 10,
AvailableReplicas: 10,
ReadyReplicas: 20,
},
}
},
release: func() *v1beta1.BatchRelease {
r := &v1beta1.BatchRelease{
Spec: v1beta1.BatchReleaseSpec{
ReleasePlan: v1beta1.ReleasePlan{
Batches: []v1beta1.ReleaseBatch{
{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}},
{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
},
},
},
Status: v1beta1.BatchReleaseStatus{
CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
CurrentBatch: 2,
},
UpdateRevision: "update-version",
},
}
return r
},
result: &batchcontext.BatchContext{
CurrentBatch: 2,
UpdateRevision: "update-version",
DesiredSurge: intstr.FromString("100%"),
CurrentSurge: intstr.FromString("100%"),
Replicas: 10,
UpdatedReplicas: 10,
UpdatedReadyReplicas: 10,
PlannedUpdatedReplicas: 10,
DesiredUpdatedReplicas: 10,
},
},
}
for name, cs := range cases {
t.Run(name, func(t *testing.T) {
control := realController{
object: cs.workload(),
WorkloadInfo: util.ParseWorkload(cs.workload()),
}
got, err := control.CalculateBatchContext(cs.release())
Expect(err).NotTo(HaveOccurred())
Expect(got.Log()).Should(Equal(cs.result.Log()))
})
}
}
func TestRealController(t *testing.T) {
RegisterFailHandler(Fail)
release := releaseDemo.DeepCopy()
clone := cloneDemo.DeepCopy()
// for unit tests we must set some default values ourselves, since no webhook or controller is running
clone.Spec.UpdateStrategy.Type = kruiseappsv1alpha1.RecreateCloneSetUpdateStrategyType
cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(release, clone).Build()
// build new controller
c := NewController(cli, cloneKey, clone.GroupVersionKind()).(*realController)
controller, err := c.BuildController()
Expect(err).NotTo(HaveOccurred())
// call Initialize
err = controller.Initialize(release)
Expect(err).NotTo(HaveOccurred())
fetch := &kruiseappsv1alpha1.CloneSet{}
Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred())
// check strategy
Expect(fetch.Spec.UpdateStrategy.Type).Should(Equal(kruiseappsv1alpha1.RecreateCloneSetUpdateStrategyType))
// partition is set to 100% in webhook, therefore we cannot observe it in unit test
// Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.Partition, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue())
Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue())
Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue())
Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds)))
// check annotations
Expect(fetch.Annotations[util.BatchReleaseControlAnnotation]).Should(Equal(getControlInfo(release)))
Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{
MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "0%"},
MinReadySeconds: 0,
})))
c.object = fetch // mock
for {
batchContext, err := controller.CalculateBatchContext(release)
Expect(err).NotTo(HaveOccurred())
err = controller.UpgradeBatch(batchContext)
fetch = &kruiseappsv1alpha1.CloneSet{}
// mock
Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred())
c.object = fetch
if err == nil {
break
}
}
fetch = &kruiseappsv1alpha1.CloneSet{}
Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred())
Expect(fetch.Spec.UpdateStrategy.MaxSurge.StrVal).Should(Equal("10%"))
Expect(fetch.Spec.UpdateStrategy.MaxUnavailable.IntVal).Should(Equal(int32(0)))
Expect(fetch.Spec.UpdateStrategy.Paused).Should(Equal(false))
Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds)))
Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{
MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "0%"},
MinReadySeconds: 0,
})))
controller.Finalize(release)
fetch = &kruiseappsv1alpha1.CloneSet{}
Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred())
Expect(fetch.Spec.UpdateStrategy.MaxSurge.StrVal).Should(Equal("0%"))
Expect(fetch.Spec.UpdateStrategy.MaxUnavailable.IntVal).Should(Equal(int32(1)))
Expect(fetch.Spec.UpdateStrategy.Paused).Should(Equal(false))
Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(0)))
}
func getControlInfo(release *v1beta1.BatchRelease) string {
owner, _ := json.Marshal(metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind()))
return string(owner)
}
func retryFunction(limit int, f func() error) (err error) {
for i := limit; i >= 0; i-- {
if err = f(); err == nil {
return nil
}
}
return err
}

View File

@@ -0,0 +1,178 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bluegreenstyle
import (
"github.com/openkruise/rollouts/api/v1beta1"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/labelpatch"
"github.com/openkruise/rollouts/pkg/util"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
type realBatchControlPlane struct {
Interface
client.Client
record.EventRecorder
patcher labelpatch.LabelPatcher
release *v1beta1.BatchRelease
newStatus *v1beta1.BatchReleaseStatus
}
type NewInterfaceFunc func(cli client.Client, key types.NamespacedName, gvk schema.GroupVersionKind) Interface
// NewControlPlane creates a new bluegreen-style release controller to drive the batch release state machine
func NewControlPlane(f NewInterfaceFunc, cli client.Client, recorder record.EventRecorder, release *v1beta1.BatchRelease, newStatus *v1beta1.BatchReleaseStatus, key types.NamespacedName, gvk schema.GroupVersionKind) *realBatchControlPlane {
return &realBatchControlPlane{
Client: cli,
EventRecorder: recorder,
newStatus: newStatus,
Interface: f(cli, key, gvk),
release: release.DeepCopy(),
patcher: labelpatch.NewLabelPatcher(cli, klog.KObj(release)),
}
}
func (rc *realBatchControlPlane) Initialize() error {
controller, err := rc.BuildController()
if err != nil {
return err
}
// claim workload under our control
err = controller.Initialize(rc.release)
if err != nil {
return err
}
// record revision and replicas
workloadInfo := controller.GetWorkloadInfo()
rc.newStatus.StableRevision = workloadInfo.Status.StableRevision
rc.newStatus.UpdateRevision = workloadInfo.Status.UpdateRevision
rc.newStatus.ObservedWorkloadReplicas = workloadInfo.Replicas
return err
}
func (rc *realBatchControlPlane) UpgradeBatch() error {
controller, err := rc.BuildController()
if err != nil {
return err
}
if controller.GetWorkloadInfo().Replicas == 0 {
return nil
}
batchContext, err := controller.CalculateBatchContext(rc.release)
if err != nil {
return err
}
klog.Infof("BatchRelease %v calculated context when upgrade batch: %s",
klog.KObj(rc.release), batchContext.Log())
err = controller.UpgradeBatch(batchContext)
if err != nil {
return err
}
return nil
}
func (rc *realBatchControlPlane) CheckBatchReady() error {
controller, err := rc.BuildController()
if err != nil {
return err
}
if controller.GetWorkloadInfo().Replicas == 0 {
return nil
}
// do not countAndUpdateNoNeedUpdateReplicas when checking;
// the calculated target should be consistent with UpgradeBatch.
batchContext, err := controller.CalculateBatchContext(rc.release)
if err != nil {
return err
}
klog.Infof("BatchRelease %v calculated context when check batch ready: %s",
klog.KObj(rc.release), batchContext.Log())
return batchContext.IsBatchReady()
}
func (rc *realBatchControlPlane) Finalize() error {
controller, err := rc.BuildController()
if err != nil {
return client.IgnoreNotFound(err)
}
// release workload control info and clean up resources if needed
return controller.Finalize(rc.release)
}
func (rc *realBatchControlPlane) SyncWorkloadInformation() (control.WorkloadEventType, *util.WorkloadInfo, error) {
// skip the sync if the release is being deleted
if rc.release.DeletionTimestamp != nil {
return control.WorkloadNormalState, nil, nil
}
controller, err := rc.BuildController()
if err != nil {
if errors.IsNotFound(err) {
return control.WorkloadHasGone, nil, err
}
return control.WorkloadUnknownState, nil, err
}
workloadInfo := controller.GetWorkloadInfo()
if !workloadInfo.IsStable() {
klog.Infof("Workload(%v) still reconciling, waiting for it to complete, generation: %v, observed: %v",
workloadInfo.LogKey, workloadInfo.Generation, workloadInfo.Status.ObservedGeneration)
return control.WorkloadStillReconciling, workloadInfo, nil
}
if workloadInfo.IsPromoted() {
klog.Infof("Workload(%v) has been promoted, no need to rollout again actually, replicas: %v, updated: %v",
workloadInfo.LogKey, workloadInfo.Replicas, workloadInfo.Status.UpdatedReadyReplicas)
return control.WorkloadNormalState, workloadInfo, nil
}
if workloadInfo.IsScaling(rc.newStatus.ObservedWorkloadReplicas) {
klog.Warningf("Workload(%v) replicas is modified, replicas from: %v to -> %v",
workloadInfo.LogKey, rc.newStatus.ObservedWorkloadReplicas, workloadInfo.Replicas)
return control.WorkloadReplicasChanged, workloadInfo, nil
}
if workloadInfo.IsRollback(rc.newStatus.StableRevision, rc.newStatus.UpdateRevision) {
klog.Warningf("Workload(%v) is rolling back", workloadInfo.LogKey)
return control.WorkloadRollbackInBatch, workloadInfo, nil
}
if workloadInfo.IsRevisionNotEqual(rc.newStatus.UpdateRevision) {
klog.Warningf("Workload(%v) updateRevision is modified, updateRevision from: %v to -> %v",
workloadInfo.LogKey, rc.newStatus.UpdateRevision, workloadInfo.Status.UpdateRevision)
return control.WorkloadPodTemplateChanged, workloadInfo, nil
}
return control.WorkloadNormalState, workloadInfo, nil
}
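// A minimal sketch (not part of the package API; the handler name is hypothetical)
// of how a caller might react to the event types returned by SyncWorkloadInformation;
// it only illustrates the contract of the state machine above.
func exampleHandleWorkloadEvent(rc *realBatchControlPlane) error {
	event, info, err := rc.SyncWorkloadInformation()
	if err != nil {
		return err
	}
	switch event {
	case control.WorkloadStillReconciling:
		// wait until the workload's generation has been observed
	case control.WorkloadReplicasChanged:
		// re-record the observed replicas before continuing the batches
	case control.WorkloadRollbackInBatch, control.WorkloadPodTemplateChanged:
		// the revision changed; the release plan must be restarted
	case control.WorkloadHasGone:
		// the workload was deleted; finalize and release control
	default: // control.WorkloadNormalState
		_ = info // proceed with the current batch
	}
	return nil
}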

View File

@ -0,0 +1,336 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"context"
"fmt"
"github.com/openkruise/rollouts/api/v1alpha1"
"github.com/openkruise/rollouts/api/v1beta1"
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/hpa"
deploymentutil "github.com/openkruise/rollouts/pkg/controller/deployment/util"
"github.com/openkruise/rollouts/pkg/util"
"github.com/openkruise/rollouts/pkg/util/errors"
"github.com/openkruise/rollouts/pkg/util/patch"
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog/v2"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
)
type realController struct {
*util.WorkloadInfo
client client.Client
pods []*corev1.Pod
key types.NamespacedName
object *apps.Deployment
finder *util.ControllerFinder
}
func NewController(cli client.Client, key types.NamespacedName, _ schema.GroupVersionKind) bluegreenstyle.Interface {
return &realController{
key: key,
client: cli,
finder: util.NewControllerFinder(cli),
}
}
func (rc *realController) GetWorkloadInfo() *util.WorkloadInfo {
return rc.WorkloadInfo
}
func (rc *realController) BuildController() (bluegreenstyle.Interface, error) {
if rc.object != nil {
return rc, nil
}
object := &apps.Deployment{}
if err := rc.client.Get(context.TODO(), rc.key, object); err != nil {
return rc, err
}
rc.object = object
rc.WorkloadInfo = rc.getWorkloadInfo(object)
return rc, nil
}
func (rc *realController) ListOwnedPods() ([]*corev1.Pod, error) {
if rc.pods != nil {
return rc.pods, nil
}
var err error
rc.pods, err = util.ListOwnedPods(rc.client, rc.object)
return rc.pods, err
}
// Initialize prepares the Deployment for the BatchRelease process
func (rc *realController) Initialize(release *v1beta1.BatchRelease) error {
if rc.object == nil || control.IsControlledByBatchRelease(release, rc.object) {
return nil
}
// Disable the HPA
if err := hpa.DisableHPA(rc.client, rc.object); err != nil {
return err
}
klog.InfoS("Initialize: disabled HPA for deployment successfully", "deployment", klog.KObj(rc.object))
// Patch minReadySeconds for stable ReplicaSet
if err := rc.patchStableRSMinReadySeconds(v1beta1.MaxReadySeconds); err != nil {
return err
}
klog.InfoS("Initialize: patched minReadySeconds for stable replicaset successfully", "deployment", klog.KObj(rc.object))
// Patch the Deployment
if err := rc.patchDeployment(release); err != nil {
return err
}
klog.InfoS("Initialize: patched deployment successfully", "deployment", klog.KObj(rc.object))
return nil
}
func (rc *realController) UpgradeBatch(ctx *batchcontext.BatchContext) error {
if err := control.ValidateReadyForBlueGreenRelease(rc.object); err != nil {
return errors.NewBadRequestError(fmt.Errorf("cannot upgrade batch, because deployment %v doesn't satisfy conditions: %s", klog.KObj(rc.object), err.Error()))
}
desired, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.DesiredSurge, int(ctx.Replicas), true)
current, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.CurrentSurge, int(ctx.Replicas), true)
if current >= desired {
klog.Infof("No need to upgrade batch for deployment %v: because current %d >= desired %d", klog.KObj(rc.object), current, desired)
return nil
}
klog.Infof("Ready to upgrade batch for deployment %v: current %d < desired %d", klog.KObj(rc.object), current, desired)
patchData := patch.NewDeploymentPatch()
// unlike canary release, bluegreen doesn't need to pause the Deployment during the rollout;
// because our webhook may pause the Deployment in some situations, we ensure here that it is not paused
patchData.UpdatePaused(false)
patchData.UpdateStrategy(apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
RollingUpdate: &apps.RollingUpdateDeployment{
MaxSurge: &ctx.DesiredSurge,
MaxUnavailable: &intstr.IntOrString{},
},
})
return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
}
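// Illustrative only (hypothetical numbers, helper name not part of this package):
// how the surge values above scale against replicas. With replicas=10, a current
// maxSurge of "20%" and a desired surge of "50%" resolve to 2 and 5, so the batch
// will be upgraded.
func exampleSurgeMath() {
	currentSurge := intstr.FromString("20%")
	desiredSurge := intstr.FromString("50%")
	current, _ := intstr.GetScaledValueFromIntOrPercent(&currentSurge, 10, true) // 2
	desired, _ := intstr.GetScaledValueFromIntOrPercent(&desiredSurge, 10, true) // 5
	fmt.Printf("current=%d desired=%d upgrade=%v\n", current, desired, current < desired)
}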
// Finalize sets paused to false, restores the original settings, and deletes the control annotations
func (rc *realController) Finalize(release *v1beta1.BatchRelease) error {
if release.Spec.ReleasePlan.BatchPartition != nil {
// continuous release (not supported yet)
/*
patchData := patch.NewDeploymentPatch()
patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation)
if err := rc.client.Patch(context.TODO(), d, patchData); err != nil {
return err
}
*/
klog.Warningf("continuous release is not supported yet for bluegreen style release")
return nil
}
// restore the original setting and remove annotation
d := util.GetEmptyObjectWithKey(rc.object)
if !rc.restored() {
setting, err := control.GetOriginalSetting(rc.object)
if err != nil {
return err
}
patchData := patch.NewDeploymentPatch()
// restore the original setting
patchData.UpdatePaused(false)
patchData.UpdateMinReadySeconds(setting.MinReadySeconds)
patchData.UpdateProgressDeadlineSeconds(setting.ProgressDeadlineSeconds)
patchData.UpdateMaxSurge(setting.MaxSurge)
patchData.UpdateMaxUnavailable(setting.MaxUnavailable)
// restore label and annotation
patchData.DeleteAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation)
patchData.DeleteLabel(v1alpha1.DeploymentStableRevisionLabel)
patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation)
if err := rc.client.Patch(context.TODO(), d, patchData); err != nil {
return err
}
klog.InfoS("Finalize: deployment bluegreen release: wait all pods updated and ready", "Deployment", klog.KObj(rc.object))
}
// wait all pods updated and ready
if err := waitAllUpdatedAndReady(d.(*apps.Deployment)); err != nil {
return errors.NewRetryError(err)
}
klog.InfoS("Finalize: All pods updated and ready, then restore hpa", "Deployment", klog.KObj(rc.object))
// restore hpa
return hpa.RestoreHPA(rc.client, rc.object)
}
func (rc *realController) restored() bool {
if rc.object == nil || rc.object.DeletionTimestamp != nil {
return true
}
if rc.object.Annotations == nil || len(rc.object.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]) == 0 {
return true
}
return false
}
func (rc *realController) CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error) {
currentBatch := release.Status.CanaryStatus.CurrentBatch
desiredSurge := release.Spec.ReleasePlan.Batches[currentBatch].CanaryReplicas
plannedUpdatedReplicas := deploymentutil.NewRSReplicasLimit(desiredSurge, rc.object)
currentSurge := intstr.FromInt(0)
if rc.object.Spec.Strategy.RollingUpdate != nil && rc.object.Spec.Strategy.RollingUpdate.MaxSurge != nil {
currentSurge = *rc.object.Spec.Strategy.RollingUpdate.MaxSurge
if currentSurge == intstr.FromInt(1) {
// maxSurge of 1 is the initial value patched in Initialize, so treat it as the zero baseline;
// even if the value was indeed set by the user, resetting it to 0 here does no harm
currentSurge = intstr.FromInt(0)
}
}
return &batchcontext.BatchContext{
Pods: rc.pods,
RolloutID: release.Spec.ReleasePlan.RolloutID,
CurrentBatch: currentBatch,
CurrentSurge: currentSurge,
DesiredSurge: desiredSurge,
UpdateRevision: release.Status.UpdateRevision,
Replicas: rc.Replicas,
UpdatedReplicas: rc.Status.UpdatedReplicas,
UpdatedReadyReplicas: rc.Status.UpdatedReadyReplicas,
PlannedUpdatedReplicas: plannedUpdatedReplicas,
DesiredUpdatedReplicas: plannedUpdatedReplicas,
}, nil
}
func (rc *realController) getWorkloadInfo(d *apps.Deployment) *util.WorkloadInfo {
workloadInfo := util.ParseWorkload(d)
workloadInfo.Status.UpdatedReadyReplicas = 0
if res, err := rc.getUpdatedReadyReplicas(d); err == nil {
workloadInfo.Status.UpdatedReadyReplicas = res
}
workloadInfo.Status.StableRevision = d.Labels[v1alpha1.DeploymentStableRevisionLabel]
return workloadInfo
}
func (rc *realController) getUpdatedReadyReplicas(d *apps.Deployment) (int32, error) {
rss := &apps.ReplicaSetList{}
listOpts := []client.ListOption{
client.InNamespace(d.Namespace),
client.MatchingLabels(d.Spec.Selector.MatchLabels),
client.UnsafeDisableDeepCopy,
}
if err := rc.client.List(context.TODO(), rss, listOpts...); err != nil {
klog.Warningf("getWorkloadInfo failed, because"+"%s", err.Error())
return -1, err
}
allRSs := rss.Items
// select ReplicaSets owned by the current deployment
ownedRSs := make([]*apps.ReplicaSet, 0)
for i := range allRSs {
rs := &allRSs[i]
if !rs.DeletionTimestamp.IsZero() {
continue
}
if metav1.IsControlledBy(rs, d) {
ownedRSs = append(ownedRSs, rs)
}
}
newRS := deploymentutil.FindNewReplicaSet(d, ownedRSs)
updatedReadyReplicas := int32(0)
// if newRS is nil, it means the replicaset hasn't been created (because the deployment is paused)
// therefore we can return 0 directly
if newRS != nil {
updatedReadyReplicas = newRS.Status.ReadyReplicas
}
return updatedReadyReplicas, nil
}
func waitAllUpdatedAndReady(deployment *apps.Deployment) error {
if deployment.Spec.Paused {
return fmt.Errorf("deployment should not be paused")
}
// ALL pods updated AND ready
if deployment.Status.ReadyReplicas != deployment.Status.UpdatedReplicas {
return fmt.Errorf("all ready replicas should be updated, and all updated replicas should be ready")
}
availableReplicas := deployment.Status.AvailableReplicas
allowedUnavailable := util.DeploymentMaxUnavailable(deployment)
if allowedUnavailable+availableReplicas < deployment.Status.Replicas {
return fmt.Errorf("ready replicas should satisfy maxUnavailable")
}
return nil
}
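// Worked example for the gate above (hypothetical numbers): with
// status.Replicas=10, AvailableReplicas=9 and a restored maxUnavailable of 1,
// the check 1+9 >= 10 passes; with AvailableReplicas=8 it fails, and Finalize
// keeps returning a retryable error until more pods become available.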
// Patch minReadySeconds for the stable ReplicaSet
/*
Here is why:
In the rollback scenario, we set the stable ReplicaSet's minReadySeconds to a huge value to make its pods
count as unavailable; otherwise pods of the new version would be terminated immediately when a rollback happens.
We want to keep them until traffic has been switched back to the stable version.
*/
func (rc *realController) patchStableRSMinReadySeconds(seconds int32) error {
if stableRS, err := rc.finder.GetDeploymentStableRs(rc.object); err != nil {
return fmt.Errorf("failed to get stable ReplicaSet: %v", err)
} else if stableRS == nil {
klog.Warningf("No stable ReplicaSet found for deployment %s/%s", rc.object.Namespace, rc.object.Name)
} else {
body := fmt.Sprintf(`{"spec":{"minReadySeconds":%v}}`, seconds)
if err = rc.client.Patch(context.TODO(), stableRS, client.RawPatch(types.MergePatchType, []byte(body))); err != nil {
return fmt.Errorf("failed to patch ReplicaSet %s/%s minReadySeconds to %v: %v", stableRS.Namespace, stableRS.Name, v1beta1.MaxReadySeconds, err)
}
}
return nil
}
// Update deployment strategy: MinReadySeconds, ProgressDeadlineSeconds, MaxSurge, MaxUnavailable
func (rc *realController) patchDeployment(release *v1beta1.BatchRelease) error {
setting, err := control.GetOriginalSetting(rc.object)
if err != nil {
return errors.NewBadRequestError(fmt.Errorf("cannot get original setting for deployment %v: %s", klog.KObj(rc.object), err.Error()))
}
control.InitOriginalSetting(&setting, rc.object)
patchData := patch.NewDeploymentPatch()
patchData.InsertAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation, util.DumpJSON(&setting))
patchData.InsertAnnotation(util.BatchReleaseControlAnnotation, util.DumpJSON(metav1.NewControllerRef(
release, release.GetObjectKind().GroupVersionKind())))
patchData.UpdateStrategy(apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
RollingUpdate: &apps.RollingUpdateDeployment{
MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 0},
},
})
patchData.UpdateMinReadySeconds(v1beta1.MaxReadySeconds)
patchData.UpdateProgressDeadlineSeconds(utilpointer.Int32(v1beta1.MaxProgressSeconds))
// Apply the patch to the Deployment
if err := rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData); err != nil {
return fmt.Errorf("failed to patch deployment %v: %v", klog.KObj(rc.object), err)
}
return nil
}

View File

@ -0,0 +1,728 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"context"
"encoding/json"
"fmt"
"reflect"
"strconv"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
rolloutapi "github.com/openkruise/rollouts/api"
"github.com/openkruise/rollouts/api/v1beta1"
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
control "github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/util"
"github.com/openkruise/rollouts/pkg/util/errors"
apps "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
var (
scheme = runtime.NewScheme()
deploymentKey = types.NamespacedName{
Name: "deployment",
Namespace: "default",
}
deploymentDemo = &apps.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: deploymentKey.Name,
Namespace: deploymentKey.Namespace,
Generation: 1,
Labels: map[string]string{
"app": "busybox",
},
Annotations: map[string]string{
"type": "unit-test",
},
},
Spec: apps.DeploymentSpec{
Paused: true,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "busybox",
},
},
Replicas: pointer.Int32(10),
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
RollingUpdate: &apps.RollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "20%"},
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "busybox",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "busybox",
Image: "busybox:latest",
},
},
},
},
},
Status: apps.DeploymentStatus{
Replicas: 10,
UpdatedReplicas: 0,
ReadyReplicas: 10,
AvailableReplicas: 10,
CollisionCount: pointer.Int32(1),
ObservedGeneration: 1,
},
}
deploymentDemo2 = &apps.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: apps.SchemeGroupVersion.String(),
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "deployment",
Namespace: "default",
UID: types.UID("87076677"),
Generation: 2,
Labels: map[string]string{
"app": "busybox",
apps.DefaultDeploymentUniqueLabelKey: "update-pod-hash",
},
},
Spec: apps.DeploymentSpec{
Replicas: pointer.Int32(10),
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
RollingUpdate: &apps.RollingUpdateDeployment{
MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(0)},
},
},
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "busybox",
},
},
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: containers("v2"),
},
},
},
Status: apps.DeploymentStatus{
Replicas: 10,
ReadyReplicas: 10,
UpdatedReplicas: 0,
AvailableReplicas: 10,
},
}
releaseDemo = &v1beta1.BatchRelease{
TypeMeta: metav1.TypeMeta{
APIVersion: "rollouts.kruise.io/v1alpha1",
Kind: "BatchRelease",
},
ObjectMeta: metav1.ObjectMeta{
Name: "release",
Namespace: deploymentKey.Namespace,
UID: uuid.NewUUID(),
},
Spec: v1beta1.BatchReleaseSpec{
ReleasePlan: v1beta1.ReleasePlan{
FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType,
Batches: []v1beta1.ReleaseBatch{
{
CanaryReplicas: intstr.FromString("10%"),
},
{
CanaryReplicas: intstr.FromString("50%"),
},
{
CanaryReplicas: intstr.FromString("100%"),
},
},
},
WorkloadRef: v1beta1.ObjectRef{
APIVersion: deploymentDemo.APIVersion,
Kind: deploymentDemo.Kind,
Name: deploymentDemo.Name,
},
},
Status: v1beta1.BatchReleaseStatus{
CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
CurrentBatch: 1,
},
},
}
hpaDemo = &autoscalingv1.HorizontalPodAutoscaler{
TypeMeta: metav1.TypeMeta{
APIVersion: "autoscaling/v1",
Kind: "HorizontalPodAutoscaler",
},
ObjectMeta: metav1.ObjectMeta{
Name: "hpa",
Namespace: deploymentKey.Namespace,
},
Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
APIVersion: apps.SchemeGroupVersion.String(),
Kind: "Deployment",
Name: deploymentDemo.Name,
},
MinReplicas: pointer.Int32(1),
MaxReplicas: 10,
},
}
)
func init() {
apps.AddToScheme(scheme)
rolloutapi.AddToScheme(scheme)
kruiseappsv1alpha1.AddToScheme(scheme)
autoscalingv1.AddToScheme(scheme)
}
func TestControlPackage(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Deployment Control Package Suite")
}
var _ = Describe("Deployment Control", func() {
var (
c client.Client
rc *realController
deployment *apps.Deployment
release *v1beta1.BatchRelease
hpa *autoscalingv1.HorizontalPodAutoscaler
stableRS *apps.ReplicaSet
canaryRS *apps.ReplicaSet
)
BeforeEach(func() {
deployment = deploymentDemo.DeepCopy()
release = releaseDemo.DeepCopy()
hpa = hpaDemo.DeepCopy()
deployment = getStableWithReady(deployment, "v1").(*apps.Deployment)
stableRS = makeStableReplicaSets(deployment).(*apps.ReplicaSet)
stableRS.Spec.MinReadySeconds = 0
stableRS.Status.ReadyReplicas = *deployment.Spec.Replicas
stableRS.Status.AvailableReplicas = *deployment.Spec.Replicas
canaryRS = makeCanaryReplicaSets(deployment).(*apps.ReplicaSet)
canaryRS.Status.ReadyReplicas = 0
canaryRS.Status.AvailableReplicas = 0
c = fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(deployment, release, hpa, stableRS, canaryRS).
Build()
rc = &realController{
key: types.NamespacedName{Namespace: deployment.Namespace, Name: deployment.Name},
client: c,
finder: util.NewControllerFinder(c),
}
})
It("should initialize Deployment successfully", func() {
// build controller
_, err := rc.BuildController()
Expect(err).NotTo(HaveOccurred())
// call Initialize method
err = retryFunction(3, func() error {
return rc.Initialize(release)
})
Expect(err).NotTo(HaveOccurred())
// inspect if HPA is disabled
disabledHPA := &autoscalingv1.HorizontalPodAutoscaler{}
err = c.Get(context.TODO(), types.NamespacedName{Namespace: hpa.Namespace, Name: hpa.Name}, disabledHPA)
Expect(err).NotTo(HaveOccurred())
Expect(disabledHPA.Spec.ScaleTargetRef.Name).To(Equal(deployment.Name + "-DisableByRollout"))
// inspect if MinReadySeconds of stable ReplicaSet is updated
stableRSAfter := &apps.ReplicaSet{}
err = c.Get(context.TODO(), client.ObjectKeyFromObject(stableRS), stableRSAfter)
Expect(err).NotTo(HaveOccurred())
Expect(stableRSAfter.Spec.MinReadySeconds).To(Equal(int32(v1beta1.MaxReadySeconds)))
// inspect if Deployment is patched properly
updatedDeployment := &apps.Deployment{}
err = c.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDeployment)
Expect(err).NotTo(HaveOccurred())
// inspect if annotations are added
Expect(updatedDeployment.Annotations).To(HaveKey(v1beta1.OriginalDeploymentStrategyAnnotation))
Expect(updatedDeployment.Annotations).To(HaveKey(util.BatchReleaseControlAnnotation))
Expect(updatedDeployment.Annotations[util.BatchReleaseControlAnnotation]).To(Equal(getControlInfo(release)))
// inspect if strategy is updated
Expect(updatedDeployment.Spec.Strategy.RollingUpdate).NotTo(BeNil())
Expect(updatedDeployment.Spec.Strategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1)))
Expect(updatedDeployment.Spec.Strategy.RollingUpdate.MaxUnavailable.IntVal).To(Equal(int32(0)))
Expect(updatedDeployment.Spec.MinReadySeconds).To(Equal(int32(v1beta1.MaxReadySeconds)))
Expect(*updatedDeployment.Spec.ProgressDeadlineSeconds).To(Equal(int32(v1beta1.MaxProgressSeconds)))
})
It("should finalize Deployment successfully", func() {
// build controller
rc.object = nil
_, err := rc.BuildController()
Expect(err).NotTo(HaveOccurred())
// call Finalize method
err = retryFunction(3, func() error {
return rc.Finalize(release)
})
Expect(err).NotTo(HaveOccurred())
// inspect if Deployment is patched properly
updatedDeployment := &apps.Deployment{}
err = c.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDeployment)
Expect(err).NotTo(HaveOccurred())
// inspect if annotations are removed
Expect(updatedDeployment.Annotations).NotTo(HaveKey(v1beta1.OriginalDeploymentStrategyAnnotation))
Expect(updatedDeployment.Annotations).NotTo(HaveKey(util.BatchReleaseControlAnnotation))
// inspect if strategy is restored
Expect(updatedDeployment.Spec.Strategy.RollingUpdate).NotTo(BeNil())
Expect(*updatedDeployment.Spec.Strategy.RollingUpdate.MaxSurge).To(Equal(intstr.IntOrString{Type: intstr.String, StrVal: "20%"}))
Expect(*updatedDeployment.Spec.Strategy.RollingUpdate.MaxUnavailable).To(Equal(intstr.IntOrString{Type: intstr.Int, IntVal: 1}))
Expect(updatedDeployment.Spec.MinReadySeconds).To(Equal(int32(0)))
Expect(updatedDeployment.Spec.ProgressDeadlineSeconds).To(BeNil())
// inspect if HPA is restored
restoredHPA := &autoscalingv1.HorizontalPodAutoscaler{}
err = c.Get(context.TODO(), types.NamespacedName{Namespace: hpa.Namespace, Name: hpa.Name}, restoredHPA)
Expect(err).NotTo(HaveOccurred())
Expect(restoredHPA.Spec.ScaleTargetRef.Name).To(Equal(deployment.Name))
// inspect if MinReadySeconds of stable ReplicaSet is restored
stableRSAfter := &apps.ReplicaSet{}
err = c.Get(context.TODO(), client.ObjectKeyFromObject(stableRS), stableRSAfter)
Expect(err).NotTo(HaveOccurred())
Expect(stableRSAfter.Spec.MinReadySeconds).To(Equal(int32(0)))
})
It("should upgradBatch for Deployment successfully", func() {
// call Initialize method
_, err := rc.BuildController()
Expect(err).NotTo(HaveOccurred())
err = retryFunction(3, func() error {
return rc.Initialize(release)
})
Expect(err).NotTo(HaveOccurred())
// call UpgradeBatch method
rc.object = nil
_, err = rc.BuildController()
Expect(err).NotTo(HaveOccurred())
batchContext, err := rc.CalculateBatchContext(release)
Expect(err).NotTo(HaveOccurred())
err = rc.UpgradeBatch(batchContext)
Expect(err).NotTo(HaveOccurred())
// inspect if Deployment is patched properly
updatedDeployment := &apps.Deployment{}
err = c.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDeployment)
Expect(err).NotTo(HaveOccurred())
Expect(updatedDeployment.Spec.Paused).To(BeFalse())
Expect(*updatedDeployment.Spec.Strategy.RollingUpdate.MaxSurge).To(Equal(intstr.IntOrString{Type: intstr.String, StrVal: "50%"}))
Expect(*updatedDeployment.Spec.Strategy.RollingUpdate.MaxUnavailable).To(Equal(intstr.IntOrString{Type: intstr.Int, IntVal: 0}))
})
})
func TestCalculateBatchContext(t *testing.T) {
RegisterFailHandler(Fail)
cases := map[string]struct {
workload func() []client.Object
release func() *v1beta1.BatchRelease
result *batchcontext.BatchContext
}{
"noraml case": {
workload: func() []client.Object {
deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment)
deployment.Status = apps.DeploymentStatus{
Replicas: 15,
UpdatedReplicas: 5,
AvailableReplicas: 12,
ReadyReplicas: 12,
}
// current partition, i.e. maxSurge
deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "50%"}
deployment.Spec.Replicas = pointer.Int32(10)
newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet)
newRss.Status.ReadyReplicas = 2
return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)}
},
release: func() *v1beta1.BatchRelease {
r := &v1beta1.BatchRelease{
Spec: v1beta1.BatchReleaseSpec{
ReleasePlan: v1beta1.ReleasePlan{
FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType,
Batches: []v1beta1.ReleaseBatch{
{
CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"},
},
{
CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"},
},
},
},
},
Status: v1beta1.BatchReleaseStatus{
CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
CurrentBatch: 1,
},
UpdateRevision: "version-2",
},
}
return r
},
result: &batchcontext.BatchContext{
CurrentBatch: 1,
UpdateRevision: "version-2",
DesiredSurge: intstr.IntOrString{Type: intstr.String, StrVal: "100%"},
CurrentSurge: intstr.IntOrString{Type: intstr.String, StrVal: "50%"},
Replicas: 10,
UpdatedReplicas: 5,
UpdatedReadyReplicas: 2,
PlannedUpdatedReplicas: 10,
DesiredUpdatedReplicas: 10,
},
},
"maxSurge=99%, replicas=5": {
workload: func() []client.Object {
deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment)
deployment.Status = apps.DeploymentStatus{
Replicas: 9,
UpdatedReplicas: 4,
AvailableReplicas: 9,
ReadyReplicas: 9,
}
deployment.Spec.Replicas = pointer.Int32(5)
// current partition, i.e. maxSurge
deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "90%"}
newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet)
newRss.Status.ReadyReplicas = 4
return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)}
},
release: func() *v1beta1.BatchRelease {
r := &v1beta1.BatchRelease{
Spec: v1beta1.BatchReleaseSpec{
ReleasePlan: v1beta1.ReleasePlan{
FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType,
Batches: []v1beta1.ReleaseBatch{
{
CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "90%"},
},
{
CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "99%"},
},
},
},
},
Status: v1beta1.BatchReleaseStatus{
CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
CurrentBatch: 1,
},
UpdateRevision: "version-2",
},
}
return r
},
result: &batchcontext.BatchContext{
CurrentBatch: 1,
UpdateRevision: "version-2",
DesiredSurge: intstr.FromString("99%"),
CurrentSurge: intstr.FromString("90%"),
Replicas: 5,
UpdatedReplicas: 4,
UpdatedReadyReplicas: 4,
PlannedUpdatedReplicas: 4,
DesiredUpdatedReplicas: 4,
},
},
// test case for continuous release
// "maxSurge=100%, but it is initialized value": {
// workload: func() []client.Object {
// deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment)
// deployment.Status = apps.DeploymentStatus{
// Replicas: 10,
// UpdatedReplicas: 0,
// AvailableReplicas: 10,
// ReadyReplicas: 10,
// }
// // current partition, ie. maxSurge
// deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"}
// newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet)
// newRss.Status.ReadyReplicas = 0
// return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)}
// },
// release: func() *v1beta1.BatchRelease {
// r := &v1beta1.BatchRelease{
// Spec: v1beta1.BatchReleaseSpec{
// ReleasePlan: v1beta1.ReleasePlan{
// FailureThreshold: &percent,
// FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType,
// Batches: []v1beta1.ReleaseBatch{
// {
// CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"},
// },
// },
// },
// },
// Status: v1beta1.BatchReleaseStatus{
// CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
// CurrentBatch: 0,
// },
// UpdateRevision: "version-2",
// },
// }
// return r
// },
// result: &batchcontext.BatchContext{
// CurrentBatch: 0,
// UpdateRevision: "version-2",
// DesiredPartition: intstr.FromString("50%"),
// FailureThreshold: &percent,
// CurrentPartition: intstr.FromString("0%"), // mainly check this
// Replicas: 10,
// UpdatedReplicas: 0,
// UpdatedReadyReplicas: 0,
// PlannedUpdatedReplicas: 5,
// DesiredUpdatedReplicas: 5,
// },
// },
}
for name, cs := range cases {
t.Run(name, func(t *testing.T) {
cliBuilder := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cs.workload()...)
cli := cliBuilder.Build()
control := realController{
client: cli,
key: deploymentKey,
}
_, err := control.BuildController()
Expect(err).NotTo(HaveOccurred())
got, err := control.CalculateBatchContext(cs.release())
Expect(err).NotTo(HaveOccurred())
fmt.Printf("expect %s, but got %s", cs.result.Log(), got.Log())
Expect(got.Log()).Should(Equal(cs.result.Log()))
})
}
}
func TestRealController(t *testing.T) {
RegisterFailHandler(Fail)
release := releaseDemo.DeepCopy()
clone := deploymentDemo.DeepCopy()
stableRs, canaryRs := makeStableReplicaSets(clone), makeCanaryReplicaSets(clone)
cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(release, clone, stableRs, canaryRs).Build()
// build new controller
c := NewController(cli, deploymentKey, clone.GroupVersionKind()).(*realController)
controller, err := c.BuildController()
Expect(err).NotTo(HaveOccurred())
// call Initialize
err = controller.Initialize(release)
Expect(err).NotTo(HaveOccurred())
fetch := &apps.Deployment{}
Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred())
// check strategy
Expect(fetch.Spec.Paused).Should(BeTrue())
Expect(fetch.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType))
Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue())
Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue())
Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds)))
Expect(*fetch.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds)))
// check annotations
Expect(fetch.Annotations[util.BatchReleaseControlAnnotation]).Should(Equal(getControlInfo(release)))
fmt.Println(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation])
Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{
MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "20%"},
MinReadySeconds: 0,
ProgressDeadlineSeconds: pointer.Int32(600),
})))
// check minReadySeconds field of the stable ReplicaSet
fetchRS := &apps.ReplicaSet{}
Expect(cli.Get(context.TODO(), types.NamespacedName{Name: stableRs.GetName(), Namespace: stableRs.GetNamespace()}, fetchRS)).NotTo(HaveOccurred())
Expect(fetchRS.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds)))
c.object = fetch // mock
for {
batchContext, err := controller.CalculateBatchContext(release)
Expect(err).NotTo(HaveOccurred())
err = controller.UpgradeBatch(batchContext)
fetch := &apps.Deployment{}
// mock
Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred())
c.object = fetch
if err == nil {
break
}
}
fetch = &apps.Deployment{}
Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred())
// currentBatch is 1, which means br is in the second batch, maxSurge is 50%
Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue())
release.Spec.ReleasePlan.BatchPartition = nil
err = controller.Finalize(release)
Expect(errors.IsRetryError(err)).Should(BeTrue())
fetch = &apps.Deployment{}
Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred())
// check workload strategy
Expect(fetch.Spec.Paused).Should(BeFalse())
Expect(fetch.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType))
Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "20%"})).Should(BeTrue())
Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue())
Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(0)))
Expect(*fetch.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600)))
}
func getControlInfo(release *v1beta1.BatchRelease) string {
owner, _ := json.Marshal(metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind()))
return string(owner)
}
func makeCanaryReplicaSets(d client.Object) client.Object {
deploy := d.(*apps.Deployment)
labels := deploy.Spec.Selector.DeepCopy().MatchLabels
labels[apps.DefaultDeploymentUniqueLabelKey] = util.ComputeHash(&deploy.Spec.Template, nil)
return &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{
APIVersion: apps.SchemeGroupVersion.String(),
Kind: "ReplicaSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: deploy.Name + rand.String(5),
Namespace: deploy.Namespace,
UID: uuid.NewUUID(),
Labels: labels,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(deploy, deploy.GroupVersionKind()),
},
CreationTimestamp: metav1.Now(),
},
Spec: apps.ReplicaSetSpec{
Replicas: deploy.Spec.Replicas,
Selector: deploy.Spec.Selector.DeepCopy(),
Template: *deploy.Spec.Template.DeepCopy(),
},
}
}
func makeStableReplicaSets(d client.Object) client.Object {
deploy := d.(*apps.Deployment)
stableTemplate := deploy.Spec.Template.DeepCopy()
stableTemplate.Spec.Containers = containers("v1")
labels := deploy.Spec.Selector.DeepCopy().MatchLabels
labels[apps.DefaultDeploymentUniqueLabelKey] = util.ComputeHash(stableTemplate, nil)
return &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{
APIVersion: apps.SchemeGroupVersion.String(),
Kind: "ReplicaSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: deploy.Name + rand.String(5),
Namespace: deploy.Namespace,
UID: uuid.NewUUID(),
Labels: labels,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(deploy, deploy.GroupVersionKind()),
},
CreationTimestamp: metav1.NewTime(time.Now().Add(-time.Hour)),
},
Spec: apps.ReplicaSetSpec{
Replicas: deploy.Spec.Replicas,
Selector: deploy.Spec.Selector.DeepCopy(),
Template: *stableTemplate,
},
}
}
func containers(version string) []corev1.Container {
return []corev1.Container{
{
Name: "busybox",
Image: fmt.Sprintf("busybox:%v", version),
},
}
}
func getStableWithReady(workload client.Object, version string) client.Object {
switch workload.(type) {
case *apps.Deployment:
deploy := workload.(*apps.Deployment)
d := deploy.DeepCopy()
d.Spec.Paused = true
d.ResourceVersion = strconv.Itoa(rand.Intn(100000000000))
d.Spec.Template.Spec.Containers = containers(version)
d.Status.ObservedGeneration = deploy.Generation
return d
case *kruiseappsv1alpha1.CloneSet:
clone := workload.(*kruiseappsv1alpha1.CloneSet)
c := clone.DeepCopy()
c.ResourceVersion = strconv.Itoa(rand.Intn(100000000000))
c.Spec.UpdateStrategy.Paused = true
c.Spec.UpdateStrategy.Partition = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"}
c.Spec.Template.Spec.Containers = containers(version)
c.Status.ObservedGeneration = clone.Generation
return c
}
return nil
}
func retryFunction(limit int, f func() error) (err error) {
for i := limit; i >= 0; i-- {
if err = f(); err == nil {
return nil
}
}
return err
}

View File

@ -0,0 +1,106 @@
package hpa
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
HPADisableSuffix = "-DisableByRollout"
)
func DisableHPA(cli client.Client, object client.Object) error {
hpa := findHPAForWorkload(cli, object)
if hpa == nil {
return nil
}
targetRef, found, err := unstructured.NestedFieldCopy(hpa.Object, "spec", "scaleTargetRef")
if err != nil || !found {
return fmt.Errorf("get HPA targetRef for workload %v failed, because %s", klog.KObj(object), err.Error())
}
ref := targetRef.(map[string]interface{})
name, version, kind := ref["name"].(string), ref["apiVersion"].(string), ref["kind"].(string)
if !strings.HasSuffix(name, HPADisableSuffix) {
body := fmt.Sprintf(`{"spec":{"scaleTargetRef":{"apiVersion": "%s", "kind": "%s", "name": "%s"}}}`, version, kind, addSuffix(name))
if err = cli.Patch(context.TODO(), hpa, client.RawPatch(types.MergePatchType, []byte(body))); err != nil {
return fmt.Errorf("failed to disable HPA %v for workload %v, because %s", klog.KObj(hpa), klog.KObj(object), err.Error())
}
}
return nil
}
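// For illustration, given a Deployment named "demo" (hypothetical), the merge
// patch above renames the HPA's target so it no longer matches any workload:
//   {"spec":{"scaleTargetRef":{"apiVersion":"apps/v1","kind":"Deployment","name":"demo-DisableByRollout"}}}
// RestoreHPA below reverses this by stripping the suffix from the name.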
func RestoreHPA(cli client.Client, object client.Object) error {
hpa := findHPAForWorkload(cli, object)
if hpa == nil {
return nil
}
targetRef, found, err := unstructured.NestedFieldCopy(hpa.Object, "spec", "scaleTargetRef")
if err != nil || !found {
return fmt.Errorf("get HPA targetRef for workload %v failed, because %s", klog.KObj(object), err.Error())
}
ref := targetRef.(map[string]interface{})
name, version, kind := ref["name"].(string), ref["apiVersion"].(string), ref["kind"].(string)
if strings.HasSuffix(name, HPADisableSuffix) {
body := fmt.Sprintf(`{"spec":{"scaleTargetRef":{"apiVersion": "%s", "kind": "%s", "name": "%s"}}}`, version, kind, removeSuffix(name))
if err = cli.Patch(context.TODO(), hpa, client.RawPatch(types.MergePatchType, []byte(body))); err != nil {
return fmt.Errorf("failed to restore HPA %v for workload %v, because %s", klog.KObj(hpa), klog.KObj(object), err.Error())
}
}
return nil
}
func findHPAForWorkload(cli client.Client, object client.Object) *unstructured.Unstructured {
hpa := findHPA(cli, object, "v2")
if hpa != nil {
return hpa
}
return findHPA(cli, object, "v1")
}
func findHPA(cli client.Client, object client.Object, version string) *unstructured.Unstructured {
unstructuredList := &unstructured.UnstructuredList{}
hpaGvk := schema.GroupVersionKind{Group: "autoscaling", Kind: "HorizontalPodAutoscaler", Version: version}
unstructuredList.SetGroupVersionKind(hpaGvk)
if err := cli.List(context.TODO(), unstructuredList, &client.ListOptions{Namespace: object.GetNamespace()}); err != nil {
klog.Warningf("Get HPA for workload %v failed, because %s", klog.KObj(object), err.Error())
return nil
}
klog.Infof("Get %d HPA with %s in namespace %s in total", len(unstructuredList.Items), version, object.GetNamespace())
for _, item := range unstructuredList.Items {
scaleTargetRef, found, err := unstructured.NestedFieldCopy(item.Object, "spec", "scaleTargetRef")
if err != nil || !found {
continue
}
ref := scaleTargetRef.(map[string]interface{})
name, version, kind := ref["name"].(string), ref["apiVersion"].(string), ref["kind"].(string)
if version == object.GetObjectKind().GroupVersionKind().GroupVersion().String() &&
kind == object.GetObjectKind().GroupVersionKind().Kind &&
removeSuffix(name) == object.GetName() {
return &item
}
}
klog.Infof("No HPA found for workload %v", klog.KObj(object))
return nil
}
func addSuffix(HPARefName string) string {
if strings.HasSuffix(HPARefName, HPADisableSuffix) {
return HPARefName
}
return HPARefName + HPADisableSuffix
}
func removeSuffix(HPARefName string) string {
refName := HPARefName
for strings.HasSuffix(refName, HPADisableSuffix) {
refName = refName[:len(refName)-len(HPADisableSuffix)]
}
return refName
}

View File

@ -0,0 +1,149 @@
package hpa
import (
"context"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
var (
scheme = runtime.NewScheme()
)
func TestHPAPackage(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "HPA Package Suite")
}
var _ = Describe("HPA Operations", func() {
var (
cli client.Client
object *unstructured.Unstructured
)
BeforeEach(func() {
object = &unstructured.Unstructured{}
object.SetGroupVersionKind(schema.GroupVersionKind{
Group: "apps",
Version: "v1",
Kind: "Deployment",
})
object.SetNamespace("default")
object.SetName("my-deployment")
cli = fake.NewClientBuilder().WithScheme(scheme).WithObjects(object).Build()
})
Context("when disabling and restoring HPA", func() {
It("should disable and restore HPA successfully", func() {
// Create a fake HPA
hpa := &unstructured.Unstructured{}
hpa.SetGroupVersionKind(schema.GroupVersionKind{
Group: "autoscaling",
Version: "v2",
Kind: "HorizontalPodAutoscaler",
})
hpa.SetNamespace("default")
hpa.SetName("my-hpa")
unstructured.SetNestedField(hpa.Object, map[string]interface{}{
"apiVersion": "apps/v1",
"kind": "Deployment",
"name": "my-deployment",
}, "spec", "scaleTargetRef")
Expect(cli.Create(context.TODO(), hpa)).To(Succeed())
// Disable HPA
Expect(DisableHPA(cli, object)).To(Succeed())
fetchedHPA := &unstructured.Unstructured{}
fetchedHPA.SetGroupVersionKind(schema.GroupVersionKind{
Group: "autoscaling",
Version: "v2",
Kind: "HorizontalPodAutoscaler",
})
Expect(cli.Get(context.TODO(), types.NamespacedName{
Namespace: "default",
Name: "my-hpa",
}, fetchedHPA)).To(Succeed())
targetRef, found, err := unstructured.NestedFieldCopy(fetchedHPA.Object, "spec", "scaleTargetRef")
Expect(err).NotTo(HaveOccurred())
Expect(found).To(BeTrue())
ref := targetRef.(map[string]interface{})
Expect(ref["name"]).To(Equal("my-deployment" + HPADisableSuffix))
// Restore HPA
Expect(RestoreHPA(cli, object)).To(Succeed())
Expect(cli.Get(context.TODO(), types.NamespacedName{
Namespace: "default",
Name: "my-hpa",
}, fetchedHPA)).To(Succeed())
targetRef, found, err = unstructured.NestedFieldCopy(fetchedHPA.Object, "spec", "scaleTargetRef")
Expect(err).NotTo(HaveOccurred())
Expect(found).To(BeTrue())
ref = targetRef.(map[string]interface{})
Expect(ref["name"]).To(Equal("my-deployment"))
})
})
Context("when finding HPA for workload", func() {
It("should find the correct HPA", func() {
// Create a fake HPA v2
hpaV2 := &unstructured.Unstructured{}
hpaV2.SetGroupVersionKind(schema.GroupVersionKind{
Group: "autoscaling",
Version: "v2",
Kind: "HorizontalPodAutoscaler",
})
hpaV2.SetNamespace("default")
hpaV2.SetName("my-hpa-v2")
unstructured.SetNestedField(hpaV2.Object, map[string]interface{}{
"apiVersion": "apps/v1",
"kind": "Deployment",
"name": "my-deployment",
}, "spec", "scaleTargetRef")
// Create a fake HPA v1
hpaV1 := &unstructured.Unstructured{}
hpaV1.SetGroupVersionKind(schema.GroupVersionKind{
Group: "autoscaling",
Version: "v1",
Kind: "HorizontalPodAutoscaler",
})
hpaV1.SetNamespace("default")
hpaV1.SetName("my-hpa-v1")
unstructured.SetNestedField(hpaV1.Object, map[string]interface{}{
"apiVersion": "apps/v1",
"kind": "Deployment",
"name": "my-deployment",
}, "spec", "scaleTargetRef")
Expect(cli.Create(context.TODO(), hpaV2)).To(Succeed())
Expect(cli.Create(context.TODO(), hpaV1)).To(Succeed())
// Test finding HPA for workload
foundHPA := findHPAForWorkload(cli, object)
Expect(foundHPA).NotTo(BeNil())
Expect(foundHPA.GetName()).To(Equal("my-hpa-v2"))
// Delete v2 HPA and check if v1 is found
Expect(cli.Delete(context.TODO(), hpaV2)).To(Succeed())
foundHPA = findHPAForWorkload(cli, object)
Expect(foundHPA).NotTo(BeNil())
Expect(foundHPA.GetName()).To(Equal("my-hpa-v1"))
})
})
})

View File

@ -0,0 +1,48 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bluegreenstyle
import (
"github.com/openkruise/rollouts/api/v1beta1"
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
"github.com/openkruise/rollouts/pkg/util"
corev1 "k8s.io/api/core/v1"
)
type Interface interface {
// BuildController will get the workload object and parse the workload info,
// and return an initialized controller for the workload.
BuildController() (Interface, error)
// GetWorkloadInfo returns the workload information.
GetWorkloadInfo() *util.WorkloadInfo
// ListOwnedPods fetches the pods owned by the workload.
// Note that we should list pods only when we really need them.
// reserved for future use
ListOwnedPods() ([]*corev1.Pod, error)
// CalculateBatchContext calculates the current batch context
// according to the release plan and the current status of the workload.
CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error)
// Initialize does preparation work before rolling out, for example:
// - pause the workload
// - update: MinReadySeconds, ProgressDeadlineSeconds, Strategy
Initialize(release *v1beta1.BatchRelease) error
// UpgradeBatch upgrades the workload according to the current batch context.
UpgradeBatch(ctx *batchcontext.BatchContext) error
// Finalize does cleanup after rolling out, for example:
// - set paused to false, restore the original settings, delete annotations
Finalize(release *v1beta1.BatchRelease) error
}

View File

@ -21,8 +21,10 @@ import (
"fmt"
"strings"
appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
"github.com/openkruise/rollouts/api/v1beta1"
"github.com/openkruise/rollouts/pkg/util"
apps "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -63,6 +65,42 @@ func IsControlledByBatchRelease(release *v1beta1.BatchRelease, object client.Obj
return false
}
// only when ValidateReadyForBlueGreenRelease returns nil can we go on to the next batch
func ValidateReadyForBlueGreenRelease(object client.Object) error {
// check the annotation
if object.GetAnnotations()[util.BatchReleaseControlAnnotation] == "" {
return fmt.Errorf("workload has no control info annotation")
}
switch o := object.(type) {
case *apps.Deployment:
// must be RollingUpdate
if len(o.Spec.Strategy.Type) > 0 && o.Spec.Strategy.Type != apps.RollingUpdateDeploymentStrategyType {
return fmt.Errorf("deployment strategy type is not RollingUpdate")
}
if o.Spec.Strategy.RollingUpdate == nil {
return fmt.Errorf("deployment strategy rollingUpdate is nil")
}
// MinReadySeconds and ProgressDeadlineSeconds must be set
if o.Spec.MinReadySeconds != v1beta1.MaxReadySeconds || o.Spec.ProgressDeadlineSeconds == nil || *o.Spec.ProgressDeadlineSeconds != v1beta1.MaxProgressSeconds {
return fmt.Errorf("deployment strategy minReadySeconds or progressDeadlineSeconds is not MaxReadySeconds or MaxProgressSeconds")
}
case *appsv1alpha1.CloneSet:
// must be ReCreate
if len(o.Spec.UpdateStrategy.Type) > 0 && o.Spec.UpdateStrategy.Type != appsv1alpha1.RecreateCloneSetUpdateStrategyType {
return fmt.Errorf("cloneSet strategy type is not ReCreate")
}
// MinReadySeconds must be set (CloneSet has no ProgressDeadlineSeconds yet)
if o.Spec.MinReadySeconds != v1beta1.MaxReadySeconds {
return fmt.Errorf("cloneSet strategy minReadySeconds is not MaxReadySeconds")
}
default:
panic("unsupported workload type to ValidateReadyForBlueGreenRelease function")
}
return nil
}
// BuildReleaseControlInfo return a NewControllerRef of release with escaped `"`.
func BuildReleaseControlInfo(release *v1beta1.BatchRelease) string {
owner, _ := json.Marshal(metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind()))
@ -112,3 +150,101 @@ func IsCurrentMoreThanOrEqualToDesired(current, desired intstr.IntOrString) bool
desiredNum, _ := intstr.GetScaledValueFromIntOrPercent(&desired, 10000000, true)
return currentNum >= desiredNum
}
// GetOriginalSetting decodes the original deployment strategy
// from the annotation "rollouts.kruise.io/original-deployment-strategy"
func GetOriginalSetting(object client.Object) (OriginalDeploymentStrategy, error) {
setting := OriginalDeploymentStrategy{}
settingStr := object.GetAnnotations()[v1beta1.OriginalDeploymentStrategyAnnotation]
if settingStr == "" {
return setting, nil
}
err := json.Unmarshal([]byte(settingStr), &setting)
return setting, err
}
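// For illustration, a possible value of the annotation (hypothetical numbers;
// the exact field names depend on OriginalDeploymentStrategy's json tags, so
// treat them as an assumption):
//   rollouts.kruise.io/original-deployment-strategy:
//     {"maxUnavailable":"25%","maxSurge":"25%","minReadySeconds":0,"progressDeadlineSeconds":600}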
// InitOriginalSetting will update the original setting based on the workload object
// note: maxSurge and maxUnavailable are updated only when MaxSurge and MaxUnavailable are nil,
// which means they keep unchanged across a continuous release (though continuous release isn't supported for now)
func InitOriginalSetting(setting *OriginalDeploymentStrategy, object client.Object) {
var changeLogs []string
switch o := object.(type) {
case *apps.Deployment:
if setting.MaxSurge == nil {
setting.MaxSurge = getMaxSurgeFromDeployment(o.Spec.Strategy.RollingUpdate)
changeLogs = append(changeLogs, fmt.Sprintf("maxSurge changed from nil to %s", setting.MaxSurge.String()))
}
if setting.MaxUnavailable == nil {
setting.MaxUnavailable = getMaxUnavailableFromDeployment(o.Spec.Strategy.RollingUpdate)
changeLogs = append(changeLogs, fmt.Sprintf("maxUnavailable changed from nil to %s", setting.MaxUnavailable.String()))
}
if setting.ProgressDeadlineSeconds == nil {
setting.ProgressDeadlineSeconds = getIntPtrOrDefault(o.Spec.ProgressDeadlineSeconds, 600)
changeLogs = append(changeLogs, fmt.Sprintf("progressDeadlineSeconds changed from nil to %d", *setting.ProgressDeadlineSeconds))
}
if setting.MinReadySeconds == 0 {
setting.MinReadySeconds = o.Spec.MinReadySeconds
changeLogs = append(changeLogs, fmt.Sprintf("minReadySeconds changed from 0 to %d", setting.MinReadySeconds))
}
case *appsv1alpha1.CloneSet:
if setting.MaxSurge == nil {
setting.MaxSurge = getMaxSurgeFromCloneset(o.Spec.UpdateStrategy)
changeLogs = append(changeLogs, fmt.Sprintf("maxSurge changed from nil to %s", setting.MaxSurge.String()))
}
if setting.MaxUnavailable == nil {
setting.MaxUnavailable = getMaxUnavailableFromCloneset(o.Spec.UpdateStrategy)
changeLogs = append(changeLogs, fmt.Sprintf("maxUnavailable changed from nil to %s", setting.MaxUnavailable.String()))
}
if setting.ProgressDeadlineSeconds == nil {
// CloneSet does not have a progressDeadlineSeconds field yet; support is planned
}
if setting.MinReadySeconds == 0 {
setting.MinReadySeconds = o.Spec.MinReadySeconds
changeLogs = append(changeLogs, fmt.Sprintf("minReadySeconds changed from 0 to %d", setting.MinReadySeconds))
}
default:
panic(fmt.Errorf("unsupported object type %T", o))
}
if len(changeLogs) == 0 {
klog.InfoS("InitOriginalSetting: original setting unchanged", "object", object.GetName())
return
}
klog.InfoS("InitOriginalSetting: original setting updated", "object", object.GetName(), "changes", strings.Join(changeLogs, ";"))
}
func getMaxSurgeFromDeployment(ru *apps.RollingUpdateDeployment) *intstr.IntOrString {
defaultMaxSurge := intstr.FromString("25%")
if ru == nil || ru.MaxSurge == nil {
return &defaultMaxSurge
}
return ru.MaxSurge
}
func getMaxUnavailableFromDeployment(ru *apps.RollingUpdateDeployment) *intstr.IntOrString {
defaultMaxAnavailale := intstr.FromString("25%")
if ru == nil || ru.MaxUnavailable == nil {
return &defaultMaxAnavailale
}
return ru.MaxUnavailable
}
func getMaxSurgeFromCloneset(us appsv1alpha1.CloneSetUpdateStrategy) *intstr.IntOrString {
defaultMaxSurge := intstr.FromString("0%")
if us.MaxSurge == nil {
return &defaultMaxSurge
}
return us.MaxSurge
}
func getMaxUnavailableFromCloneset(us appsv1alpha1.CloneSetUpdateStrategy) *intstr.IntOrString {
defaultMaxUnavailable := intstr.FromString("20%")
if us.MaxUnavailable == nil {
return &defaultMaxUnavailable
}
return us.MaxUnavailable
}
func getIntPtrOrDefault(ptr *int32, defaultVal int32) *int32 {
if ptr == nil {
return &defaultVal
}
return ptr
}
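// Illustrative only: the defaulting behavior of InitOriginalSetting for a
// Deployment that sets neither rollingUpdate nor progressDeadlineSeconds
// (the helper name is hypothetical).
func exampleInitOriginalSetting() {
	d := &apps.Deployment{}
	setting := OriginalDeploymentStrategy{}
	InitOriginalSetting(&setting, d)
	// setting now holds maxSurge="25%", maxUnavailable="25%",
	// progressDeadlineSeconds=600, minReadySeconds=0
}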

View File

@ -62,7 +62,6 @@ func (m *blueGreenReleaseManager) runCanary(c *RolloutContext) error {
}
if m.doCanaryJump(c) {
klog.Infof("rollout(%s/%s) canary step jumped", c.Rollout.Namespace, c.Rollout.Name)
return nil
}
// When the first batch is trafficRouting rolling and the next steps are rolling release,
@ -323,6 +322,9 @@ func (m *blueGreenReleaseManager) doCanaryFinalising(c *RolloutContext) (bool, e
// route all traffic to new version
case v1beta1.FinalisingStepRouteTrafficToNew:
retry, err = m.trafficRoutingManager.RouteAllTrafficToNewVersion(tr)
// dangerous: waits endlessly, only for debugging use
case v1beta1.FinalisingStepWaitEndless:
retry, err = true, fmt.Errorf("only for debugging, just wait endlessly")
default:
nextStep = nextBlueGreenTask(c.FinalizeReason, "")
klog.Warningf("unexpected finalising step, current step(%s), start from the first step(%s)", blueGreenStatus.FinalisingStep, nextStep)
@ -392,7 +394,10 @@ func (m *blueGreenReleaseManager) syncBatchRelease(br *v1beta1.BatchRelease, blu
// TODO: optimize the logic to better understand
blueGreenStatus.Message = fmt.Sprintf("BatchRelease is at state %s, rollout-id %s, step %d",
br.Status.CanaryStatus.CurrentBatchState, br.Status.ObservedRolloutID, br.Status.CanaryStatus.CurrentBatch+1)
// br.Status.Message records messages that help users understand what is going wrong
if len(br.Status.Message) > 0 {
blueGreenStatus.Message += fmt.Sprintf(", %s", br.Status.Message)
}
// sync rolloutId from blueGreenStatus to BatchRelease
if blueGreenStatus.ObservedRolloutID != br.Spec.ReleasePlan.RolloutID {
body := fmt.Sprintf(`{"spec":{"releasePlan":{"rolloutID":"%s"}}}`, blueGreenStatus.ObservedRolloutID)
@ -450,10 +455,9 @@ func nextBlueGreenTask(reason string, currentTask v1beta1.FinalisingStepType) v1
default: // others: disabled/deleting rollout
taskSequence = []v1beta1.FinalisingStepType{
v1beta1.FinalisingStepRestoreStableService,
v1beta1.FinalisingStepResumeWorkload, // scale up new, scale down old
v1beta1.FinalisingStepRouteTrafficToStable,
v1beta1.FinalisingStepRemoveCanaryService,
v1beta1.FinalisingStepResumeWorkload, // scale up new, scale down old
v1beta1.FinalisingStepReleaseWorkloadControl,
}
}

View File

@ -71,7 +71,6 @@ func (m *canaryReleaseManager) runCanary(c *RolloutContext) error {
}
if m.doCanaryJump(c) {
klog.Infof("rollout(%s/%s) canary step jumped", c.Rollout.Namespace, c.Rollout.Name)
return nil
}
// When the first batch is trafficRouting rolling and the next steps are rolling release,
@ -457,6 +456,10 @@ func (m *canaryReleaseManager) syncBatchRelease(br *v1beta1.BatchRelease, canary
// TODO: optimize the logic to make it easier to understand
canaryStatus.Message = fmt.Sprintf("BatchRelease is at state %s, rollout-id %s, step %d",
br.Status.CanaryStatus.CurrentBatchState, br.Status.ObservedRolloutID, br.Status.CanaryStatus.CurrentBatch+1)
// br.Status.Message records messages that help users to understand what is going wrong
if len(br.Status.Message) > 0 {
canaryStatus.Message += fmt.Sprintf(", %s", br.Status.Message)
}
// sync rolloutId from canaryStatus to BatchRelease
if canaryStatus.ObservedRolloutID != br.Spec.ReleasePlan.RolloutID {

View File

@ -25,6 +25,7 @@ import (
"github.com/openkruise/rollouts/api/v1beta1"
"github.com/openkruise/rollouts/pkg/trafficrouting"
"github.com/openkruise/rollouts/pkg/util"
utilerrors "github.com/openkruise/rollouts/pkg/util/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -110,7 +111,13 @@ func (r *RolloutReconciler) reconcileRolloutProgressing(rollout *v1beta1.Rollout
case v1alpha1.ProgressingReasonInRolling:
klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
err = r.doProgressingInRolling(rolloutContext)
if err != nil {
if utilerrors.IsBadRequest(err) {
// For fatal errors, do not retry: it wastes resources and has no effect.
// Therefore, we don't propagate the error; we just log it.
// The user should intervene instead, e.g. in the blue-green continuous release scenario, the user should roll back.
klog.Warningf("rollout(%s/%s) doProgressingInRolling error(%s)", rollout.Namespace, rollout.Name, err.Error())
return nil, nil
} else if err != nil {
return nil, err
}
@ -225,11 +232,29 @@ func (r *RolloutReconciler) handleRolloutPaused(rollout *v1beta1.Rollout, newSta
return nil
}
/*
continuous release (or successive release) is not supported for blue-green release, especially for CloneSet.
Here is why: suppose we are releasing a CloneSet that currently has pods of both v1 and v2. If we release v3
before v2 is fully released, the CloneSet controller might scale down pods without distinguishing between v1
and v2. This is because our implementation is based on minReadySeconds: pods of both v1 and v2 are "unavailable"
while the rollout is in progress.
Deployment actually has the same problem; however, it is possible to bypass the issue for Deployment by setting
minReadySeconds on each ReplicaSet separately. Unfortunately, this workaround does not work for CloneSet.
*/
func (r *RolloutReconciler) handleContinuousRelease(c *RolloutContext) error {
r.Recorder.Eventf(c.Rollout, corev1.EventTypeNormal, "Progressing", "workload published a new canaryRevision during release, restarting publishing")
klog.Infof("rollout(%s/%s) workload published a new canaryRevision from(%s) -> to(%s) during release, restarting publishing",
c.Rollout.Namespace, c.Rollout.Name, c.NewStatus.GetCanaryRevision(), c.Workload.CanaryRevision)
// blue-green release does not support continuous release: reject it with a fatal error
if c.Rollout.Spec.Strategy.IsBlueGreenRelease() {
cond := util.GetRolloutCondition(*c.NewStatus, v1beta1.RolloutConditionProgressing)
cond.Message = "a new version release was detected during the blue-green release; please rollback first"
c.NewStatus.Message = cond.Message
return utilerrors.NewBadRequestError(fmt.Errorf("a new version release was detected during the blue-green release; please rollback first"))
}
done, err := r.doProgressingReset(c)
if err != nil {
klog.Errorf("rollout(%s/%s) doProgressingReset failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error())
@ -601,7 +626,8 @@ func newTrafficRoutingContext(c *RolloutContext) *trafficrouting.TrafficRoutingC
revisionLabelKey = c.Workload.RevisionLabelKey
}
var selectorPatch map[string]string
if !c.Rollout.Spec.Strategy.DisableGenerateCanaryService() && c.Rollout.Spec.Strategy.Canary.PatchPodTemplateMetadata != nil {
if !c.Rollout.Spec.Strategy.DisableGenerateCanaryService() && c.Rollout.Spec.Strategy.Canary != nil &&
c.Rollout.Spec.Strategy.Canary.PatchPodTemplateMetadata != nil {
selectorPatch = c.Rollout.Spec.Strategy.Canary.PatchPodTemplateMetadata.Labels
}
return &trafficrouting.TrafficRoutingContext{

View File

@ -153,12 +153,7 @@ func (d *delegatingReader) List(ctx context.Context, list client.ObjectList, opt
return d.CacheReader.List(ctx, list, opts...)
}
var DisableDeepCopy = disableDeepCopy{}
type disableDeepCopy struct{}
func (_ disableDeepCopy) ApplyToList(_ *client.ListOptions) {
}
var DisableDeepCopy = client.UnsafeDisableDeepCopy
func isDisableDeepCopy(opts []client.ListOption) bool {
for _, opt := range opts {

View File

@ -45,6 +45,16 @@ func GetRolloutCondition(status v1beta1.RolloutStatus, condType v1beta1.RolloutC
return nil
}
func GetBatchReleaseCondition(status v1beta1.BatchReleaseStatus, condType v1beta1.RolloutConditionType) *v1beta1.RolloutCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// SetRolloutCondition updates the rollout to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason, then we are not going to update
// by returning false. Returns true if the condition was updated
@ -63,6 +73,21 @@ func SetRolloutCondition(status *v1beta1.RolloutStatus, condition v1beta1.Rollou
return true
}
func SetBatchReleaseCondition(status *v1beta1.BatchReleaseStatus, condition v1beta1.RolloutCondition) bool {
currentCond := GetBatchReleaseCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason &&
currentCond.Message == condition.Message {
return false
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
newConditions := filterOutCondition(status.Conditions, condition.Type)
status.Conditions = append(newConditions, condition)
return true
}
// filterOutCondition returns a new slice of rollout conditions without conditions with the provided type.
func filterOutCondition(conditions []v1beta1.RolloutCondition, condType v1beta1.RolloutConditionType) []v1beta1.RolloutCondition {
var newConditions []v1beta1.RolloutCondition
@ -78,3 +103,7 @@ func filterOutCondition(conditions []v1beta1.RolloutCondition, condType v1beta1.
func RemoveRolloutCondition(status *v1beta1.RolloutStatus, condType v1beta1.RolloutConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
func RemoveBatchReleaseCondition(status *v1beta1.BatchReleaseStatus, condType v1beta1.RolloutConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}

View File

@ -289,7 +289,7 @@ func (r *ControllerFinder) getDeployment(namespace string, ref *rolloutv1beta1.O
return &Workload{IsStatusConsistent: false}, nil
}
// stable replicaSet
stableRs, err := r.getDeploymentStableRs(stable)
stableRs, err := r.GetDeploymentStableRs(stable)
if err != nil || stableRs == nil {
return &Workload{IsStatusConsistent: false}, err
}
@ -318,7 +318,7 @@ func (r *ControllerFinder) getDeployment(namespace string, ref *rolloutv1beta1.O
if err != nil || canary == nil {
return workload, err
}
canaryRs, err := r.getDeploymentStableRs(canary)
canaryRs, err := r.GetDeploymentStableRs(canary)
if err != nil || canaryRs == nil {
return workload, err
}
@ -422,7 +422,7 @@ func (r *ControllerFinder) GetReplicaSetsForDeployment(obj *apps.Deployment) ([]
return rss, nil
}
func (r *ControllerFinder) getDeploymentStableRs(obj *apps.Deployment) (*apps.ReplicaSet, error) {
func (r *ControllerFinder) GetDeploymentStableRs(obj *apps.Deployment) (*apps.ReplicaSet, error) {
rss, err := r.GetReplicaSetsForDeployment(obj)
if err != nil {
return nil, err

pkg/util/errors/types.go (new file)
View File

@ -0,0 +1,69 @@
package errors
import (
"errors"
"fmt"
)
// RetryError represents a benign error that can be handled or ignored by the caller.
// It encapsulates information that is non-critical and does not require immediate attention.
type RetryError struct {
Err error
}
// Error implements the error interface for RetryError.
// It returns the error message of the encapsulated error or a default message.
func (e *RetryError) Error() string {
if e.Err != nil {
return fmt.Sprintf("[retry]: %s", e.Err.Error())
}
return "retry error"
}
// NewRetryError creates a new instance of RetryError.
func NewRetryError(err error) *RetryError {
return &RetryError{Err: err}
}
func IsRetryError(err error) bool {
var re *RetryError
return errors.As(err, &re)
}
func AsRetryError(err error, target **RetryError) bool {
return errors.As(err, target)
}
// BadRequestError represents a fatal error that requires special handling.
// Such errors are critical and may necessitate logging, alerts, or even program termination.
type BadRequestError struct {
Err error
}
// Error implements the error interface for BadRequestError.
// It returns the error message of the encapsulated error or a default message.
func (e *BadRequestError) Error() string {
if e.Err != nil {
return e.Err.Error()
}
return "fatal error"
}
// NewBadRequestError creates a new instance of BadRequestError.
// It encapsulates the provided error, marking it as critical.
func NewBadRequestError(err error) *BadRequestError {
return &BadRequestError{Err: err}
}
// IsBadRequest checks whether the provided error is of type BadRequestError.
// It returns true if the error is a BadRequestError or wraps a BadRequestError, false otherwise.
func IsBadRequest(err error) bool {
var brErr *BadRequestError
return AsBadRequest(err, &brErr)
}
// AsBadRequest attempts to cast the provided error to a BadRequestError.
// It returns true if the casting is successful, allowing the caller to handle it accordingly.
func AsBadRequest(err error, target **BadRequestError) bool {
return errors.As(err, target)
}
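A hedged sketch of how a caller distinguishes the two error kinds defined above; only the utilerrors calls come from this file, and the handle function is illustrative.

package main

import (
	"fmt"

	utilerrors "github.com/openkruise/rollouts/pkg/util/errors"
)

// handle classifies an error the way reconcileRolloutProgressing does:
// bad requests are fatal (log and stop retrying), retry errors are benign.
func handle(err error) (requeue bool) {
	switch {
	case err == nil:
		return false
	case utilerrors.IsBadRequest(err):
		// fatal: swallow the error so the controller stops retrying
		return false
	case utilerrors.IsRetryError(err):
		// benign: requeue and try again later
		return true
	default:
		return true
	}
}

func main() {
	err := utilerrors.NewBadRequestError(fmt.Errorf("rollback required"))
	fmt.Println(handle(err)) // false
}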

View File

@ -23,6 +23,8 @@ import (
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@ -222,3 +224,156 @@ func (s *DeploymentPatch) UpdatePaused(paused bool) *DeploymentPatch {
}
return s
}
func (s *DeploymentPatch) UpdateMinReadySeconds(seconds int32) *DeploymentPatch {
switch s.PatchType {
case types.StrategicMergePatchType, types.MergePatchType:
if _, ok := s.PatchData["spec"]; !ok {
s.PatchData["spec"] = make(map[string]interface{})
}
spec := s.PatchData["spec"].(map[string]interface{})
spec["minReadySeconds"] = seconds
}
return s
}
func (s *DeploymentPatch) UpdateProgressDeadlineSeconds(seconds *int32) *DeploymentPatch {
switch s.PatchType {
case types.StrategicMergePatchType, types.MergePatchType:
if _, ok := s.PatchData["spec"]; !ok {
s.PatchData["spec"] = make(map[string]interface{})
}
spec := s.PatchData["spec"].(map[string]interface{})
spec["progressDeadlineSeconds"] = seconds
}
return s
}
func (s *DeploymentPatch) UpdateMaxSurge(maxSurge *intstr.IntOrString) *DeploymentPatch {
switch s.PatchType {
case types.StrategicMergePatchType, types.MergePatchType:
if _, ok := s.PatchData["spec"]; !ok {
s.PatchData["spec"] = make(map[string]interface{})
}
spec := s.PatchData["spec"].(map[string]interface{})
if _, ok := spec["strategy"]; !ok {
spec["strategy"] = make(map[string]interface{})
}
strategy := spec["strategy"].(map[string]interface{})
if _, ok := strategy["rollingUpdate"]; !ok {
strategy["rollingUpdate"] = make(map[string]interface{})
}
rollingUpdate := strategy["rollingUpdate"].(map[string]interface{})
rollingUpdate["maxSurge"] = maxSurge
}
return s
}
func (s *DeploymentPatch) UpdateMaxUnavailable(maxUnavailable *intstr.IntOrString) *DeploymentPatch {
switch s.PatchType {
case types.StrategicMergePatchType, types.MergePatchType:
if _, ok := s.PatchData["spec"]; !ok {
s.PatchData["spec"] = make(map[string]interface{})
}
spec := s.PatchData["spec"].(map[string]interface{})
if _, ok := spec["strategy"]; !ok {
spec["strategy"] = make(map[string]interface{})
}
strategy := spec["strategy"].(map[string]interface{})
if _, ok := strategy["rollingUpdate"]; !ok {
strategy["rollingUpdate"] = make(map[string]interface{})
}
rollingUpdate := strategy["rollingUpdate"].(map[string]interface{})
rollingUpdate["maxUnavailable"] = maxUnavailable
}
return s
}
type ClonesetPatch struct {
CommonPatch
}
func NewClonesetPatch() *ClonesetPatch {
return &ClonesetPatch{CommonPatch{PatchType: types.MergePatchType, PatchData: make(map[string]interface{})}}
}
func (s *ClonesetPatch) UpdateMinReadySeconds(seconds int32) *ClonesetPatch {
switch s.PatchType {
case types.StrategicMergePatchType, types.MergePatchType:
klog.Infof("updateMinReadySeconds to %v", seconds)
if _, ok := s.PatchData["spec"]; !ok {
s.PatchData["spec"] = make(map[string]interface{})
}
spec := s.PatchData["spec"].(map[string]interface{})
spec["minReadySeconds"] = seconds
}
return s
}
func (s *ClonesetPatch) UpdatePaused(paused bool) *ClonesetPatch {
switch s.PatchType {
case types.StrategicMergePatchType, types.MergePatchType:
klog.Infof("updatePaused to %v", paused)
if _, ok := s.PatchData["spec"]; !ok {
s.PatchData["spec"] = make(map[string]interface{})
}
spec := s.PatchData["spec"].(map[string]interface{})
if _, ok := spec["updateStrategy"]; !ok {
spec["updateStrategy"] = make(map[string]interface{})
}
updateStrategy := spec["updateStrategy"].(map[string]interface{})
updateStrategy["paused"] = paused
}
return s
}
func (s *ClonesetPatch) UpdatePartition(partition *intstr.IntOrString) *ClonesetPatch {
switch s.PatchType {
case types.StrategicMergePatchType, types.MergePatchType:
klog.Infof("updatePartition to %v", partition)
if _, ok := s.PatchData["spec"]; !ok {
s.PatchData["spec"] = make(map[string]interface{})
}
spec := s.PatchData["spec"].(map[string]interface{})
if _, ok := spec["updateStrategy"]; !ok {
spec["updateStrategy"] = make(map[string]interface{})
}
updateStrategy := spec["updateStrategy"].(map[string]interface{})
updateStrategy["partition"] = partition
}
return s
}
func (s *ClonesetPatch) UpdateMaxSurge(maxSurge *intstr.IntOrString) *ClonesetPatch {
switch s.PatchType {
case types.StrategicMergePatchType, types.MergePatchType:
klog.Infof("updateMaxSurge to %v", maxSurge)
if _, ok := s.PatchData["spec"]; !ok {
s.PatchData["spec"] = make(map[string]interface{})
}
spec := s.PatchData["spec"].(map[string]interface{})
if _, ok := spec["updateStrategy"]; !ok {
spec["updateStrategy"] = make(map[string]interface{})
}
updateStrategy := spec["updateStrategy"].(map[string]interface{})
updateStrategy["maxSurge"] = maxSurge
}
return s
}
func (s *ClonesetPatch) UpdateMaxUnavailable(maxUnavailable *intstr.IntOrString) *ClonesetPatch {
switch s.PatchType {
case types.StrategicMergePatchType, types.MergePatchType:
klog.Infof("updateMaxUnavailable to %v", maxUnavailable)
if _, ok := s.PatchData["spec"]; !ok {
s.PatchData["spec"] = make(map[string]interface{})
}
spec := s.PatchData["spec"].(map[string]interface{})
if _, ok := spec["updateStrategy"]; !ok {
spec["updateStrategy"] = make(map[string]interface{})
}
updateStrategy := spec["updateStrategy"].(map[string]interface{})
updateStrategy["maxUnavailable"] = maxUnavailable
}
return s
}
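A hedged sketch of how these builders compose into a merge patch and get applied with a controller-runtime client. It assumes context, encoding/json, intstr, the Kruise v1alpha1 API, and controller-runtime client imports; marshalling PatchData by hand assumes no dedicated serializer exists on CommonPatch.

// illustrative only: unpause a CloneSet and restore its update settings
func resumeCloneSet(ctx context.Context, cli client.Client, cs *kruiseappsv1alpha1.CloneSet) error {
	maxSurge := intstr.FromString("100%")
	p := NewClonesetPatch().
		UpdatePaused(false).
		UpdateMaxSurge(&maxSurge).
		UpdateMinReadySeconds(15)
	// PatchType and PatchData are exported fields on CommonPatch per the diff
	body, err := json.Marshal(p.PatchData)
	if err != nil {
		return err
	}
	return cli.Patch(ctx, cs, client.RawPatch(p.PatchType, body))
}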

View File

@ -154,7 +154,7 @@ func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int32) string {
func SafeEncodeString(s string) string {
r := make([]byte, len(s))
for i, b := range []rune(s) {
r[i] = alphanums[(int(b) % len(alphanums))]
r[i] = alphanums[int(b)%len(alphanums)]
}
return string(r)
}
@ -329,11 +329,11 @@ func IsWorkloadType(object client.Object, t WorkloadType) bool {
// DeploymentMaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
func DeploymentMaxUnavailable(deployment *apps.Deployment) int32 {
strategy := deployment.Spec.Strategy
if strategy.Type != apps.RollingUpdateDeploymentStrategyType || *(deployment.Spec.Replicas) == 0 {
if strategy.Type != apps.RollingUpdateDeploymentStrategyType || *deployment.Spec.Replicas == 0 {
return int32(0)
}
// Error caught by validation
_, maxUnavailable, _ := resolveFenceposts(strategy.RollingUpdate.MaxSurge, strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
_, maxUnavailable, _ := resolveFenceposts(strategy.RollingUpdate.MaxSurge, strategy.RollingUpdate.MaxUnavailable, *deployment.Spec.Replicas)
if maxUnavailable > *deployment.Spec.Replicas {
return *deployment.Spec.Replicas
}

View File

@ -36,6 +36,13 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
var (
blueGreenSupportWorkloadGVKs = []*schema.GroupVersionKind{
&util.ControllerKindDep,
&util.ControllerKruiseKindCS,
}
)
// RolloutCreateUpdateHandler handles Rollout
type RolloutCreateUpdateHandler struct {
// To use the client, you need to do the following:
@ -204,12 +211,12 @@ func (h *RolloutCreateUpdateHandler) validateRolloutConflict(rollout *appsv1beta
}
func validateRolloutSpec(c *validateContext, rollout *appsv1beta1.Rollout, fldPath *field.Path) field.ErrorList {
errList := validateRolloutSpecObjectRef(&rollout.Spec.WorkloadRef, fldPath.Child("ObjectRef"))
errList := validateRolloutSpecObjectRef(c, &rollout.Spec.WorkloadRef, fldPath.Child("ObjectRef"))
errList = append(errList, validateRolloutSpecStrategy(c, &rollout.Spec.Strategy, fldPath.Child("Strategy"))...)
return errList
}
func validateRolloutSpecObjectRef(workloadRef *appsv1beta1.ObjectRef, fldPath *field.Path) field.ErrorList {
func validateRolloutSpecObjectRef(c *validateContext, workloadRef *appsv1beta1.ObjectRef, fldPath *field.Path) field.ErrorList {
if workloadRef == nil {
return field.ErrorList{field.Invalid(fldPath.Child("WorkloadRef"), workloadRef, "WorkloadRef is required")}
}
@ -218,6 +225,14 @@ func validateRolloutSpecObjectRef(workloadRef *appsv1beta1.ObjectRef, fldPath *f
if !util.IsSupportedWorkload(gvk) {
return field.ErrorList{field.Invalid(fldPath.Child("WorkloadRef"), workloadRef, "WorkloadRef kind is not supported")}
}
if c.style == string(appsv1beta1.BlueGreenRollingStyle) {
for _, allowed := range blueGreenSupportWorkloadGVKs {
if gvk.Group == allowed.Group && gvk.Kind == allowed.Kind {
return nil
}
}
return field.ErrorList{field.Invalid(fldPath.Child("WorkloadRef"), workloadRef, "WorkloadRef kind is not supported for bluegreen style")}
}
return nil
}
@ -378,7 +393,7 @@ func (h *RolloutCreateUpdateHandler) InjectDecoder(d *admission.Decoder) error {
func GetContextFromv1beta1Rollout(rollout *appsv1beta1.Rollout) *validateContext {
if rollout.Spec.Strategy.Canary == nil && rollout.Spec.Strategy.BlueGreen == nil {
return nil
return &validateContext{}
}
style := rollout.Spec.Strategy.GetRollingStyle()
if appsv1beta1.IsRealPartition(rollout) {

View File

@ -128,7 +128,7 @@ func prepareToWrite(dir string) error {
// TODO: figure out if we can reduce the permission. (Now it's 0777)
err = os.MkdirAll(dir, 0777)
if err != nil {
return fmt.Errorf("can't create dir: %v", dir)
return fmt.Errorf("can't create dir: %v, err: %s", dir, err.Error())
}
case err != nil:
return err

View File

@ -0,0 +1,241 @@
/*
Copyright 2019 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mutating
import (
"context"
"encoding/json"
"math"
"net/http"
kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
appsv1beta1 "github.com/openkruise/rollouts/api/v1beta1"
"github.com/openkruise/rollouts/pkg/util"
utilclient "github.com/openkruise/rollouts/pkg/util/client"
util2 "github.com/openkruise/rollouts/pkg/webhook/util"
"github.com/openkruise/rollouts/pkg/webhook/util/configuration"
admissionv1 "k8s.io/api/admission/v1"
v1 "k8s.io/api/admissionregistration/v1"
apps "k8s.io/api/apps/v1"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
labels2 "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// UnifiedWorkloadHandler handles StatefulSet-like workloads (native and advanced StatefulSets)
type UnifiedWorkloadHandler struct {
// To use the client, you need to do the following:
// - uncomment it
// - import sigs.k8s.io/controller-runtime/pkg/client
// - uncomment the InjectClient method at the bottom of this file.
Client client.Client
// Decoder decodes objects
Decoder *admission.Decoder
Finder *util.ControllerFinder
}
var _ admission.Handler = &UnifiedWorkloadHandler{}
// Handle handles admission requests.
func (h *UnifiedWorkloadHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
// if subResources, then ignore
if req.Operation != admissionv1.Update || req.SubResource != "" {
return admission.Allowed("")
}
meetingRules, err := h.checkWorkloadRules(ctx, req)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if !meetingRules {
return admission.Allowed("")
}
switch req.Kind.Group {
// kruise cloneSet
case kruiseappsv1alpha1.GroupVersion.Group:
switch req.Kind.Kind {
case util.ControllerKruiseKindCS.Kind, util.ControllerKruiseKindDS.Kind:
return admission.Allowed("")
}
// native k8s Deployment
case apps.SchemeGroupVersion.Group:
switch req.Kind.Kind {
case util.ControllerKindDep.Kind:
return admission.Allowed("")
}
}
// handle other workload types, including native/advanced statefulset
{
newObj := &unstructured.Unstructured{}
newObj.SetGroupVersionKind(schema.GroupVersionKind{Group: req.Kind.Group, Version: req.Kind.Version, Kind: req.Kind.Kind})
if err := h.Decoder.Decode(req, newObj); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if !util.IsWorkloadType(newObj, util.StatefulSetType) && req.Kind.Kind != util.ControllerKindSts.Kind {
return admission.Allowed("")
}
oldObj := &unstructured.Unstructured{}
oldObj.SetGroupVersionKind(schema.GroupVersionKind{Group: req.Kind.Group, Version: req.Kind.Version, Kind: req.Kind.Kind})
if err := h.Decoder.Decode(
admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{Object: req.AdmissionRequest.OldObject}},
oldObj); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
changed, err := h.handleStatefulSetLikeWorkload(newObj, oldObj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if !changed {
return admission.Allowed("")
}
marshalled, err := json.Marshal(newObj.Object)
if err != nil {
return admission.Errored(http.StatusInternalServerError, err)
}
return admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled)
}
}
func (h *UnifiedWorkloadHandler) handleStatefulSetLikeWorkload(newObj, oldObj *unstructured.Unstructured) (bool, error) {
// indicate whether the workload can enter the rollout process
// 1. replicas > 0
if util.GetReplicas(newObj) == 0 || !util.IsStatefulSetRollingUpdate(newObj) {
return false, nil
}
oldTemplate, newTemplate := util.GetTemplate(oldObj), util.GetTemplate(newObj)
if oldTemplate == nil || newTemplate == nil {
return false, nil
}
oldMetadata, newMetadata := util.GetMetadata(oldObj), util.GetMetadata(newObj)
if newMetadata.Annotations[appsv1beta1.RolloutIDLabel] != "" &&
oldMetadata.Annotations[appsv1beta1.RolloutIDLabel] == newMetadata.Annotations[appsv1beta1.RolloutIDLabel] {
return false, nil
} else if newMetadata.Annotations[appsv1beta1.RolloutIDLabel] == "" && util.EqualIgnoreHash(oldTemplate, newTemplate) {
return false, nil
}
rollout, err := h.fetchMatchedRollout(newObj)
if err != nil {
return false, err
} else if rollout == nil || rollout.Spec.Strategy.IsEmptyRelease() {
return false, nil
}
util.SetStatefulSetPartition(newObj, math.MaxInt16)
state := &util.RolloutState{RolloutName: rollout.Name}
by, _ := json.Marshal(state)
annotation := newObj.GetAnnotations()
if annotation == nil {
annotation = map[string]string{}
}
annotation[util.InRolloutProgressingAnnotation] = string(by)
newObj.SetAnnotations(annotation)
klog.Infof("StatefulSet(%s/%s) will be released incrementally based on Rollout(%s)", newMetadata.Namespace, newMetadata.Name, rollout.Name)
return true, nil
}
func (h *UnifiedWorkloadHandler) fetchMatchedRollout(obj client.Object) (*appsv1beta1.Rollout, error) {
oGv := obj.GetObjectKind().GroupVersionKind()
rolloutList := &appsv1beta1.RolloutList{}
if err := h.Client.List(context.TODO(), rolloutList, utilclient.DisableDeepCopy,
&client.ListOptions{Namespace: obj.GetNamespace()}); err != nil {
klog.Errorf("UnifiedWorkloadHandler List rollout failed: %s", err.Error())
return nil, err
}
for i := range rolloutList.Items {
rollout := &rolloutList.Items[i]
if !rollout.DeletionTimestamp.IsZero() {
continue
}
if rollout.Status.Phase == appsv1beta1.RolloutPhaseDisabled {
klog.Infof("Disabled rollout(%s/%s) fetched when fetching matched rollout", rollout.Namespace, rollout.Name)
continue
}
ref := rollout.Spec.WorkloadRef
gv, err := schema.ParseGroupVersion(ref.APIVersion)
if err != nil {
klog.Warningf("ParseGroupVersion rollout(%s/%s) ref failed: %s", rollout.Namespace, rollout.Name, err.Error())
continue
}
if oGv.Group == gv.Group && oGv.Kind == ref.Kind && obj.GetName() == ref.Name {
return rollout, nil
}
}
return nil, nil
}
var _ inject.Client = &UnifiedWorkloadHandler{}
// InjectClient injects the client into the UnifiedWorkloadHandler
func (h *UnifiedWorkloadHandler) InjectClient(c client.Client) error {
h.Client = c
h.Finder = util.NewControllerFinder(c)
return nil
}
var _ admission.DecoderInjector = &UnifiedWorkloadHandler{}
// InjectDecoder injects the decoder into the UnifiedWorkloadHandler
func (h *UnifiedWorkloadHandler) InjectDecoder(d *admission.Decoder) error {
h.Decoder = d
return nil
}
func (h *UnifiedWorkloadHandler) checkWorkloadRules(ctx context.Context, req admission.Request) (bool, error) {
webhook := &v1.MutatingWebhookConfiguration{}
if err := h.Client.Get(ctx, types.NamespacedName{Name: configuration.MutatingWebhookConfigurationName}, webhook); err != nil {
return false, err
}
newObject := unstructured.Unstructured{}
if err := h.Decoder.Decode(req, &newObject); err != nil {
return false, err
}
labels := newObject.GetLabels()
attr, err := constructAttr(req)
if err != nil {
return false, err
}
for _, webhook := range webhook.Webhooks {
for _, rule := range webhook.Rules {
m := util2.Matcher{Rule: rule, Attr: attr}
if m.Matches() {
selector, err := v12.LabelSelectorAsSelector(webhook.ObjectSelector)
if err != nil {
return false, nil
}
if selector.Matches(labels2.Set(labels)) {
return true, nil
}
}
}
}
return false, nil
}
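The objectSelector check at the heart of checkWorkloadRules is just LabelSelectorAsSelector plus a Matches call. A self-contained sketch follows; the label key and value are illustrative.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// the webhook's objectSelector, converted to a labels.Selector
	sel, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
		MatchLabels: map[string]string{"rollouts.kruise.io/workload-type": "statefulset"},
	})
	if err != nil {
		panic(err)
	}
	// labels taken from the decoded admission object
	objLabels := labels.Set{"rollouts.kruise.io/workload-type": "statefulset"}
	fmt.Println(sel.Matches(objLabels)) // true
}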

View File

@ -0,0 +1,110 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mutating
import (
"context"
"encoding/json"
"math"
"reflect"
"testing"
kruiseappsv1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
appsv1beta1 "github.com/openkruise/rollouts/api/v1beta1"
"github.com/openkruise/rollouts/pkg/util"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
func TestHandleStatefulSet(t *testing.T) {
cases := []struct {
name string
getObjs func() (*kruiseappsv1beta1.StatefulSet, *kruiseappsv1beta1.StatefulSet)
expectObj func() *kruiseappsv1beta1.StatefulSet
getRollout func() *appsv1beta1.Rollout
isError bool
}{
{
name: "cloneSet image v1->v2, matched rollout",
getObjs: func() (*kruiseappsv1beta1.StatefulSet, *kruiseappsv1beta1.StatefulSet) {
oldObj := statefulset.DeepCopy()
newObj := statefulset.DeepCopy()
newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
return oldObj, newObj
},
expectObj: func() *kruiseappsv1beta1.StatefulSet {
obj := statefulset.DeepCopy()
obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
obj.Spec.UpdateStrategy.RollingUpdate.Partition = pointer.Int32(math.MaxInt16)
return obj
},
getRollout: func() *appsv1beta1.Rollout {
obj := rolloutDemo.DeepCopy()
obj.Spec.WorkloadRef = appsv1beta1.ObjectRef{
APIVersion: "apps.kruise.io/v1beta1",
Kind: "StatefulSet",
Name: "echoserver",
}
return obj
},
},
}
decoder, _ := admission.NewDecoder(scheme)
for _, cs := range cases {
t.Run(cs.name, func(t *testing.T) {
client := fake.NewClientBuilder().WithScheme(scheme).Build()
h := UnifiedWorkloadHandler{
Client: client,
Decoder: decoder,
Finder: util.NewControllerFinder(client),
}
rollout := cs.getRollout()
if err := client.Create(context.TODO(), rollout); err != nil {
t.Errorf(err.Error())
}
oldObj, newObj := cs.getObjs()
oldO, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(oldObj)
newO, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(newObj)
oldUnstructured := &unstructured.Unstructured{Object: oldO}
newUnstructured := &unstructured.Unstructured{Object: newO}
oldUnstructured.SetGroupVersionKind(newObj.GroupVersionKind())
newUnstructured.SetGroupVersionKind(newObj.GroupVersionKind())
_, err := h.handleStatefulSetLikeWorkload(newUnstructured, oldUnstructured)
if cs.isError && err == nil {
t.Fatal("handleStatefulSetLikeWorkload failed")
} else if !cs.isError && err != nil {
t.Fatalf(err.Error())
}
newStructured := &kruiseappsv1beta1.StatefulSet{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(newUnstructured.Object, newStructured)
if err != nil {
t.Fatal("DefaultUnstructuredConvert failed")
}
expect := cs.expectObj()
if !reflect.DeepEqual(newStructured, expect) {
by, _ := json.Marshal(newStructured)
t.Fatalf("handlerCloneSet failed, and new(%s)", string(by))
}
})
}
}

View File

@ -23,8 +23,6 @@ import (
// +kubebuilder:webhook:path=/mutate-apps-kruise-io-v1alpha1-cloneset,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=apps.kruise.io,resources=clonesets,verbs=update,versions=v1alpha1,name=mcloneset.kb.io
// +kubebuilder:webhook:path=/mutate-apps-kruise-io-v1alpha1-daemonset,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=apps.kruise.io,resources=daemonsets,verbs=update,versions=v1alpha1,name=mdaemonset.kb.io
// +kubebuilder:webhook:path=/mutate-apps-v1-deployment,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=apps,resources=deployments,verbs=update,versions=v1,name=mdeployment.kb.io
// +kubebuilder:webhook:path=/mutate-apps-v1-statefulset,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=apps,resources=statefulsets,verbs=update,versions=v1,name=mstatefulset.kb.io
// +kubebuilder:webhook:path=/mutate-apps-kruise-io-statefulset,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=apps.kruise.io,resources=statefulsets,verbs=create;update,versions=v1alpha1;v1beta1,name=madvancedstatefulset.kb.io
// +kubebuilder:webhook:path=/mutate-unified-workload,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=*,resources=*,verbs=create;update,versions=*,name=munifiedworload.kb.io
var (
@ -32,9 +30,7 @@ var (
HandlerMap = map[string]admission.Handler{
"mutate-apps-kruise-io-v1alpha1-cloneset": &WorkloadHandler{},
"mutate-apps-v1-deployment": &WorkloadHandler{},
"mutate-apps-v1-statefulset": &WorkloadHandler{},
"mutate-apps-kruise-io-statefulset": &WorkloadHandler{},
"mutate-unified-workload": &WorkloadHandler{},
"mutate-apps-kruise-io-v1alpha1-daemonset": &WorkloadHandler{},
"mutate-unified-workload": &UnifiedWorkloadHandler{},
}
)

View File

@ -189,87 +189,20 @@ func (h *WorkloadHandler) Handle(ctx context.Context, req admission.Request) adm
}
}
// handle other workload types, including native/advanced statefulset
{
newObj := &unstructured.Unstructured{}
newObj.SetGroupVersionKind(schema.GroupVersionKind{Group: req.Kind.Group, Version: req.Kind.Version, Kind: req.Kind.Kind})
if err := h.Decoder.Decode(req, newObj); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if !util.IsWorkloadType(newObj, util.StatefulSetType) && req.Kind.Kind != util.ControllerKindSts.Kind {
return admission.Allowed("")
}
oldObj := &unstructured.Unstructured{}
oldObj.SetGroupVersionKind(schema.GroupVersionKind{Group: req.Kind.Group, Version: req.Kind.Version, Kind: req.Kind.Kind})
if err := h.Decoder.Decode(
admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{Object: req.AdmissionRequest.OldObject}},
oldObj); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
changed, err := h.handleStatefulSetLikeWorkload(newObj, oldObj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if !changed {
return admission.Allowed("")
}
marshalled, err := json.Marshal(newObj.Object)
if err != nil {
return admission.Errored(http.StatusInternalServerError, err)
}
return admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled)
}
}
func (h *WorkloadHandler) handleStatefulSetLikeWorkload(newObj, oldObj *unstructured.Unstructured) (bool, error) {
// indicate whether the workload can enter the rollout process
// 1. replicas > 0
if util.GetReplicas(newObj) == 0 || !util.IsStatefulSetRollingUpdate(newObj) {
return false, nil
}
oldTemplate, newTemplate := util.GetTemplate(oldObj), util.GetTemplate(newObj)
if oldTemplate == nil || newTemplate == nil {
return false, nil
}
oldMetadata, newMetadata := util.GetMetadata(oldObj), util.GetMetadata(newObj)
if newMetadata.Annotations[appsv1beta1.RolloutIDLabel] != "" &&
oldMetadata.Annotations[appsv1beta1.RolloutIDLabel] == newMetadata.Annotations[appsv1beta1.RolloutIDLabel] {
return false, nil
} else if newMetadata.Annotations[appsv1beta1.RolloutIDLabel] == "" && util.EqualIgnoreHash(oldTemplate, newTemplate) {
return false, nil
}
rollout, err := h.fetchMatchedRollout(newObj)
if err != nil {
return false, err
} else if rollout == nil || rollout.Spec.Strategy.IsEmptyRelease() {
return false, nil
}
util.SetStatefulSetPartition(newObj, math.MaxInt16)
state := &util.RolloutState{RolloutName: rollout.Name}
by, _ := json.Marshal(state)
annotation := newObj.GetAnnotations()
if annotation == nil {
annotation = map[string]string{}
}
annotation[util.InRolloutProgressingAnnotation] = string(by)
newObj.SetAnnotations(annotation)
klog.Infof("StatefulSet(%s/%s) will be released incrementally based on Rollout(%s)", newMetadata.Namespace, newMetadata.Name, rollout.Name)
return true, nil
return admission.Allowed("")
}
func (h *WorkloadHandler) handleDeployment(newObj, oldObj *apps.Deployment) (bool, error) {
// in rollout progressing
if newObj.Annotations[util.InRolloutProgressingAnnotation] != "" {
modified := false
if !newObj.Spec.Paused {
modified = true
newObj.Spec.Paused = true
}
strategy := util.GetDeploymentStrategy(newObj)
switch strings.ToLower(string(strategy.RollingStyle)) {
case strings.ToLower(string(appsv1alpha1.PartitionRollingStyle)):
// partition
if strings.EqualFold(string(strategy.RollingStyle), string(appsv1alpha1.PartitionRollingStyle)) {
if !newObj.Spec.Paused {
modified = true
newObj.Spec.Paused = true
}
// Make sure it is always Recreate to disable native controller
if newObj.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType {
modified = true
@ -287,7 +220,24 @@ func (h *WorkloadHandler) handleDeployment(newObj, oldObj *apps.Deployment) (boo
}
appsv1alpha1.SetDefaultDeploymentStrategy(&strategy)
setDeploymentStrategyAnnotation(strategy, newObj)
default:
// bluegreenStyle
} else if len(newObj.GetAnnotations()[appsv1beta1.OriginalDeploymentStrategyAnnotation]) > 0 {
if isEffectiveDeploymentRevisionChange(oldObj, newObj) {
newObj.Spec.Paused, modified = true, true
// disallow continuous release, allow rollback
klog.Warningf("rollback or continuous release detected in Deployment webhook, while only rollback is allowed for bluegreen release for now")
}
// do not allow modifying Strategy.Type to Recreate
if newObj.Spec.Strategy.Type != apps.RollingUpdateDeploymentStrategyType {
modified = true
newObj.Spec.Strategy.Type = oldObj.Spec.Strategy.Type
klog.Warningf("Modifying Strategy.Type to Recreate is not allowed")
}
} else { // default
if !newObj.Spec.Paused {
modified = true
newObj.Spec.Paused = true
}
// Do not allow modifying the strategy to Recreate during rollout
if newObj.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
modified = true
@ -369,6 +319,7 @@ func (h *WorkloadHandler) handleCloneSet(newObj, oldObj *kruiseappsv1alpha1.Clon
} else if rollout == nil || rollout.Spec.Strategy.IsEmptyRelease() {
return false, nil
}
// if traffic routing is used, there must be only one version of Pods
if rollout.Spec.Strategy.HasTrafficRoutings() && newObj.Status.Replicas != newObj.Status.UpdatedReplicas {
klog.Warningf("Because cloneSet(%s/%s) has multiple versions of Pods, it cannot enter rollout progressing", newObj.Namespace, newObj.Name)

View File

@ -36,7 +36,6 @@ import (
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
@ -130,6 +129,51 @@ var (
},
}
rsDemoV2 = &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: "echoserver-v2",
Labels: map[string]string{
"app": "echoserver",
"pod-template-hash": "verision2",
},
Annotations: map[string]string{},
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(deploymentDemo, schema.GroupVersionKind{
Group: apps.SchemeGroupVersion.Group,
Version: apps.SchemeGroupVersion.Version,
Kind: "Deployment",
}),
},
},
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "echoserver",
},
},
Replicas: pointer.Int32(5),
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "echoserver",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "echoserver",
Image: "echoserver:v2",
},
},
},
},
},
}
cloneSetDemo = &kruisev1aplphal.CloneSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps.kruise.io/v1alpha1",
@ -519,6 +563,100 @@ func TestHandlerDeployment(t *testing.T) {
return obj
},
},
{
name: "bluegreen: normal release",
getObjs: func() (*apps.Deployment, *apps.Deployment) {
oldObj := deploymentDemo.DeepCopy()
newObj := deploymentDemo.DeepCopy()
newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
return oldObj, newObj
},
expectObj: func() *apps.Deployment {
obj := deploymentDemo.DeepCopy()
obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
obj.Spec.Paused = true
return obj
},
getRs: func() []*apps.ReplicaSet {
rs := rsDemo.DeepCopy()
return []*apps.ReplicaSet{rs}
},
getRollout: func() *appsv1beta1.Rollout {
obj := rolloutDemo.DeepCopy()
obj.Spec.Strategy.BlueGreen = &appsv1beta1.BlueGreenStrategy{}
return obj
},
isError: false,
},
{
name: "bluegreen: rollback",
getObjs: func() (*apps.Deployment, *apps.Deployment) {
oldObj := deploymentDemo.DeepCopy()
oldObj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
oldObj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}`
oldObj.Labels[appsv1alpha1.DeploymentStableRevisionLabel] = "5b494f7bf"
oldObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
newObj := deploymentDemo.DeepCopy()
newObj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
newObj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}`
newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v1"
return oldObj, newObj
},
expectObj: func() *apps.Deployment {
obj := deploymentDemo.DeepCopy()
obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v1"
obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
obj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}`
obj.Spec.Paused = true
return obj
},
getRs: func() []*apps.ReplicaSet {
rs := rsDemo.DeepCopy()
rs2 := rsDemoV2.DeepCopy()
return []*apps.ReplicaSet{rs, rs2}
},
getRollout: func() *appsv1beta1.Rollout {
obj := rolloutDemo.DeepCopy()
obj.Spec.Strategy.BlueGreen = &appsv1beta1.BlueGreenStrategy{}
return obj
},
isError: false,
},
{
name: "bluegreen: successive release",
getObjs: func() (*apps.Deployment, *apps.Deployment) {
oldObj := deploymentDemo.DeepCopy()
oldObj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
oldObj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}`
oldObj.Labels[appsv1alpha1.DeploymentStableRevisionLabel] = "5b494f7bf"
oldObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
newObj := deploymentDemo.DeepCopy()
newObj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
newObj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}`
newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v3"
return oldObj, newObj
},
expectObj: func() *apps.Deployment {
obj := deploymentDemo.DeepCopy()
obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v3"
obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
obj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}`
obj.Spec.Paused = true
return obj
},
getRs: func() []*apps.ReplicaSet {
rs := rsDemo.DeepCopy()
rs2 := rsDemoV2.DeepCopy()
return []*apps.ReplicaSet{rs, rs2}
},
getRollout: func() *appsv1beta1.Rollout {
obj := rolloutDemo.DeepCopy()
obj.Spec.Strategy.BlueGreen = &appsv1beta1.BlueGreenStrategy{}
return obj
},
isError: false,
},
}
decoder, _ := admission.NewDecoder(scheme)
@ -542,8 +680,11 @@ func TestHandlerDeployment(t *testing.T) {
oldObj, newObj := cs.getObjs()
_, err := h.handleDeployment(newObj, oldObj)
if cs.isError && err == nil {
t.Fatal("handlerDeployment failed")
if cs.isError {
if err == nil {
t.Fatal("handlerDeployment failed")
}
return // no need to check again
} else if !cs.isError && err != nil {
t.Fatalf(err.Error())
}
@ -683,82 +824,6 @@ func TestHandlerDaemonSet(t *testing.T) {
}
}
func TestHandleStatefulSet(t *testing.T) {
cases := []struct {
name string
getObjs func() (*kruiseappsv1beta1.StatefulSet, *kruiseappsv1beta1.StatefulSet)
expectObj func() *kruiseappsv1beta1.StatefulSet
getRollout func() *appsv1beta1.Rollout
isError bool
}{
{
name: "cloneSet image v1->v2, matched rollout",
getObjs: func() (*kruiseappsv1beta1.StatefulSet, *kruiseappsv1beta1.StatefulSet) {
oldObj := statefulset.DeepCopy()
newObj := statefulset.DeepCopy()
newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
return oldObj, newObj
},
expectObj: func() *kruiseappsv1beta1.StatefulSet {
obj := statefulset.DeepCopy()
obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
obj.Spec.UpdateStrategy.RollingUpdate.Partition = pointer.Int32(math.MaxInt16)
return obj
},
getRollout: func() *appsv1beta1.Rollout {
obj := rolloutDemo.DeepCopy()
obj.Spec.WorkloadRef = appsv1beta1.ObjectRef{
APIVersion: "apps.kruise.io/v1beta1",
Kind: "StatefulSet",
Name: "echoserver",
}
return obj
},
},
}
decoder, _ := admission.NewDecoder(scheme)
for _, cs := range cases {
t.Run(cs.name, func(t *testing.T) {
client := fake.NewClientBuilder().WithScheme(scheme).Build()
h := WorkloadHandler{
Client: client,
Decoder: decoder,
Finder: util.NewControllerFinder(client),
}
rollout := cs.getRollout()
if err := client.Create(context.TODO(), rollout); err != nil {
t.Errorf(err.Error())
}
oldObj, newObj := cs.getObjs()
oldO, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(oldObj)
newO, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(newObj)
oldUnstructured := &unstructured.Unstructured{Object: oldO}
newUnstructured := &unstructured.Unstructured{Object: newO}
oldUnstructured.SetGroupVersionKind(newObj.GroupVersionKind())
newUnstructured.SetGroupVersionKind(newObj.GroupVersionKind())
_, err := h.handleStatefulSetLikeWorkload(newUnstructured, oldUnstructured)
if cs.isError && err == nil {
t.Fatal("handleStatefulSetLikeWorkload failed")
} else if !cs.isError && err != nil {
t.Fatalf(err.Error())
}
newStructured := &kruiseappsv1beta1.StatefulSet{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(newUnstructured.Object, newStructured)
if err != nil {
t.Fatal("DefaultUnstructuredConvert failed")
}
expect := cs.expectObj()
if !reflect.DeepEqual(newStructured, expect) {
by, _ := json.Marshal(newStructured)
t.Fatalf("handlerCloneSet failed, and new(%s)", string(by))
}
})
}
}
func TestCheckWorkloadRule(t *testing.T) {
ctx := context.Background()

File diff suppressed because it is too large

View File

@ -0,0 +1,12 @@
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: hpa-dp
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: echoserver
minReplicas: 2
maxReplicas: 6
targetCPUUtilizationPercentage: 1

View File

@ -0,0 +1,24 @@
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: hpa-dp
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: echoserver
behavior:
scaleDown:
stabilizationWindowSeconds: 10
# selectPolicy: Disabled
# scaleUp:
# selectPolicy: Disabled
minReplicas: 2
maxReplicas: 6
metrics:
- type: Resource
resource:
name: cpu
target:
type: AverageValue
averageValue: '1m'

View File

@ -10,21 +10,15 @@ spec:
strategy:
blueGreen:
steps:
- traffic: 20%
replicas: 20%
- replicas: 50%
traffic: 0%
pause: {}
- traffic: 40%
replicas: 40%
pause: {duration: 10}
- traffic: 60%
replicas: 60%
pause: {duration: 10}
- traffic: 80%
replicas: 80%
pause: {duration: 10}
- traffic: 100%
replicas: 100%
pause: {duration: 0}
- replicas: 100%
traffic: 0%
- replicas: 100%
traffic: 50%
- replicas: 100%
traffic: 100%
trafficRoutings:
- service: echoserver
ingress:

View File

@ -0,0 +1,24 @@
apiVersion: rollouts.kruise.io/v1beta1 # we use v1beta1
kind: Rollout
metadata:
name: rollouts-demo
spec:
workloadRef:
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
name: echoserver
strategy:
blueGreen:
steps:
- replicas: 100%
traffic: 0%
pause: {}
- replicas: 100%
traffic: 50%
- replicas: 100%
traffic: 100%
trafficRoutings:
- service: echoserver
ingress:
classType: nginx
name: echoserver