support bluegreen release: Deployment and CloneSet

Signed-off-by: yunbo <yunbo10124scut@gmail.com>
This commit is contained in:
yunbo 2024-11-18 14:32:28 +08:00
parent 056c77dbd2
commit f477b9329b
34 changed files with 5509 additions and 313 deletions

View File

@ -0,0 +1,146 @@
---
# E2E workflow: v1beta1 bluegreen release test cases on a Kubernetes v1.19
# kind cluster (HPA tests use autoscaling/v1 on this cluster version).
name: E2E-V1Beta1-BlueGreen-1.19

on:
  push:
    branches:
      - master
      - release-*
  pull_request: {}
  workflow_dispatch: {}

env:
  # Common versions
  GO_VERSION: '1.19'
  KIND_IMAGE: 'kindest/node:v1.19.16'
  KIND_CLUSTER_NAME: 'ci-testing'

jobs:
  rollout:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: true
      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Setup Kind Cluster
        uses: helm/kind-action@v1.2.0
        with:
          node_image: ${{ env.KIND_IMAGE }}
          cluster_name: ${{ env.KIND_CLUSTER_NAME }}
          config: ./test/kind-conf.yaml
      - name: Build image
        run: |
          export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}"
          docker build --pull --no-cache . -t $IMAGE
          kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
      - name: Install Kruise
        run: |
          set -ex
          kubectl cluster-info
          make helm
          helm repo add openkruise https://openkruise.github.io/charts/
          helm repo update
          helm install kruise openkruise/kruise
          # wait (up to ~27s) for both kruise-controller-manager replicas to be 1/1
          for ((i=1;i<10;i++));
          do
            set +e
            PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
            set -e
            if [ "$PODS" -eq "2" ]; then
              break
            fi
            sleep 3
          done
          set +e
          PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
          set -e
          if [ "$PODS" -eq "2" ]; then
            echo "Wait for kruise-manager ready successfully"
          else
            echo "Timeout to wait for kruise-manager ready"
            exit 1
          fi
      - name: Install Kruise Rollout
        run: |
          set -ex
          kubectl cluster-info
          IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
          # wait (up to ~27s) for the kruise-rollout controller pod to be 1/1
          for ((i=1;i<10;i++));
          do
            set +e
            PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
            set -e
            if [ "$PODS" -eq "1" ]; then
              break
            fi
            sleep 3
          done
          set +e
          PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
          kubectl get node -o yaml
          kubectl get all -n kruise-rollout -o yaml
          set -e
          if [ "$PODS" -eq "1" ]; then
            echo "Wait for kruise-rollout ready successfully"
          else
            echo "Timeout to wait for kruise-rollout ready"
            exit 1
          fi
      - name: Bluegreen Release Disable HPA
        run: |
          export KUBECONFIG=/home/runner/.kube/config
          make ginkgo
          set +e
          ./bin/ginkgo -timeout 60m -v --focus='bluegreen disable hpa test case - autoscaling/v1 for v1.19' test/e2e
          retVal=$?
          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
          # fail the step if the controller pod restarted during the e2e run
          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
          if [ "${restartCount}" -eq "0" ];then
            echo "Kruise-rollout has not restarted"
          else
            kubectl get pod -n kruise-rollout --no-headers
            echo "Kruise-rollout has restarted, abort!!!"
            kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
            exit 1
          fi
          exit $retVal
      - name: Deployment Bluegreen Release
        run: |
          export KUBECONFIG=/home/runner/.kube/config
          make ginkgo
          set +e
          ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Deployment - Ingress' test/e2e
          retVal=$?
          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
          # fail the step if the controller pod restarted during the e2e run
          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
          if [ "${restartCount}" -eq "0" ];then
            echo "Kruise-rollout has not restarted"
          else
            kubectl get pod -n kruise-rollout --no-headers
            echo "Kruise-rollout has restarted, abort!!!"
            kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
            exit 1
          fi
          exit $retVal
      - name: CloneSet Bluegreen Release
        run: |
          export KUBECONFIG=/home/runner/.kube/config
          make ginkgo
          set +e
          ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Cloneset - Ingress' test/e2e
          retVal=$?
          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
          # fail the step if the controller pod restarted during the e2e run
          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
          if [ "${restartCount}" -eq "0" ];then
            echo "Kruise-rollout has not restarted"
          else
            kubectl get pod -n kruise-rollout --no-headers
            echo "Kruise-rollout has restarted, abort!!!"
            kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
            exit 1
          fi
          exit $retVal

View File

@ -0,0 +1,146 @@
---
# E2E workflow: v1beta1 bluegreen release test cases on a Kubernetes v1.23
# kind cluster (HPA tests use autoscaling/v2 on this cluster version).
name: E2E-V1Beta1-BlueGreen-1.23

on:
  push:
    branches:
      - master
      - release-*
  pull_request: {}
  workflow_dispatch: {}

env:
  # Common versions
  GO_VERSION: '1.19'
  KIND_IMAGE: 'kindest/node:v1.23.3'
  KIND_CLUSTER_NAME: 'ci-testing'

jobs:
  rollout:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: true
      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Setup Kind Cluster
        uses: helm/kind-action@v1.2.0
        with:
          node_image: ${{ env.KIND_IMAGE }}
          cluster_name: ${{ env.KIND_CLUSTER_NAME }}
          config: ./test/kind-conf.yaml
      - name: Build image
        run: |
          export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}"
          docker build --pull --no-cache . -t $IMAGE
          kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
      - name: Install Kruise
        run: |
          set -ex
          kubectl cluster-info
          make helm
          helm repo add openkruise https://openkruise.github.io/charts/
          helm repo update
          helm install kruise openkruise/kruise
          # wait (up to ~27s) for both kruise-controller-manager replicas to be 1/1
          for ((i=1;i<10;i++));
          do
            set +e
            PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
            set -e
            if [ "$PODS" -eq "2" ]; then
              break
            fi
            sleep 3
          done
          set +e
          PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
          set -e
          if [ "$PODS" -eq "2" ]; then
            echo "Wait for kruise-manager ready successfully"
          else
            echo "Timeout to wait for kruise-manager ready"
            exit 1
          fi
      - name: Install Kruise Rollout
        run: |
          set -ex
          kubectl cluster-info
          IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
          # wait (up to ~27s) for the kruise-rollout controller pod to be 1/1
          for ((i=1;i<10;i++));
          do
            set +e
            PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
            set -e
            if [ "$PODS" -eq "1" ]; then
              break
            fi
            sleep 3
          done
          set +e
          PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
          kubectl get node -o yaml
          kubectl get all -n kruise-rollout -o yaml
          set -e
          if [ "$PODS" -eq "1" ]; then
            echo "Wait for kruise-rollout ready successfully"
          else
            echo "Timeout to wait for kruise-rollout ready"
            exit 1
          fi
      - name: Bluegreen Release Disable HPA
        run: |
          export KUBECONFIG=/home/runner/.kube/config
          make ginkgo
          set +e
          ./bin/ginkgo -timeout 60m -v --focus='bluegreen delete rollout case - autoscaling/v2 for v1.23' test/e2e
          retVal=$?
          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
          # fail the step if the controller pod restarted during the e2e run
          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
          if [ "${restartCount}" -eq "0" ];then
            echo "Kruise-rollout has not restarted"
          else
            kubectl get pod -n kruise-rollout --no-headers
            echo "Kruise-rollout has restarted, abort!!!"
            kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
            exit 1
          fi
          exit $retVal
      - name: Deployment Bluegreen Release
        run: |
          export KUBECONFIG=/home/runner/.kube/config
          make ginkgo
          set +e
          ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Deployment - Ingress' test/e2e
          retVal=$?
          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
          # fail the step if the controller pod restarted during the e2e run
          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
          if [ "${restartCount}" -eq "0" ];then
            echo "Kruise-rollout has not restarted"
          else
            kubectl get pod -n kruise-rollout --no-headers
            echo "Kruise-rollout has restarted, abort!!!"
            kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
            exit 1
          fi
          exit $retVal
      - name: CloneSet Bluegreen Release
        run: |
          export KUBECONFIG=/home/runner/.kube/config
          make ginkgo
          set +e
          ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Cloneset - Ingress' test/e2e
          retVal=$?
          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
          # fail the step if the controller pod restarted during the e2e run
          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
          if [ "${restartCount}" -eq "0" ];then
            echo "Kruise-rollout has not restarted"
          else
            kubectl get pod -n kruise-rollout --no-headers
            echo "Kruise-rollout has restarted, abort!!!"
            kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
            exit 1
          fi
          exit $retVal

View File

@ -1,4 +1,4 @@
name: E2E-V1Beta1-1.19
name: E2E-V1Beta1-JUMP-1.19
on:
push:

View File

@ -1,4 +1,4 @@
name: E2E-V1Beta1-1.23
name: E2E-V1Beta1-JUMP-1.23
on:
push:

View File

@ -172,7 +172,9 @@ func (dst *Rollout) ConvertFrom(src conversion.Hub) error {
srcV1beta1 := src.(*v1beta1.Rollout)
dst.ObjectMeta = srcV1beta1.ObjectMeta
if !srcV1beta1.Spec.Strategy.IsCanaryStragegy() {
return fmt.Errorf("v1beta1 Rollout with %s strategy cannot be converted to v1alpha1", srcV1beta1.Spec.Strategy.GetRollingStyle())
// only v1beta1 supports bluegreen strategy
// Don't log the message because it will print too often
return nil
}
// spec
dst.Spec = RolloutSpec{

View File

@ -117,6 +117,8 @@ type BatchReleaseStatus struct {
// Phase is the release plan phase, which indicates the current state of release
// plan state machine in BatchRelease controller.
Phase RolloutPhase `json:"phase,omitempty"`
// Message provides details on why the rollout is in its current phase
Message string `json:"message,omitempty"`
}
type BatchReleaseCanaryStatus struct {

View File

@ -62,31 +62,6 @@ type DeploymentStrategy struct {
Partition intstr.IntOrString `json:"partition,omitempty"`
}
// OriginalDeploymentStrategy stores part of the fields of a workload,
// so that it can be restored when finalizing.
// It is only used for BlueGreen Release.
// Similar to DeploymentStrategy, it is carried as an annotation on the workload.
// However, unlike DeploymentStrategy, it is only used to store and restore the user's strategy.
type OriginalDeploymentStrategy struct {
	// The deployment strategy to use to replace existing pods with new ones.
	// +optional
	// +patchStrategy=retainKeys
	Strategy *apps.DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
	// Minimum number of seconds for which a newly created pod should be ready
	// without any of its container crashing, for it to be considered available.
	// Defaults to 0 (pod will be considered available as soon as it is ready)
	// +optional
	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
	// The maximum time in seconds for a deployment to make progress before it
	// is considered to be failed. The deployment controller will continue to
	// process failed deployments and a condition with a ProgressDeadlineExceeded
	// reason will be surfaced in the deployment status. Note that progress will
	// not be estimated during the time a deployment is paused. Defaults to 600s.
	// +optional
	ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
}
type RollingStyleType string
const (
@ -138,44 +113,3 @@ func SetDefaultDeploymentStrategy(strategy *DeploymentStrategy) {
}
}
}
// SetDefaultSetting fills the zero-valued fields of the given
// OriginalDeploymentStrategy with defaults:
//   - ProgressDeadlineSeconds defaults to 600 seconds;
//   - Strategy defaults to a RollingUpdate strategy with MaxUnavailable and
//     MaxSurge both set to 25%;
//   - if both resolved values are 0 (no pod could ever be rolled),
//     MaxUnavailable is forced to 1.
//
// A Recreate strategy is returned untouched, since rolling-update knobs do
// not apply to it.
func SetDefaultSetting(setting *OriginalDeploymentStrategy) {
	if setting.ProgressDeadlineSeconds == nil {
		setting.ProgressDeadlineSeconds = new(int32)
		*setting.ProgressDeadlineSeconds = 600
	}
	if setting.Strategy == nil {
		setting.Strategy = &apps.DeploymentStrategy{}
	}
	if setting.Strategy.Type == "" {
		setting.Strategy.Type = apps.RollingUpdateDeploymentStrategyType
	}
	if setting.Strategy.Type == apps.RecreateDeploymentStrategyType {
		return
	}
	strategy := setting.Strategy
	if strategy.RollingUpdate == nil {
		strategy.RollingUpdate = &apps.RollingUpdateDeployment{}
	}
	if strategy.RollingUpdate.MaxUnavailable == nil {
		// Set MaxUnavailable as 25% by default
		maxUnavailable := intstr.FromString("25%")
		strategy.RollingUpdate.MaxUnavailable = &maxUnavailable
	}
	if strategy.RollingUpdate.MaxSurge == nil {
		// Set MaxSurge as 25% by default.
		// BUGFIX: this previously assigned the default to MaxUnavailable,
		// clobbering the value set above and leaving MaxSurge nil.
		maxSurge := intstr.FromString("25%")
		strategy.RollingUpdate.MaxSurge = &maxSurge
	}
	// Cannot allow maxSurge==0 && MaxUnavailable==0, otherwise, no pod can be updated when rolling update.
	maxSurge, _ := intstr.GetScaledValueFromIntOrPercent(strategy.RollingUpdate.MaxSurge, 100, true)
	maxUnavailable, _ := intstr.GetScaledValueFromIntOrPercent(strategy.RollingUpdate.MaxUnavailable, 100, true)
	if maxSurge == 0 && maxUnavailable == 0 {
		strategy.RollingUpdate = &apps.RollingUpdateDeployment{
			MaxSurge:       &intstr.IntOrString{Type: intstr.Int, IntVal: 0},
			MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
		}
	}
}

View File

@ -422,31 +422,6 @@ func (in *ObjectRef) DeepCopy() *ObjectRef {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): this appears to be deepcopy-gen/controller-gen output — regenerate rather than hand-edit.
func (in *OriginalDeploymentStrategy) DeepCopyInto(out *OriginalDeploymentStrategy) {
	*out = *in
	if in.Strategy != nil {
		// deep-copy the pointer so the copy does not share the apps/v1
		// DeploymentStrategy with the receiver
		in, out := &in.Strategy, &out.Strategy
		*out = new(v1.DeploymentStrategy)
		(*in).DeepCopyInto(*out)
	}
	if in.ProgressDeadlineSeconds != nil {
		// copy the int32 value behind the pointer
		in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
		*out = new(int32)
		**out = **in
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginalDeploymentStrategy.
// Returns nil when the receiver is nil.
// NOTE(review): this appears to be deepcopy-gen/controller-gen output — regenerate rather than hand-edit.
func (in *OriginalDeploymentStrategy) DeepCopy() *OriginalDeploymentStrategy {
	if in == nil {
		return nil
	}
	out := new(OriginalDeploymentStrategy)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PatchPodTemplateMetadata) DeepCopyInto(out *PatchPodTemplateMetadata) {
*out = *in

View File

@ -507,6 +507,10 @@ spec:
- type
type: object
type: array
message:
description: Message provides details on why the rollout is in its
current phase
type: string
observedGeneration:
description: ObservedGeneration is the most recent generation observed
for this BatchRelease. It corresponds to this BatchRelease's generation,

View File

@ -161,6 +161,18 @@ rules:
- get
- patch
- update
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:

View File

@ -148,6 +148,7 @@ type BatchReleaseReconciler struct {
// +kubebuilder:rbac:groups=apps.kruise.io,resources=statefulsets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps.kruise.io,resources=daemonsets,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=apps.kruise.io,resources=daemonsets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
// Reconcile reads that state of the cluster for a Rollout object and makes changes based on the state read
// and what is in the Rollout.Spec

View File

@ -24,6 +24,9 @@ import (
appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
"github.com/openkruise/rollouts/api/v1beta1"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle"
bgcloneset "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/cloneset"
bgdeplopyment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/deployment"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/canarystyle"
canarydeployment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/canarystyle/deployment"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle"
@ -32,6 +35,7 @@ import (
partitiondeployment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle/deployment"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle/statefulset"
"github.com/openkruise/rollouts/pkg/util"
"github.com/openkruise/rollouts/pkg/util/errors"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -145,7 +149,11 @@ func (r *Executor) progressBatches(release *v1beta1.BatchRelease, newStatus *v1b
switch {
case err == nil:
result = reconcile.Result{RequeueAfter: DefaultDuration}
removeProgressingCondition(newStatus)
newStatus.CanaryStatus.CurrentBatchState = v1beta1.VerifyingBatchState
case errors.IsFatal(err):
progressingStateTransition(newStatus, v1.ConditionTrue, v1beta1.ProgressingReasonInRolling, err.Error())
fallthrough
default:
klog.Warningf("Failed to upgrade %v, err %v", klog.KObj(release), err)
}
@ -204,14 +212,14 @@ func (r *Executor) getReleaseController(release *v1beta1.BatchRelease, newStatus
klog.Infof("BatchRelease(%v) using %s-style release controller for this batch release", klog.KObj(release), rollingStyle)
switch rollingStyle {
case v1beta1.BlueGreenRollingStyle:
// if targetRef.APIVersion == appsv1alpha1.GroupVersion.String() && targetRef.Kind == reflect.TypeOf(appsv1alpha1.CloneSet{}).Name() {
// klog.InfoS("Using CloneSet bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace)
// return partitionstyle.NewControlPlane(cloneset.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil
// }
// if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() {
// klog.InfoS("Using Deployment bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace)
// return bluegreenstyle.NewControlPlane(deployment.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil
// }
if targetRef.APIVersion == appsv1alpha1.GroupVersion.String() && targetRef.Kind == reflect.TypeOf(appsv1alpha1.CloneSet{}).Name() {
klog.InfoS("Using CloneSet bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace)
return bluegreenstyle.NewControlPlane(bgcloneset.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil
}
if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() {
klog.InfoS("Using Deployment bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace)
return bluegreenstyle.NewControlPlane(bgdeplopyment.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil
}
case v1beta1.CanaryRollingStyle:
if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() {
@ -257,3 +265,23 @@ func isPartitioned(release *v1beta1.BatchRelease) bool {
return release.Spec.ReleasePlan.BatchPartition != nil &&
*release.Spec.ReleasePlan.BatchPartition <= release.Status.CanaryStatus.CurrentBatch
}
// progressingStateTransition upserts the Progressing condition on the given
// BatchRelease status and mirrors the condition's message into status.Message
// so it is visible at the top level of the status.
// When the condition already exists, Status and Reason are always overwritten,
// but an existing Message is preserved if the supplied message is empty.
func progressingStateTransition(status *v1beta1.BatchReleaseStatus, condStatus v1.ConditionStatus, reason, message string) {
	cond := util.GetBatchReleaseCondition(*status, v1beta1.RolloutConditionProgressing)
	if cond == nil {
		cond = util.NewRolloutCondition(v1beta1.RolloutConditionProgressing, condStatus, reason, message)
	} else {
		cond.Status = condStatus
		cond.Reason = reason
		if message != "" {
			cond.Message = message
		}
	}
	util.SetBatchReleaseCondition(status, *cond)
	status.Message = cond.Message
}
// removeProgressingCondition drops the Progressing condition from the status
// and clears the mirrored top-level status message.
func removeProgressingCondition(status *v1beta1.BatchReleaseStatus) {
	util.RemoveBatchReleaseCondition(status, v1beta1.RolloutConditionProgressing)
	status.Message = ""
}

View File

@ -61,6 +61,9 @@ type BatchContext struct {
Pods []*corev1.Pod `json:"-"`
// filter or sort pods before patch label
FilterFunc FilterFuncType `json:"-"`
// the next two fields are only used for bluegreen style
CurrentSurge intstr.IntOrString `json:"currentSurge,omitempty"`
DesiredSurge intstr.IntOrString `json:"desiredSurge,omitempty"`
}
type FilterFuncType func(pods []*corev1.Pod, ctx *BatchContext) []*corev1.Pod

View File

@ -0,0 +1,42 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package control
import "k8s.io/apimachinery/pkg/util/intstr"
// OriginalDeploymentStrategy stores part of the fields of a workload,
// so that it can be restored when finalizing.
// It is only used for BlueGreen Release.
// Similar to DeploymentStrategy, it is carried as an annotation on the workload.
// However, unlike DeploymentStrategy, it is only used to store and restore the user's strategy.
type OriginalDeploymentStrategy struct {
	// The maximum number of pods that can be unavailable during the update.
	// +optional
	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
	// The maximum number of pods that can be scheduled above the desired
	// number of pods during the update.
	// +optional
	MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
	// Minimum number of seconds for which a newly created pod should be ready
	// without any of its container crashing, for it to be considered available.
	// Defaults to 0 (pod will be considered available as soon as it is ready)
	// +optional
	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
	// The maximum time in seconds for a deployment to make progress before it
	// is considered to be failed. The deployment controller will continue to
	// process failed deployments and a condition with a ProgressDeadlineExceeded
	// reason will be surfaced in the deployment status. Note that progress will
	// not be estimated during the time a deployment is paused. Defaults to 600s.
	// +optional
	ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
}

View File

@ -0,0 +1,239 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloneset
import (
"context"
"fmt"
kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
"github.com/openkruise/rollouts/api/v1beta1"
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/hpa"
"github.com/openkruise/rollouts/pkg/util"
"github.com/openkruise/rollouts/pkg/util/errors"
"github.com/openkruise/rollouts/pkg/util/patch"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// realController is the bluegreen-style release controller implementation for
// CloneSet workloads. It lazily fetches and caches the target object and its
// owned pods.
type realController struct {
	*util.WorkloadInfo                              // parsed workload info; populated by BuildController
	client             client.Client                // API client used for reads and patches
	pods               []*corev1.Pod                // cached owned pods; filled by ListOwnedPods
	key                types.NamespacedName         // namespace/name of the target CloneSet
	object             *kruiseappsv1alpha1.CloneSet // cached CloneSet; nil until BuildController runs
}
// NewController returns a bluegreen-style release controller bound to the
// CloneSet identified by key. The object itself is not fetched until
// BuildController is invoked.
func NewController(cli client.Client, key types.NamespacedName, _ schema.GroupVersionKind) bluegreenstyle.Interface {
	rc := new(realController)
	rc.client = cli
	rc.key = key
	return rc
}
// GetWorkloadInfo returns the workload info parsed by BuildController;
// nil if BuildController has not run yet.
func (rc *realController) GetWorkloadInfo() *util.WorkloadInfo {
	return rc.WorkloadInfo
}
// BuildController fetches the target CloneSet and parses its workload info.
// The fetch happens at most once; subsequent calls are no-ops. The controller
// itself is returned so the call can be chained.
func (rc *realController) BuildController() (bluegreenstyle.Interface, error) {
	if rc.object != nil {
		return rc, nil
	}
	object := &kruiseappsv1alpha1.CloneSet{}
	if err := rc.client.Get(context.TODO(), rc.key, object); err != nil {
		return rc, err
	}
	rc.object = object
	rc.WorkloadInfo = util.ParseWorkload(object)
	return rc, nil
}
// ListOwnedPods returns the pods owned by the CloneSet, caching the result so
// the LIST is performed at most once per controller instance.
func (rc *realController) ListOwnedPods() ([]*corev1.Pod, error) {
	if rc.pods != nil {
		return rc.pods, nil
	}
	var err error
	rc.pods, err = util.ListOwnedPods(rc.client, rc.object)
	return rc.pods, err
}
// Initialize prepares the CloneSet for a bluegreen release in one patch:
//   - disables the HPA targeting the workload;
//   - saves the user's original settings into an annotation so Finalize can
//     restore them;
//   - claims the CloneSet for this BatchRelease via a controller-ref annotation;
//   - sets maxSurge=1/maxUnavailable=0 as initial values and sets
//     minReadySeconds to v1beta1.MaxReadySeconds (presumably a sentinel that
//     prevents new pods from being counted available — confirm its value).
// It is a no-op when the object is absent or already controlled by this release.
func (rc *realController) Initialize(release *v1beta1.BatchRelease) error {
	klog.Info("Initialize cloneset controller")
	if rc.object == nil || control.IsControlledByBatchRelease(release, rc.object) {
		return nil
	}
	// disable the hpa
	if err := hpa.DisableHPA(rc.client, rc.object); err != nil {
		return err
	}
	klog.InfoS("Initialize: disable hpa for cloneset successfully", "cloneset", klog.KObj(rc.object))
	// patch the cloneset
	setting, err := control.GetOriginalSetting(rc.object)
	if err != nil {
		return errors.NewFatalError(fmt.Errorf("cannot get original setting for cloneset %v: %s from annotation", klog.KObj(rc.object), err.Error()))
	}
	control.InitOriginalSetting(&setting, rc.object)
	patchData := patch.NewClonesetPatch()
	patchData.InsertAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation, util.DumpJSON(&setting))
	patchData.InsertAnnotation(util.BatchReleaseControlAnnotation, util.DumpJSON(metav1.NewControllerRef(
		release, release.GetObjectKind().GroupVersionKind())))
	// we use partition = 100% to function as "paused" instead of setting the paused field as true
	// it is mainly to keep consistency with partition style (partition is already set as 100% in webhook)
	patchData.UpdatePaused(false)
	maxSurge := intstr.FromInt(1) // select the minimum positive number as initial value
	maxUnavailable := intstr.FromInt(0)
	patchData.UpdateMaxSurge(&maxSurge)
	patchData.UpdateMaxUnavailable(&maxUnavailable)
	patchData.UpdateMinReadySeconds(v1beta1.MaxReadySeconds)
	klog.InfoS("Initialize: try to update cloneset", "cloneset", klog.KObj(rc.object), "patchData", patchData.String())
	return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
}
// UpgradeBatch advances the release to the current batch's desired surge by
// raising the CloneSet's maxSurge. The patch is skipped when the currently
// configured surge already covers the desired value — surge is only ever
// increased here, never lowered.
func (rc *realController) UpgradeBatch(ctx *batchcontext.BatchContext) error {
	if err := control.ValidateReadyForBlueGreenRelease(rc.object); err != nil {
		return errors.NewFatalError(fmt.Errorf("cannot upgrade batch, because cloneset %v doesn't satisfy conditions: %s", klog.KObj(rc.object), err.Error()))
	}
	// resolve int-or-percent surge values against the replica count
	desired, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.DesiredSurge, int(ctx.Replicas), true)
	current, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.CurrentSurge, int(ctx.Replicas), true)
	if current >= desired {
		klog.Infof("No need to upgrade batch for cloneset %v: because current %d >= desired %d", klog.KObj(rc.object), current, desired)
		return nil
	} else {
		klog.Infof("Will update batch for cloneset %v: current %d < desired %d", klog.KObj(rc.object), current, desired)
	}
	patchData := patch.NewClonesetPatch()
	// avoid interference from partition
	// NOTE(review): "UpdatePartiton" is the patch helper's own spelling (sic)
	patchData.UpdatePartiton(nil)
	patchData.UpdateMaxSurge(&ctx.DesiredSurge)
	return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
}
// Finalize restores the CloneSet to its pre-release state once the bluegreen
// release ends. It intentionally runs as a two-pass state machine keyed on
// MinReadySeconds (see the comment inside): the first pass restores the HPA
// and the user's original minReadySeconds/maxSurge/maxUnavailable and returns
// a benign error to force a re-enqueue; the second pass waits for all pods to
// be updated and ready, then removes the bookkeeping annotations.
func (rc *realController) Finalize(release *v1beta1.BatchRelease) error {
	if rc.finalized() {
		return nil // No need to finalize again
	}
	if release.Spec.ReleasePlan.BatchPartition != nil {
		// continuous release (not supported yet)
		/*
			patchData := patch.NewClonesetPatch()
			patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation)
			return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
		*/
		klog.Warningf("continuous release is not supported yet for bluegreen style release")
		return nil
	}
	c := util.GetEmptyObjectWithKey(rc.object)
	setting, err := control.GetOriginalSetting(rc.object)
	if err != nil {
		return errors.NewFatalError(fmt.Errorf("cannot get original setting for cloneset %v: %s from annotation", klog.KObj(rc.object), err.Error()))
	}
	patchData := patch.NewClonesetPatch()
	// Why do we need a simple MinReadySeconds-based state machine (i.e. the if-else block)?
	// It's possible for Finalize to be called multiple times, if the error returned is not nil.
	// If we did all needed operations in a single code block, like A->B->C, then when C needs a retry,
	// both A and B would be executed again as well; operations like restoreHPA cost a lot (they call the LIST API)
	if rc.object.Spec.MinReadySeconds != setting.MinReadySeconds {
		// restore the hpa
		if err := hpa.RestoreHPA(rc.client, rc.object); err != nil {
			return err
		}
		// restore the original setting
		patchData.UpdateMinReadySeconds(setting.MinReadySeconds)
		patchData.UpdateMaxSurge(setting.MaxSurge)
		patchData.UpdateMaxUnavailable(setting.MaxUnavailable)
		if err := rc.client.Patch(context.TODO(), c, patchData); err != nil {
			return err
		}
		// we should return an error to trigger re-enqueue, so that we can go to the next if-else branch in the next reconcile
		return errors.NewBenignError(fmt.Errorf("cloneset bluegreen: we should wait all pods updated and available"))
	} else {
		klog.InfoS("Finalize: cloneset bluegreen release: wait all pods updated and ready", "cloneset", klog.KObj(rc.object))
		// wait all pods updated and ready
		if rc.object.Status.ReadyReplicas != rc.object.Status.UpdatedReadyReplicas {
			return errors.NewBenignError(fmt.Errorf("cloneset %v finalize not done, readyReplicas %d != updatedReadyReplicas %d, current policy %s",
				klog.KObj(rc.object), rc.object.Status.ReadyReplicas, rc.object.Status.UpdatedReadyReplicas, release.Spec.ReleasePlan.FinalizingPolicy))
		}
		klog.InfoS("Finalize: cloneset bluegreen release: all pods updated and ready")
		// restore annotation
		patchData.DeleteAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation)
		patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation)
		return rc.client.Patch(context.TODO(), c, patchData)
	}
}
// finalized reports whether there is nothing left for Finalize to do: the
// CloneSet is gone (or being deleted), or the saved-strategy annotation has
// already been removed.
func (rc *realController) finalized() bool {
	if rc.object == nil || rc.object.DeletionTimestamp != nil {
		return true
	}
	annotations := rc.object.Annotations
	return annotations == nil || len(annotations[v1beta1.OriginalDeploymentStrategyAnnotation]) == 0
}
// bluegreen doesn't support rollback in batch, because:
// - bluegreen supports traffic rollback instead, so rollback in batch is not necessary
// - it's difficult for both Deployment and CloneSet to support rollback in batch, with the "minReadySeconds" implementation

// CalculateBatchContext assembles the BatchContext for the current batch:
// the desired surge comes from the release plan, while the current surge is
// read back from the CloneSet's configured maxSurge.
func (rc *realController) CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error) {
	// current batch index
	currentBatch := release.Status.CanaryStatus.CurrentBatch
	// the number of expected updated pods
	desiredSurge := release.Spec.ReleasePlan.Batches[currentBatch].CanaryReplicas
	// the number of current updated pods
	currentSurge := intstr.FromInt(0)
	if rc.object.Spec.UpdateStrategy.MaxSurge != nil {
		currentSurge = *rc.object.Spec.UpdateStrategy.MaxSurge
		if currentSurge == intstr.FromInt(1) {
			// currentSurge == intstr.FromInt(1) means that currentSurge is the initial value
			// if the value is indeed set by user, setting it to 0 still does no harm
			// NOTE(review): this struct comparison only matches the integer form 1;
			// a user-set string "1" would not be treated as the initial value — confirm intended
			currentSurge = intstr.FromInt(0)
		}
	}
	// resolve the desired surge against the replica count
	desired, _ := intstr.GetScaledValueFromIntOrPercent(&desiredSurge, int(rc.Replicas), true)
	batchContext := &batchcontext.BatchContext{
		Pods:           rc.pods,
		RolloutID:      release.Spec.ReleasePlan.RolloutID,
		CurrentBatch:   currentBatch,
		UpdateRevision: release.Status.UpdateRevision,
		DesiredSurge:   desiredSurge,
		CurrentSurge:   currentSurge,
		// the following fields are used to check if the batch is ready
		Replicas:               rc.Replicas,
		UpdatedReplicas:        rc.Status.UpdatedReplicas,
		UpdatedReadyReplicas:   rc.Status.UpdatedReadyReplicas,
		DesiredUpdatedReplicas: int32(desired),
		PlannedUpdatedReplicas: int32(desired),
	}
	// the number of no need update pods that marked before rollout
	// if noNeedUpdate := release.Status.CanaryStatus.NoNeedUpdateReplicas; noNeedUpdate != nil {
	// 	batchContext.FilterFunc = labelpatch.FilterPodsForUnorderedUpdate
	// }
	return batchContext, nil
}

View File

@ -0,0 +1,394 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloneset
import (
"context"
"encoding/json"
"reflect"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
rolloutapi "github.com/openkruise/rollouts/api"
"github.com/openkruise/rollouts/api/v1beta1"
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
control "github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/util"
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
// Shared test fixtures: a CloneSet at revision version-1 with an in-flight
// update to version-2, and a three-batch (10%/50%/100%) BatchRelease
// targeting it.
var (
	scheme = runtime.NewScheme()

	cloneKey = types.NamespacedName{
		Namespace: "default",
		Name:      "cloneset",
	}

	cloneDemo = &kruiseappsv1alpha1.CloneSet{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apps.kruise.io/v1alpha1",
			Kind:       "CloneSet",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:       cloneKey.Name,
			Namespace:  cloneKey.Namespace,
			Generation: 1,
			Labels: map[string]string{
				"app": "busybox",
			},
			Annotations: map[string]string{
				"type": "unit-test",
			},
		},
		Spec: kruiseappsv1alpha1.CloneSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "busybox",
				},
			},
			Replicas: pointer.Int32(10),
			UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
				Paused:         true,
				Partition:      &intstr.IntOrString{Type: intstr.String, StrVal: "0%"},
				MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "busybox",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:  "busybox",
							Image: "busybox:latest",
						},
					},
				},
			},
		},
		Status: kruiseappsv1alpha1.CloneSetStatus{
			Replicas:             10,
			UpdatedReplicas:      0,
			ReadyReplicas:        10,
			AvailableReplicas:    10,
			UpdatedReadyReplicas: 0,
			UpdateRevision:       "version-2",
			CurrentRevision:      "version-1",
			ObservedGeneration:   1,
			// use pointer.Int32 (Int32Ptr is a deprecated alias)
			CollisionCount: pointer.Int32(1),
		},
	}

	releaseDemo = &v1beta1.BatchRelease{
		TypeMeta: metav1.TypeMeta{
			// NOTE(review): the object is a v1beta1.BatchRelease but the
			// TypeMeta says v1alpha1 — confirm this is intentional for the
			// controller-ref round-trip in getControlInfo
			APIVersion: "rollouts.kruise.io/v1alpha1",
			Kind:       "BatchRelease",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "release",
			Namespace: cloneKey.Namespace,
			UID:       uuid.NewUUID(),
		},
		Spec: v1beta1.BatchReleaseSpec{
			ReleasePlan: v1beta1.ReleasePlan{
				Batches: []v1beta1.ReleaseBatch{
					{
						CanaryReplicas: intstr.FromString("10%"),
					},
					{
						CanaryReplicas: intstr.FromString("50%"),
					},
					{
						CanaryReplicas: intstr.FromString("100%"),
					},
				},
			},
			WorkloadRef: v1beta1.ObjectRef{
				APIVersion: cloneDemo.APIVersion,
				Kind:       cloneDemo.Kind,
				Name:       cloneDemo.Name,
			},
		},
		Status: v1beta1.BatchReleaseStatus{
			CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
				CurrentBatch: 0,
			},
		},
	}
)
// init registers every API group the fake client in these tests must know:
// built-in apps/v1, the rollout CRDs, and the Kruise CRDs (CloneSet).
func init() {
	apps.AddToScheme(scheme)
	rolloutapi.AddToScheme(scheme)
	kruiseappsv1alpha1.AddToScheme(scheme)
}
// TestCalculateBatchContext verifies the surge math of
// realController.CalculateBatchContext across the three batches of a
// 50%/100%/100% plan: DesiredSurge comes from the plan, CurrentSurge is read
// back from the CloneSet's maxSurge (with the sentinel value 1 treated as 0),
// and Planned/DesiredUpdatedReplicas are the scaled absolute counts.
func TestCalculateBatchContext(t *testing.T) {
	RegisterFailHandler(Fail)
	cases := map[string]struct {
		workload func() *kruiseappsv1alpha1.CloneSet
		release  func() *v1beta1.BatchRelease
		result   *batchcontext.BatchContext
	}{
		// batch 0: maxSurge is the initial value 1, so CurrentSurge collapses to 0
		"normal case batch0": {
			workload: func() *kruiseappsv1alpha1.CloneSet {
				return &kruiseappsv1alpha1.CloneSet{
					Spec: kruiseappsv1alpha1.CloneSetSpec{
						Replicas: pointer.Int32Ptr(10),
						UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
							MaxSurge: func() *intstr.IntOrString { p := intstr.FromInt(1); return &p }(),
						},
					},
					Status: kruiseappsv1alpha1.CloneSetStatus{
						Replicas:             10,
						UpdatedReplicas:      0,
						UpdatedReadyReplicas: 0,
						AvailableReplicas:    10,
					},
				}
			},
			release: func() *v1beta1.BatchRelease {
				r := &v1beta1.BatchRelease{
					Spec: v1beta1.BatchReleaseSpec{
						ReleasePlan: v1beta1.ReleasePlan{
							Batches: []v1beta1.ReleaseBatch{
								{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}},
								{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
								{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
							},
						},
					},
					Status: v1beta1.BatchReleaseStatus{
						CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
							CurrentBatch: 0,
						},
						UpdateRevision: "update-version",
					},
				}
				return r
			},
			result: &batchcontext.BatchContext{
				CurrentBatch:           0,
				DesiredSurge:           intstr.FromString("50%"),
				CurrentSurge:           intstr.FromInt(0),
				Replicas:               10,
				UpdatedReplicas:        0,
				UpdatedReadyReplicas:   0,
				UpdateRevision:         "update-version",
				PlannedUpdatedReplicas: 5,
				DesiredUpdatedReplicas: 5,
			},
		},
		// batch 1: previous batch left maxSurge at 50%, plan moves to 100%
		"normal case batch1": {
			workload: func() *kruiseappsv1alpha1.CloneSet {
				return &kruiseappsv1alpha1.CloneSet{
					Spec: kruiseappsv1alpha1.CloneSetSpec{
						Replicas: pointer.Int32(10),
						UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
							MaxSurge: func() *intstr.IntOrString { p := intstr.FromString("50%"); return &p }(),
						},
					},
					Status: kruiseappsv1alpha1.CloneSetStatus{
						Replicas:             15,
						UpdatedReplicas:      5,
						UpdatedReadyReplicas: 5,
						AvailableReplicas:    10,
					},
				}
			},
			release: func() *v1beta1.BatchRelease {
				r := &v1beta1.BatchRelease{
					Spec: v1beta1.BatchReleaseSpec{
						ReleasePlan: v1beta1.ReleasePlan{
							Batches: []v1beta1.ReleaseBatch{
								{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}},
								{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
								{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
							},
						},
					},
					Status: v1beta1.BatchReleaseStatus{
						CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
							CurrentBatch: 1,
						},
						UpdateRevision: "update-version",
					},
				}
				return r
			},
			result: &batchcontext.BatchContext{
				CurrentBatch:           1,
				DesiredSurge:           intstr.FromString("100%"),
				CurrentSurge:           intstr.FromString("50%"),
				Replicas:               10,
				UpdatedReplicas:        5,
				UpdatedReadyReplicas:   5,
				UpdateRevision:         "update-version",
				PlannedUpdatedReplicas: 10,
				DesiredUpdatedReplicas: 10,
			},
		},
		// batch 2: already at 100% surge, desired stays 100%
		"normal case batch2": {
			workload: func() *kruiseappsv1alpha1.CloneSet {
				return &kruiseappsv1alpha1.CloneSet{
					Spec: kruiseappsv1alpha1.CloneSetSpec{
						Replicas: pointer.Int32Ptr(10),
						UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{
							MaxSurge: func() *intstr.IntOrString { p := intstr.FromString("100%"); return &p }(),
						},
					},
					Status: kruiseappsv1alpha1.CloneSetStatus{
						Replicas:             20,
						UpdatedReplicas:      10,
						UpdatedReadyReplicas: 10,
						AvailableReplicas:    10,
						ReadyReplicas:        20,
					},
				}
			},
			release: func() *v1beta1.BatchRelease {
				r := &v1beta1.BatchRelease{
					Spec: v1beta1.BatchReleaseSpec{
						ReleasePlan: v1beta1.ReleasePlan{
							Batches: []v1beta1.ReleaseBatch{
								{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}},
								{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
								{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}},
							},
						},
					},
					Status: v1beta1.BatchReleaseStatus{
						CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
							CurrentBatch: 2,
						},
						UpdateRevision: "update-version",
					},
				}
				return r
			},
			result: &batchcontext.BatchContext{
				CurrentBatch:           2,
				UpdateRevision:         "update-version",
				DesiredSurge:           intstr.FromString("100%"),
				CurrentSurge:           intstr.FromString("100%"),
				Replicas:               10,
				UpdatedReplicas:        10,
				UpdatedReadyReplicas:   10,
				PlannedUpdatedReplicas: 10,
				DesiredUpdatedReplicas: 10,
			},
		},
	}
	for name, cs := range cases {
		t.Run(name, func(t *testing.T) {
			control := realController{
				object:       cs.workload(),
				WorkloadInfo: util.ParseWorkload(cs.workload()),
			}
			got, err := control.CalculateBatchContext(cs.release())
			Expect(err).NotTo(HaveOccurred())
			// compare via Log() so field ordering/formatting is normalized
			Expect(got.Log()).Should(Equal(cs.result.Log()))
		})
	}
}
// TestRealController walks a CloneSet through the full bluegreen lifecycle
// against a fake client: Initialize (strategy rewritten, original settings
// stashed in annotations), UpgradeBatch (maxSurge raised to the first batch's
// 10%), and Finalize (original strategy restored).
func TestRealController(t *testing.T) {
	RegisterFailHandler(Fail)
	release := releaseDemo.DeepCopy()
	clone := cloneDemo.DeepCopy()
	// for unit test we should set some default value since no webhook or controller is working
	clone.Spec.UpdateStrategy.Type = kruiseappsv1alpha1.RecreateCloneSetUpdateStrategyType
	cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(release, clone).Build()
	// build new controller
	c := NewController(cli, cloneKey, clone.GroupVersionKind()).(*realController)
	controller, err := c.BuildController()
	Expect(err).NotTo(HaveOccurred())
	// call Initialize
	err = controller.Initialize(release)
	Expect(err).NotTo(HaveOccurred())
	fetch := &kruiseappsv1alpha1.CloneSet{}
	Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred())
	// check strategy
	Expect(fetch.Spec.UpdateStrategy.Type).Should(Equal(kruiseappsv1alpha1.RecreateCloneSetUpdateStrategyType))
	// partition is set to 100% in webhook, therefore we cannot observe it in unit test
	// Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.Partition, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue())
	Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue())
	Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue())
	Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds)))
	// check annotations: control ownership plus the stashed original strategy
	Expect(fetch.Annotations[util.BatchReleaseControlAnnotation]).Should(Equal(getControlInfo(release)))
	Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{
		MaxUnavailable:  &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
		MaxSurge:        &intstr.IntOrString{Type: intstr.String, StrVal: "0%"},
		MinReadySeconds: 0,
	})))
	c.object = fetch // mock
	// retry UpgradeBatch until it succeeds, re-fetching the object each
	// round to mimic the informer cache catching up
	// NOTE(review): this loop has no iteration cap — if UpgradeBatch keeps
	// failing, the test hangs rather than fails; confirm this is acceptable
	for {
		batchContext, err := controller.CalculateBatchContext(release)
		Expect(err).NotTo(HaveOccurred())
		err = controller.UpgradeBatch(batchContext)
		fetch = &kruiseappsv1alpha1.CloneSet{}
		// mock
		Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred())
		c.object = fetch
		if err == nil {
			break
		}
	}
	fetch = &kruiseappsv1alpha1.CloneSet{}
	Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred())
	// after batch 0, maxSurge should equal the plan's 10%
	Expect(fetch.Spec.UpdateStrategy.MaxSurge.StrVal).Should(Equal("10%"))
	Expect(fetch.Spec.UpdateStrategy.MaxUnavailable.IntVal).Should(Equal(int32(0)))
	Expect(fetch.Spec.UpdateStrategy.Paused).Should(Equal(false))
	Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds)))
	Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{
		MaxUnavailable:  &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
		MaxSurge:        &intstr.IntOrString{Type: intstr.String, StrVal: "0%"},
		MinReadySeconds: 0,
	})))
	// Finalize restores the stashed strategy
	// NOTE(review): Finalize's returned error is ignored here — TODO confirm
	controller.Finalize(release)
	fetch = &kruiseappsv1alpha1.CloneSet{}
	Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred())
	Expect(fetch.Spec.UpdateStrategy.MaxSurge.StrVal).Should(Equal("0%"))
	Expect(fetch.Spec.UpdateStrategy.MaxUnavailable.IntVal).Should(Equal(int32(1)))
	Expect(fetch.Spec.UpdateStrategy.Paused).Should(Equal(false))
	Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(0)))
}
// getControlInfo renders the controller-reference JSON that Initialize stamps
// into the workload's control annotation, for comparison in assertions.
func getControlInfo(release *v1beta1.BatchRelease) string {
	ref := metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind())
	data, _ := json.Marshal(ref)
	return string(data)
}

View File

@ -0,0 +1,178 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bluegreenstyle
import (
"github.com/openkruise/rollouts/api/v1beta1"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/labelpatch"
"github.com/openkruise/rollouts/pkg/util"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// realBatchControlPlane drives the batch release state machine for
// bluegreen-style releases. It embeds a workload-specific Interface
// (Deployment or CloneSet) alongside the shared client and event recorder.
type realBatchControlPlane struct {
	Interface
	client.Client
	record.EventRecorder
	// patcher applies batch labels to pods
	patcher labelpatch.LabelPatcher
	// release is a deep copy of the BatchRelease being reconciled
	release *v1beta1.BatchRelease
	// newStatus is the status the caller will write back after reconciling
	newStatus *v1beta1.BatchReleaseStatus
}
// NewInterfaceFunc constructs the workload-specific bluegreen controller for the object identified by key and gvk.
type NewInterfaceFunc func(cli client.Client, key types.NamespacedName, gvk schema.GroupVersionKind) Interface
// NewControlPlane creates a new release controller with bluegreen-style to drive batch release state machine.
// f selects the workload-specific implementation; release is deep-copied so
// later mutations by the caller do not leak into this controller.
func NewControlPlane(f NewInterfaceFunc, cli client.Client, recorder record.EventRecorder, release *v1beta1.BatchRelease, newStatus *v1beta1.BatchReleaseStatus, key types.NamespacedName, gvk schema.GroupVersionKind) *realBatchControlPlane {
	return &realBatchControlPlane{
		Client:        cli,
		EventRecorder: recorder,
		newStatus:     newStatus,
		Interface:     f(cli, key, gvk),
		release:       release.DeepCopy(),
		patcher:       labelpatch.NewLabelPatcher(cli, klog.KObj(release)),
	}
}
// Initialize claims the workload for this release and records its revisions
// and replica count into the new status.
func (rc *realBatchControlPlane) Initialize() error {
	controller, err := rc.BuildController()
	if err != nil {
		return err
	}
	// claim workload under our control
	if err := controller.Initialize(rc.release); err != nil {
		return err
	}
	// record revision and replicas
	info := controller.GetWorkloadInfo()
	rc.newStatus.StableRevision = info.Status.StableRevision
	rc.newStatus.UpdateRevision = info.Status.UpdateRevision
	rc.newStatus.ObservedWorkloadReplicas = info.Replicas
	return nil
}
// UpgradeBatch advances the workload to the current batch's desired surge.
// A zero-replica workload is a no-op.
func (rc *realBatchControlPlane) UpgradeBatch() error {
	controller, err := rc.BuildController()
	if err != nil {
		return err
	}
	if controller.GetWorkloadInfo().Replicas == 0 {
		return nil
	}
	batchCtx, err := controller.CalculateBatchContext(rc.release)
	if err != nil {
		return err
	}
	klog.Infof("BatchRelease %v calculated context when upgrade batch: %s",
		klog.KObj(rc.release), batchCtx.Log())
	return controller.UpgradeBatch(batchCtx)
}
// CheckBatchReady returns nil when the current batch has reached its target
// (enough updated pods are ready); otherwise it returns an error describing
// what is still pending. A zero-replica workload is trivially ready.
func (rc *realBatchControlPlane) CheckBatchReady() error {
	controller, err := rc.BuildController()
	if err != nil {
		return err
	}
	if controller.GetWorkloadInfo().Replicas == 0 {
		return nil
	}
	// do not countAndUpdateNoNeedUpdateReplicas when checking,
	// the target calculated should be consistent with UpgradeBatch.
	batchContext, err := controller.CalculateBatchContext(rc.release)
	if err != nil {
		return err
	}
	klog.Infof("BatchRelease %v calculated context when check batch ready: %s",
		klog.KObj(rc.release), batchContext.Log())
	return batchContext.IsBatchReady()
}
// Finalize releases the workload from this controller's ownership and cleans
// up any resources it created. A missing workload is treated as success.
func (rc *realBatchControlPlane) Finalize() error {
	controller, err := rc.BuildController()
	if err != nil {
		// workload already gone: nothing to release
		return client.IgnoreNotFound(err)
	}
	// release workload control info and clean up resources if it needs
	return controller.Finalize(rc.release)
}
// SyncWorkloadInformation classifies the workload's current state relative to
// the release plan. The branch order matters: reconciling-in-progress masks
// every other signal, then promotion, scaling, rollback, and finally a
// pod-template change are checked in turn.
func (rc *realBatchControlPlane) SyncWorkloadInformation() (control.WorkloadEventType, *util.WorkloadInfo, error) {
	// ignore the sync if the release plan is deleted
	if rc.release.DeletionTimestamp != nil {
		return control.WorkloadNormalState, nil, nil
	}
	controller, err := rc.BuildController()
	if err != nil {
		if errors.IsNotFound(err) {
			return control.WorkloadHasGone, nil, err
		}
		return control.WorkloadUnknownState, nil, err
	}
	workloadInfo := controller.GetWorkloadInfo()
	if !workloadInfo.IsStable() {
		klog.Infof("Workload(%v) still reconciling, waiting for it to complete, generation: %v, observed: %v",
			workloadInfo.LogKey, workloadInfo.Generation, workloadInfo.Status.ObservedGeneration)
		return control.WorkloadStillReconciling, workloadInfo, nil
	}
	if workloadInfo.IsPromoted() {
		klog.Infof("Workload(%v) has been promoted, no need to rollout again actually, replicas: %v, updated: %v",
			workloadInfo.LogKey, workloadInfo.Replicas, workloadInfo.Status.UpdatedReadyReplicas)
		return control.WorkloadNormalState, workloadInfo, nil
	}
	if workloadInfo.IsScaling(rc.newStatus.ObservedWorkloadReplicas) {
		klog.Warningf("Workload(%v) replicas is modified, replicas from: %v to -> %v",
			workloadInfo.LogKey, rc.newStatus.ObservedWorkloadReplicas, workloadInfo.Replicas)
		return control.WorkloadReplicasChanged, workloadInfo, nil
	}
	if workloadInfo.IsRollback(rc.newStatus.StableRevision, rc.newStatus.UpdateRevision) {
		klog.Warningf("Workload(%v) is rolling back", workloadInfo.LogKey)
		return control.WorkloadRollbackInBatch, workloadInfo, nil
	}
	if workloadInfo.IsRevisionNotEqual(rc.newStatus.UpdateRevision) {
		klog.Warningf("Workload(%v) updateRevision is modified, updateRevision from: %v to -> %v",
			workloadInfo.LogKey, rc.newStatus.UpdateRevision, workloadInfo.Status.UpdateRevision)
		return control.WorkloadPodTemplateChanged, workloadInfo, nil
	}
	return control.WorkloadNormalState, workloadInfo, nil
}

View File

@ -0,0 +1,303 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"context"
"fmt"
"github.com/openkruise/rollouts/api/v1alpha1"
"github.com/openkruise/rollouts/api/v1beta1"
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle"
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/hpa"
deploymentutil "github.com/openkruise/rollouts/pkg/controller/deployment/util"
"github.com/openkruise/rollouts/pkg/util"
"github.com/openkruise/rollouts/pkg/util/errors"
"github.com/openkruise/rollouts/pkg/util/patch"
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog/v2"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// realController implements the bluegreen Interface for native Deployments.
// It lazily fetches and caches the Deployment (see BuildController) and its
// owned pods.
type realController struct {
	// parsed view of the cached Deployment (nil until BuildController runs)
	*util.WorkloadInfo
	client client.Client
	// cached owned pods (nil until ListOwnedPods runs)
	pods []*corev1.Pod
	// namespaced name of the target Deployment
	key types.NamespacedName
	// cached Deployment object (nil until BuildController runs)
	object *apps.Deployment
}
// NewController returns a Deployment-backed bluegreen controller for the
// object at key. The GVK parameter is unused: this implementation is always
// a native apps/v1 Deployment.
func NewController(cli client.Client, key types.NamespacedName, _ schema.GroupVersionKind) bluegreenstyle.Interface {
	return &realController{
		key:    key,
		client: cli,
	}
}
// GetWorkloadInfo returns the parsed workload view; nil until BuildController has run.
func (rc *realController) GetWorkloadInfo() *util.WorkloadInfo {
	return rc.WorkloadInfo
}
// BuildController fetches the target Deployment (once) and builds its
// WorkloadInfo. Subsequent calls reuse the cached object.
func (rc *realController) BuildController() (bluegreenstyle.Interface, error) {
	if rc.object != nil {
		return rc, nil
	}
	object := &apps.Deployment{}
	if err := rc.client.Get(context.TODO(), rc.key, object); err != nil {
		return rc, err
	}
	rc.object = object
	rc.WorkloadInfo = rc.getWorkloadInfo(object)
	return rc, nil
}
// ListOwnedPods returns the pods owned by the cached Deployment, listing them
// at most once per controller instance.
func (rc *realController) ListOwnedPods() ([]*corev1.Pod, error) {
	if rc.pods != nil {
		return rc.pods, nil
	}
	var err error
	rc.pods, err = util.ListOwnedPods(rc.client, rc.object)
	return rc.pods, err
}
// Add OriginalDeploymentStrategyAnnotation to workload.
//
// Initialize claims the Deployment for this release: it disables any HPA
// targeting it, stashes the original strategy settings in an annotation so
// Finalize can restore them, and patches the strategy to the bluegreen
// starting point (maxSurge=1, maxUnavailable=0, maxed-out
// minReadySeconds/progressDeadlineSeconds). Idempotent: a Deployment already
// controlled by this release is left untouched.
func (rc *realController) Initialize(release *v1beta1.BatchRelease) error {
	if rc.object == nil || control.IsControlledByBatchRelease(release, rc.object) {
		return nil
	}
	// disable the hpa
	if err := hpa.DisableHPA(rc.client, rc.object); err != nil {
		return err
	}
	klog.InfoS("Initialize: disable hpa for deployment successfully", "deployment", klog.KObj(rc.object))
	// update the deployment
	setting, err := control.GetOriginalSetting(rc.object)
	if err != nil {
		// fixed: this is the deployment controller, not cloneset
		return errors.NewFatalError(fmt.Errorf("cannot get original setting for deployment %v: %s from annotation", klog.KObj(rc.object), err.Error()))
	}
	control.InitOriginalSetting(&setting, rc.object)
	klog.InfoS("Initialize deployment", "deployment", klog.KObj(rc.object), "setting", util.DumpJSON(&setting))
	patchData := patch.NewDeploymentPatch()
	patchData.InsertAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation, util.DumpJSON(&setting))
	patchData.InsertAnnotation(util.BatchReleaseControlAnnotation, util.DumpJSON(metav1.NewControllerRef(
		release, release.GetObjectKind().GroupVersionKind())))
	// update: MinReadySeconds, ProgressDeadlineSeconds, MaxSurge, MaxUnavailable
	patchData.UpdateStrategy(apps.DeploymentStrategy{
		Type: apps.RollingUpdateDeploymentStrategyType,
		RollingUpdate: &apps.RollingUpdateDeployment{
			MaxSurge:       &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
			MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 0},
		},
	})
	patchData.UpdateMinReadySeconds(v1beta1.MaxReadySeconds)
	patchData.UpdateProgressDeadlineSeconds(utilpointer.Int32(v1beta1.MaxProgressSeconds))
	return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
}
// UpgradeBatch raises the Deployment's maxSurge to the batch's desired surge.
// It is a no-op when the currently configured surge already covers the
// desired one (surge only ever grows batch to batch in bluegreen).
func (rc *realController) UpgradeBatch(ctx *batchcontext.BatchContext) error {
	if err := control.ValidateReadyForBlueGreenRelease(rc.object); err != nil {
		return errors.NewFatalError(fmt.Errorf("cannot upgrade batch, because deployment %v doesn't satisfy conditions: %s", klog.KObj(rc.object), err.Error()))
	}
	// compare desired and current surge as absolute pod counts (rounding up)
	desired, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.DesiredSurge, int(ctx.Replicas), true)
	current, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.CurrentSurge, int(ctx.Replicas), true)
	if current >= desired {
		klog.Infof("No need to upgrade batch for deployment %v: because current %d >= desired %d", klog.KObj(rc.object), current, desired)
		return nil
	}
	klog.Infof("Ready to upgrade batch for deployment %v: current %d < desired %d", klog.KObj(rc.object), current, desired)
	patchData := patch.NewDeploymentPatch()
	// different with canary release, bluegreen don't need to set pause in the process of rollout
	patchData.UpdatePaused(false)
	patchData.UpdateStrategy(apps.DeploymentStrategy{
		Type: apps.RollingUpdateDeploymentStrategyType,
		RollingUpdate: &apps.RollingUpdateDeployment{
			MaxSurge:       &ctx.DesiredSurge,
			MaxUnavailable: &intstr.IntOrString{},
		},
	})
	return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
}
// set pause to false, restore the original setting, delete annotation.
//
// Finalize runs a two-phase, MinReadySeconds-keyed state machine:
// phase 1 restores the HPA and the original strategy settings, then returns a
// benign error to force a re-enqueue; phase 2 (recognized because
// MinReadySeconds already matches the original) waits for all pods to be
// updated and available, then strips the release annotations/labels.
func (rc *realController) Finalize(release *v1beta1.BatchRelease) error {
	if rc.finalized() {
		return nil // No need to finalize again.
	}
	if release.Spec.ReleasePlan.BatchPartition != nil {
		// continuous release (not supported yet)
		/*
			patchData := patch.NewDeploymentPatch()
			patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation)
			if err := rc.client.Patch(context.TODO(), d, patchData); err != nil {
				return err
			}
		*/
		klog.Warningf("continuous release is not supported yet for bluegreen style release")
		return nil
	}
	d := util.GetEmptyObjectWithKey(rc.object)
	setting, err := control.GetOriginalSetting(rc.object)
	if err != nil {
		// fixed: this is the deployment controller, not cloneset
		return errors.NewFatalError(fmt.Errorf("cannot get original setting for deployment %v: %s from annotation", klog.KObj(rc.object), err.Error()))
	}
	patchData := patch.NewDeploymentPatch()
	// why we need a simple MinReadySeconds-based status machine? (ie. the if-else block)
	// It's possible for Finalize to be called multiple times, if error returned is not nil.
	// if we do all needed operations in a single code block, like, A->B->C, when C need retry,
	// both A and B will be executed as well, however, operations like restoreHPA cost a lot(which calls LIST API)
	if rc.object.Spec.MinReadySeconds != setting.MinReadySeconds {
		// restore the hpa
		if err := hpa.RestoreHPA(rc.client, rc.object); err != nil {
			return err
		}
		// restore the original setting
		patchData.UpdatePaused(false)
		patchData.UpdateMinReadySeconds(setting.MinReadySeconds)
		patchData.UpdateProgressDeadlineSeconds(setting.ProgressDeadlineSeconds)
		patchData.UpdateMaxSurge(setting.MaxSurge)
		patchData.UpdateMaxUnavailable(setting.MaxUnavailable)
		if err := rc.client.Patch(context.TODO(), d, patchData); err != nil {
			return err
		}
		// we should return an error to trigger re-enqueue, so that we can go to the next if-else branch in the next reconcile
		return errors.NewBenignError(fmt.Errorf("deployment bluegreen: we should wait all pods updated and available"))
	} else {
		// fixed: log key said "cloneset" in the deployment controller
		klog.InfoS("Finalize: deployment bluegreen release: wait all pods updated and ready", "deployment", klog.KObj(rc.object))
		// wait all pods updated and ready
		if err := waitAllUpdatedAndReady(d.(*apps.Deployment)); err != nil {
			return errors.NewBenignError(err)
		}
		klog.InfoS("Finalize: deployment is ready to resume, restore the original setting", "deployment", klog.KObj(rc.object))
		// restore label and annotation
		patchData.DeleteAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation)
		patchData.DeleteLabel(v1alpha1.DeploymentStableRevisionLabel)
		patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation)
		return rc.client.Patch(context.TODO(), d, patchData)
	}
}
// finalized reports whether cleanup is already complete for this Deployment:
// the object is missing (or terminating), or the stashed original-strategy
// annotation has been removed.
func (rc *realController) finalized() bool {
	obj := rc.object
	if obj == nil || obj.DeletionTimestamp != nil {
		return true
	}
	// nil-map indexing returns "", so this also covers a nil Annotations map
	return len(obj.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]) == 0
}
// CalculateBatchContext assembles the BatchContext for the release's current
// batch: the desired surge comes from the release plan, the current surge is
// read back from the Deployment's rollingUpdate.maxSurge, and the planned
// updated-replica count is derived via NewRSReplicasLimit.
func (rc *realController) CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error) {
	batchIndex := release.Status.CanaryStatus.CurrentBatch
	desiredSurge := release.Spec.ReleasePlan.Batches[batchIndex].CanaryReplicas
	plannedUpdated := deploymentutil.NewRSReplicasLimit(desiredSurge, rc.object)

	currentSurge := intstr.FromInt(0)
	if ru := rc.object.Spec.Strategy.RollingUpdate; ru != nil && ru.MaxSurge != nil {
		currentSurge = *ru.MaxSurge
		// an int value of exactly 1 is the initial value stamped at
		// Initialize time; treating a genuine user-set 1 as 0 is harmless
		if currentSurge == intstr.FromInt(1) {
			currentSurge = intstr.FromInt(0)
		}
	}

	return &batchcontext.BatchContext{
		Pods:                   rc.pods,
		RolloutID:              release.Spec.ReleasePlan.RolloutID,
		CurrentBatch:           batchIndex,
		CurrentSurge:           currentSurge,
		DesiredSurge:           desiredSurge,
		UpdateRevision:         release.Status.UpdateRevision,
		Replicas:               rc.Replicas,
		UpdatedReplicas:        rc.Status.UpdatedReplicas,
		UpdatedReadyReplicas:   rc.Status.UpdatedReadyReplicas,
		PlannedUpdatedReplicas: plannedUpdated,
		DesiredUpdatedReplicas: plannedUpdated,
	}, nil
}
// getWorkloadInfo parses the Deployment into a WorkloadInfo and enriches it
// with two fields the generic parser cannot supply: the updated-ready replica
// count (computed from the new ReplicaSet) and the stable revision label.
func (rc *realController) getWorkloadInfo(d *apps.Deployment) *util.WorkloadInfo {
	workloadInfo := util.ParseWorkload(d)
	workloadInfo.Status.UpdatedReadyReplicas = 0
	// best effort: on list error the count stays 0
	if res, err := rc.getUpdatedReadyReplicas(d); err == nil {
		workloadInfo.Status.UpdatedReadyReplicas = res
	}
	workloadInfo.Status.StableRevision = d.Labels[v1alpha1.DeploymentStableRevisionLabel]
	return workloadInfo
}
// getUpdatedReadyReplicas returns the ReadyReplicas of the Deployment's new
// (updated-template) ReplicaSet, or 0 if that ReplicaSet does not exist yet
// (e.g. the Deployment is paused). Returns -1 and the error when listing
// ReplicaSets fails.
func (rc *realController) getUpdatedReadyReplicas(d *apps.Deployment) (int32, error) {
	rss := &apps.ReplicaSetList{}
	listOpts := []client.ListOption{
		client.InNamespace(d.Namespace),
		client.MatchingLabels(d.Spec.Selector.MatchLabels),
		client.UnsafeDisableDeepCopy,
	}
	if err := rc.client.List(context.TODO(), rss, listOpts...); err != nil {
		// fixed: message named the wrong function and had a broken concatenation
		klog.Warningf("getUpdatedReadyReplicas failed to list ReplicaSets: %s", err.Error())
		return -1, err
	}
	allRSs := rss.Items
	// select rs owned by current deployment
	ownedRSs := make([]*apps.ReplicaSet, 0)
	for i := range allRSs {
		rs := &allRSs[i]
		if !rs.DeletionTimestamp.IsZero() {
			continue
		}
		if metav1.IsControlledBy(rs, d) {
			ownedRSs = append(ownedRSs, rs)
		}
	}
	newRS := deploymentutil.FindNewReplicaSet(d, ownedRSs)
	updatedReadyReplicas := int32(0)
	// if newRS is nil, it means the replicaset hasn't been created (because the deployment is paused)
	// therefore we can return 0 directly
	if newRS != nil {
		updatedReadyReplicas = newRS.Status.ReadyReplicas
	}
	return updatedReadyReplicas, nil
}
// waitAllUpdatedAndReady returns nil only when the Deployment is unpaused,
// every ready replica is on the updated template, and availability satisfies
// the maxUnavailable budget; otherwise it returns an error naming the
// unmet condition.
func waitAllUpdatedAndReady(deployment *apps.Deployment) error {
	if deployment.Spec.Paused {
		return fmt.Errorf("deployment should not be paused")
	}
	status := deployment.Status
	// ALL pods updated AND ready
	if status.ReadyReplicas != status.UpdatedReplicas {
		return fmt.Errorf("all ready replicas should be updated, and all updated replicas should be ready")
	}
	allowedUnavailable := util.DeploymentMaxUnavailable(deployment)
	if status.AvailableReplicas+allowedUnavailable < status.Replicas {
		return fmt.Errorf("ready replicas should satisfy maxUnavailable")
	}
	return nil
}

View File

@ -0,0 +1,541 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"context"
"encoding/json"
"fmt"
"reflect"
"strconv"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
rolloutapi "github.com/openkruise/rollouts/api"
"github.com/openkruise/rollouts/api/v1beta1"
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
control "github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
"github.com/openkruise/rollouts/pkg/util"
"github.com/openkruise/rollouts/pkg/util/errors"
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
// Shared fixtures for the deployment blue-green batch-release controller tests.
var (
	// scheme backs every fake client in this file; the API groups it needs
	// are registered in init below.
	scheme = runtime.NewScheme()

	// deploymentKey locates the Deployment fixture in the fake cluster.
	deploymentKey = types.NamespacedName{
		Name:      "deployment",
		Namespace: "default",
	}

	// deploymentDemo is a paused RollingUpdate Deployment (10 replicas, all
	// ready, none updated; maxUnavailable=1, maxSurge=20%) used as the
	// starting workload in TestRealController.
	deploymentDemo = &apps.Deployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:       deploymentKey.Name,
			Namespace:  deploymentKey.Namespace,
			Generation: 1,
			Labels: map[string]string{
				"app": "busybox",
			},
			Annotations: map[string]string{
				"type": "unit-test",
			},
		},
		Spec: apps.DeploymentSpec{
			Paused: true,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "busybox",
				},
			},
			Replicas: pointer.Int32(10),
			Strategy: apps.DeploymentStrategy{
				Type: apps.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &apps.RollingUpdateDeployment{
					MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
					MaxSurge:       &intstr.IntOrString{Type: intstr.String, StrVal: "20%"},
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "busybox",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:  "busybox",
							Image: "busybox:latest",
						},
					},
				},
			},
		},
		Status: apps.DeploymentStatus{
			Replicas:           10,
			UpdatedReplicas:    0,
			ReadyReplicas:      10,
			AvailableReplicas:  10,
			CollisionCount:     pointer.Int32Ptr(1),
			ObservedGeneration: 1,
		},
	}

	// deploymentDemo2 mirrors deploymentDemo at Generation 2 with a v2 pod
	// template, a pod-template-hash label, and maxSurge=1/maxUnavailable=0;
	// it is the base workload for TestCalculateBatchContext.
	deploymentDemo2 = &apps.Deployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: apps.SchemeGroupVersion.String(),
			Kind:       "Deployment",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:       "deployment",
			Namespace:  "default",
			UID:        types.UID("87076677"),
			Generation: 2,
			Labels: map[string]string{
				"app":                                "busybox",
				apps.DefaultDeploymentUniqueLabelKey: "update-pod-hash",
			},
		},
		Spec: apps.DeploymentSpec{
			Replicas: pointer.Int32Ptr(10),
			Strategy: apps.DeploymentStrategy{
				Type: apps.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &apps.RollingUpdateDeployment{
					MaxSurge:       &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
					MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(0)},
				},
			},
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "busybox",
				},
			},
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: containers("v2"),
				},
			},
		},
		Status: apps.DeploymentStatus{
			Replicas:          10,
			ReadyReplicas:     10,
			UpdatedReplicas:   0,
			AvailableReplicas: 10,
		},
	}

	// releaseDemo is a three-batch (10%/50%/100%) BatchRelease targeting
	// deploymentDemo, already positioned at batch index 1.
	// NOTE(review): TypeMeta claims rollouts.kruise.io/v1alpha1 while the
	// struct is v1beta1 — presumably harmless here since the fake client
	// keys off the Go type; confirm and align if the apiVersion matters.
	releaseDemo = &v1beta1.BatchRelease{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "rollouts.kruise.io/v1alpha1",
			Kind:       "BatchRelease",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "release",
			Namespace: deploymentKey.Namespace,
			UID:       uuid.NewUUID(),
		},
		Spec: v1beta1.BatchReleaseSpec{
			ReleasePlan: v1beta1.ReleasePlan{
				FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType,
				Batches: []v1beta1.ReleaseBatch{
					{
						CanaryReplicas: intstr.FromString("10%"),
					},
					{
						CanaryReplicas: intstr.FromString("50%"),
					},
					{
						CanaryReplicas: intstr.FromString("100%"),
					},
				},
			},
			WorkloadRef: v1beta1.ObjectRef{
				APIVersion: deploymentDemo.APIVersion,
				Kind:       deploymentDemo.Kind,
				Name:       deploymentDemo.Name,
			},
		},
		Status: v1beta1.BatchReleaseStatus{
			CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
				CurrentBatch: 1,
			},
		},
	}
)
func init() {
apps.AddToScheme(scheme)
rolloutapi.AddToScheme(scheme)
kruiseappsv1alpha1.AddToScheme(scheme)
}
// TestCalculateBatchContext verifies that the deployment controller derives
// the correct batch context (current/desired surge, replica counters, and
// planned/desired updated replicas) from a BatchRelease plan combined with
// the live Deployment and its canary/stable ReplicaSets.
func TestCalculateBatchContext(t *testing.T) {
	RegisterFailHandler(Fail)
	cases := map[string]struct {
		workload func() []client.Object
		release  func() *v1beta1.BatchRelease
		result   *batchcontext.BatchContext
	}{
		// 10 desired replicas, mid-rollout at batch 1 (100% target) while the
		// workload still carries the batch-0 surge of 50%.
		"noraml case": {
			workload: func() []client.Object {
				deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment)
				deployment.Status = apps.DeploymentStatus{
					Replicas:          15,
					UpdatedReplicas:   5,
					AvailableReplicas: 12,
					ReadyReplicas:     12,
				}
				// current partition, ie. maxSurge
				deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "50%"}
				deployment.Spec.Replicas = pointer.Int32Ptr(10)
				newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet)
				newRss.Status.ReadyReplicas = 2
				return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)}
			},
			release: func() *v1beta1.BatchRelease {
				r := &v1beta1.BatchRelease{
					Spec: v1beta1.BatchReleaseSpec{
						ReleasePlan: v1beta1.ReleasePlan{
							FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType,
							Batches: []v1beta1.ReleaseBatch{
								{
									CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"},
								},
								{
									CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"},
								},
							},
						},
					},
					Status: v1beta1.BatchReleaseStatus{
						CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
							CurrentBatch: 1,
						},
						UpdateRevision: "version-2",
					},
				}
				return r
			},
			result: &batchcontext.BatchContext{
				CurrentBatch:           1,
				UpdateRevision:         "version-2",
				DesiredSurge:           intstr.IntOrString{Type: intstr.String, StrVal: "100%"},
				CurrentSurge:           intstr.IntOrString{Type: intstr.String, StrVal: "50%"},
				Replicas:               10,
				UpdatedReplicas:        5,
				UpdatedReadyReplicas:   2,
				PlannedUpdatedReplicas: 10,
				DesiredUpdatedReplicas: 10,
			},
		},
		// Percent surges that do not divide evenly: 5 replicas at 90% -> 4,
		// and the 99% target also rounds to 4 planned/desired replicas.
		"maxSurge=99%, replicas=5": {
			workload: func() []client.Object {
				deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment)
				deployment.Status = apps.DeploymentStatus{
					Replicas:          9,
					UpdatedReplicas:   4,
					AvailableReplicas: 9,
					ReadyReplicas:     9,
				}
				deployment.Spec.Replicas = pointer.Int32Ptr(5)
				// current partition, ie. maxSurge
				deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "90%"}
				newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet)
				newRss.Status.ReadyReplicas = 4
				return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)}
			},
			release: func() *v1beta1.BatchRelease {
				r := &v1beta1.BatchRelease{
					Spec: v1beta1.BatchReleaseSpec{
						ReleasePlan: v1beta1.ReleasePlan{
							FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType,
							Batches: []v1beta1.ReleaseBatch{
								{
									CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "90%"},
								},
								{
									CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "99%"},
								},
							},
						},
					},
					Status: v1beta1.BatchReleaseStatus{
						CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
							CurrentBatch: 1,
						},
						UpdateRevision: "version-2",
					},
				}
				return r
			},
			result: &batchcontext.BatchContext{
				CurrentBatch:           1,
				UpdateRevision:         "version-2",
				DesiredSurge:           intstr.FromString("99%"),
				CurrentSurge:           intstr.FromString("90%"),
				Replicas:               5,
				UpdatedReplicas:        4,
				UpdatedReadyReplicas:   4,
				PlannedUpdatedReplicas: 4,
				DesiredUpdatedReplicas: 4,
			},
		},
		// test case for continuous release
		// "maxSurge=100%, but it is initialized value": {
		// 	workload: func() []client.Object {
		// 		deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment)
		// 		deployment.Status = apps.DeploymentStatus{
		// 			Replicas:          10,
		// 			UpdatedReplicas:   0,
		// 			AvailableReplicas: 10,
		// 			ReadyReplicas:     10,
		// 		}
		// 		// current partition, ie. maxSurge
		// 		deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"}
		// 		newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet)
		// 		newRss.Status.ReadyReplicas = 0
		// 		return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)}
		// 	},
		// 	release: func() *v1beta1.BatchRelease {
		// 		r := &v1beta1.BatchRelease{
		// 			Spec: v1beta1.BatchReleaseSpec{
		// 				ReleasePlan: v1beta1.ReleasePlan{
		// 					FailureThreshold: &percent,
		// 					FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType,
		// 					Batches: []v1beta1.ReleaseBatch{
		// 						{
		// 							CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"},
		// 						},
		// 					},
		// 				},
		// 			},
		// 			Status: v1beta1.BatchReleaseStatus{
		// 				CanaryStatus: v1beta1.BatchReleaseCanaryStatus{
		// 					CurrentBatch: 0,
		// 				},
		// 				UpdateRevision: "version-2",
		// 			},
		// 		}
		// 		return r
		// 	},
		// 	result: &batchcontext.BatchContext{
		// 		CurrentBatch:           0,
		// 		UpdateRevision:         "version-2",
		// 		DesiredPartition:       intstr.FromString("50%"),
		// 		FailureThreshold:       &percent,
		// 		CurrentPartition:       intstr.FromString("0%"), // mainly check this
		// 		Replicas:               10,
		// 		UpdatedReplicas:        0,
		// 		UpdatedReadyReplicas:   0,
		// 		PlannedUpdatedReplicas: 5,
		// 		DesiredUpdatedReplicas: 5,
		// 	},
		// },
	}
	for name, cs := range cases {
		t.Run(name, func(t *testing.T) {
			// Build a fake cluster holding the workload and its ReplicaSets,
			// then compare the computed batch context against the expectation
			// via its Log() rendering.
			cliBuilder := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cs.workload()...)
			cli := cliBuilder.Build()
			control := realController{
				client: cli,
				key:    deploymentKey,
			}
			_, err := control.BuildController()
			Expect(err).NotTo(HaveOccurred())
			got, err := control.CalculateBatchContext(cs.release())
			Expect(err).NotTo(HaveOccurred())
			fmt.Printf("expect %s, but got %s", cs.result.Log(), got.Log())
			Expect(got.Log()).Should(Equal(cs.result.Log()))
		})
	}
}
// TestRealController drives the deployment controller through a full
// blue-green lifecycle against a fake client:
//  1. Initialize — pauses the workload, pins maxSurge=1/maxUnavailable=0,
//     sets MinReadySeconds/ProgressDeadlineSeconds to the rollout sentinels,
//     and stashes the original strategy in annotations;
//  2. UpgradeBatch — repeated until the second batch's 50% maxSurge lands;
//  3. Finalize — restores the original strategy and resumes the workload.
func TestRealController(t *testing.T) {
	RegisterFailHandler(Fail)
	release := releaseDemo.DeepCopy()
	clone := deploymentDemo.DeepCopy()
	cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(release, clone).Build()
	// build new controller
	c := NewController(cli, deploymentKey, clone.GroupVersionKind()).(*realController)
	controller, err := c.BuildController()
	Expect(err).NotTo(HaveOccurred())
	// call Initialize
	err = controller.Initialize(release)
	Expect(err).NotTo(HaveOccurred())
	fetch := &apps.Deployment{}
	Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred())
	// check strategy
	Expect(fetch.Spec.Paused).Should(BeTrue())
	Expect(fetch.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType))
	Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue())
	Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue())
	Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds)))
	Expect(*fetch.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds)))
	// check annotations: control info plus the saved original strategy
	// (maxUnavailable=1, maxSurge=20%, the defaults of deploymentDemo).
	Expect(fetch.Annotations[util.BatchReleaseControlAnnotation]).Should(Equal(getControlInfo(release)))
	fmt.Println(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation])
	Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{
		MaxUnavailable:          &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
		MaxSurge:                &intstr.IntOrString{Type: intstr.String, StrVal: "20%"},
		MinReadySeconds:         0,
		ProgressDeadlineSeconds: pointer.Int32(600),
	})))
	c.object = fetch // mock
	// Re-run CalculateBatchContext/UpgradeBatch until UpgradeBatch succeeds,
	// refreshing the mocked object from the fake client between attempts.
	// NOTE(review): there is no retry cap — the test hangs forever if
	// UpgradeBatch never returns nil; consider bounding the loop.
	for {
		batchContext, err := controller.CalculateBatchContext(release)
		Expect(err).NotTo(HaveOccurred())
		err = controller.UpgradeBatch(batchContext)
		fetch := &apps.Deployment{}
		// mock
		Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred())
		c.object = fetch
		if err == nil {
			break
		}
	}
	fetch = &apps.Deployment{}
	Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred())
	// currentBatch is 1, which means br is in the second batch, maxSurge is 50%
	Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue())
	release.Spec.ReleasePlan.BatchPartition = nil
	err = controller.Finalize(release)
	Expect(errors.IsBenign(err)).Should(BeTrue())
	fetch = &apps.Deployment{}
	Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred())
	// check workload strategy
	Expect(fetch.Spec.Paused).Should(BeFalse())
	Expect(fetch.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType))
	Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "20%"})).Should(BeTrue())
	Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue())
	Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(0)))
	Expect(*fetch.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600)))
}
// getControlInfo renders the controller owner-reference of the BatchRelease
// as JSON, mirroring the control-info annotation the controller writes onto
// the workload.
func getControlInfo(release *v1beta1.BatchRelease) string {
	ref := metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind())
	encoded, _ := json.Marshal(ref)
	return string(encoded)
}
// makeCanaryReplicaSets builds the ReplicaSet a Deployment controller would
// create for the workload's current (canary) pod template: labeled with the
// computed pod-template hash and owned by the Deployment.
func makeCanaryReplicaSets(d client.Object) client.Object {
	deployment := d.(*apps.Deployment)
	rsLabels := deployment.Spec.Selector.DeepCopy().MatchLabels
	rsLabels[apps.DefaultDeploymentUniqueLabelKey] = util.ComputeHash(&deployment.Spec.Template, nil)
	rs := &apps.ReplicaSet{
		TypeMeta: metav1.TypeMeta{
			APIVersion: apps.SchemeGroupVersion.String(),
			Kind:       "ReplicaSet",
		},
		ObjectMeta: metav1.ObjectMeta{
			// Random suffix keeps repeated fixtures from colliding by name.
			Name:      deployment.Name + rand.String(5),
			Namespace: deployment.Namespace,
			UID:       uuid.NewUUID(),
			Labels:    rsLabels,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(deployment, deployment.GroupVersionKind()),
			},
		},
		Spec: apps.ReplicaSetSpec{
			Replicas: deployment.Spec.Replicas,
			Selector: deployment.Spec.Selector.DeepCopy(),
			Template: *deployment.Spec.Template.DeepCopy(),
		},
	}
	return rs
}
// makeStableReplicaSets builds the ReplicaSet for the stable revision of the
// Deployment: the current pod template with its containers swapped back to
// the "v1" image, labeled with that template's hash and owned by the
// Deployment.
func makeStableReplicaSets(d client.Object) client.Object {
	deployment := d.(*apps.Deployment)
	stableTemplate := deployment.Spec.Template.DeepCopy()
	stableTemplate.Spec.Containers = containers("v1")
	rsLabels := deployment.Spec.Selector.DeepCopy().MatchLabels
	rsLabels[apps.DefaultDeploymentUniqueLabelKey] = util.ComputeHash(stableTemplate, nil)
	rs := &apps.ReplicaSet{
		TypeMeta: metav1.TypeMeta{
			APIVersion: apps.SchemeGroupVersion.String(),
			Kind:       "ReplicaSet",
		},
		ObjectMeta: metav1.ObjectMeta{
			// Random suffix keeps repeated fixtures from colliding by name.
			Name:      deployment.Name + rand.String(5),
			Namespace: deployment.Namespace,
			UID:       uuid.NewUUID(),
			Labels:    rsLabels,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(deployment, deployment.GroupVersionKind()),
			},
		},
		Spec: apps.ReplicaSetSpec{
			Replicas: deployment.Spec.Replicas,
			Selector: deployment.Spec.Selector.DeepCopy(),
			Template: *stableTemplate,
		},
	}
	return rs
}
// containers returns the single busybox container spec shared by all workload
// fixtures, tagged with the supplied image version.
func containers(version string) []corev1.Container {
	busybox := corev1.Container{
		Name:  "busybox",
		Image: fmt.Sprintf("busybox:%v", version),
	}
	return []corev1.Container{busybox}
}
// getStableWithReady returns a deep copy of the workload mutated to look like
// a paused, fully-observed stable workload running the given image version:
//   - the copy is paused and its pod template uses containers(version);
//   - Status.ObservedGeneration is synced to the original's Generation;
//   - a random ResourceVersion avoids fake-client update conflicts;
//   - for CloneSet, partition is pinned to "100%" (no pods updated yet).
//
// Unsupported workload types return nil.
// Fix: bind the type-switch variable (`switch w := ...`) instead of switching
// on the bare type and re-asserting the same value inside each case.
func getStableWithReady(workload client.Object, version string) client.Object {
	switch w := workload.(type) {
	case *apps.Deployment:
		d := w.DeepCopy()
		d.Spec.Paused = true
		d.ResourceVersion = strconv.Itoa(rand.Intn(100000000000))
		d.Spec.Template.Spec.Containers = containers(version)
		d.Status.ObservedGeneration = w.Generation
		return d
	case *kruiseappsv1alpha1.CloneSet:
		c := w.DeepCopy()
		c.ResourceVersion = strconv.Itoa(rand.Intn(100000000000))
		c.Spec.UpdateStrategy.Paused = true
		c.Spec.UpdateStrategy.Partition = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"}
		c.Spec.Template.Spec.Containers = containers(version)
		c.Status.ObservedGeneration = w.Generation
		return c
	}
	return nil
}

View File

@ -0,0 +1,106 @@
package hpa
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
	// HPADisableSuffix is appended to an HPA's scaleTargetRef name so the
	// autoscaler no longer resolves the real workload, effectively disabling
	// it for the duration of a rollout; RestoreHPA strips it again.
	// NOTE(review): the suffix contains uppercase letters while Kubernetes
	// resource names are lowercase RFC 1123, so the rewritten ref can never
	// collide with a real workload name — presumably intentional; confirm.
	HPADisableSuffix = "-DisableByRollout"
)
// DisableHPA disables any HorizontalPodAutoscaler targeting the given
// workload by appending HPADisableSuffix to the HPA's scaleTargetRef name
// via a JSON merge patch. It is a no-op when no HPA targets the workload or
// when the HPA is already disabled.
//
// Fixes: the original called err.Error() on the `!found` path where err is
// nil (nil-pointer panic), and asserted the targetRef map/field types
// unconditionally (panic on a malformed HPA).
func DisableHPA(cli client.Client, object client.Object) error {
	hpa := findHPAForWorkload(cli, object)
	if hpa == nil {
		return nil
	}
	targetRef, found, err := unstructured.NestedFieldCopy(hpa.Object, "spec", "scaleTargetRef")
	if err != nil || !found {
		// err may be nil when the field is simply absent; %v handles that
		// safely where err.Error() would panic.
		return fmt.Errorf("get HPA targetRef for workload %v failed, because %v", klog.KObj(object), err)
	}
	ref, ok := targetRef.(map[string]interface{})
	if !ok {
		return fmt.Errorf("get HPA targetRef for workload %v failed, because scaleTargetRef is malformed", klog.KObj(object))
	}
	// Comma-ok assertions: missing or non-string fields become "" instead of
	// panicking.
	name, _ := ref["name"].(string)
	version, _ := ref["apiVersion"].(string)
	kind, _ := ref["kind"].(string)
	if !strings.HasSuffix(name, HPADisableSuffix) {
		body := fmt.Sprintf(`{"spec":{"scaleTargetRef":{"apiVersion": "%s", "kind": "%s", "name": "%s"}}}`, version, kind, addSuffix(name))
		if err = cli.Patch(context.TODO(), hpa, client.RawPatch(types.MergePatchType, []byte(body))); err != nil {
			return fmt.Errorf("failed to disable HPA %v for workload %v, because %s", klog.KObj(hpa), klog.KObj(object), err.Error())
		}
	}
	return nil
}
// RestoreHPA re-enables a previously disabled HPA for the given workload by
// stripping HPADisableSuffix from its scaleTargetRef name via a JSON merge
// patch. It is a no-op when no HPA targets the workload or when the HPA was
// never disabled.
//
// Fixes: the original called err.Error() on the `!found` path where err is
// nil (nil-pointer panic), and asserted the targetRef map/field types
// unconditionally (panic on a malformed HPA).
func RestoreHPA(cli client.Client, object client.Object) error {
	hpa := findHPAForWorkload(cli, object)
	if hpa == nil {
		return nil
	}
	targetRef, found, err := unstructured.NestedFieldCopy(hpa.Object, "spec", "scaleTargetRef")
	if err != nil || !found {
		// err may be nil when the field is simply absent; %v handles that
		// safely where err.Error() would panic.
		return fmt.Errorf("get HPA targetRef for workload %v failed, because %v", klog.KObj(object), err)
	}
	ref, ok := targetRef.(map[string]interface{})
	if !ok {
		return fmt.Errorf("get HPA targetRef for workload %v failed, because scaleTargetRef is malformed", klog.KObj(object))
	}
	// Comma-ok assertions: missing or non-string fields become "" instead of
	// panicking.
	name, _ := ref["name"].(string)
	version, _ := ref["apiVersion"].(string)
	kind, _ := ref["kind"].(string)
	if strings.HasSuffix(name, HPADisableSuffix) {
		body := fmt.Sprintf(`{"spec":{"scaleTargetRef":{"apiVersion": "%s", "kind": "%s", "name": "%s"}}}`, version, kind, removeSuffix(name))
		if err = cli.Patch(context.TODO(), hpa, client.RawPatch(types.MergePatchType, []byte(body))); err != nil {
			return fmt.Errorf("failed to restore HPA %v for workload %v, because %s", klog.KObj(hpa), klog.KObj(object), err.Error())
		}
	}
	return nil
}
// findHPAForWorkload looks up an HPA targeting the workload, preferring the
// autoscaling/v2 API and falling back to autoscaling/v1. Returns nil when
// neither version has a matching HPA.
func findHPAForWorkload(cli client.Client, object client.Object) *unstructured.Unstructured {
	for _, apiVersion := range []string{"v2", "v1"} {
		if hpa := findHPA(cli, object, apiVersion); hpa != nil {
			return hpa
		}
	}
	return nil
}
// findHPA lists HorizontalPodAutoscalers of the given autoscaling API version
// in the workload's namespace and returns the first one whose scaleTargetRef
// matches the workload's apiVersion, kind and name (ignoring any disable
// suffix on the name). Returns nil when listing fails or nothing matches.
//
// Fixes vs. original: comma-ok type assertions so a malformed scaleTargetRef
// is skipped instead of panicking; the inner ref fields no longer shadow the
// `version` parameter; the returned pointer indexes into the list slice
// rather than aliasing the range variable.
func findHPA(cli client.Client, object client.Object, version string) *unstructured.Unstructured {
	hpaList := &unstructured.UnstructuredList{}
	hpaList.SetGroupVersionKind(schema.GroupVersionKind{Group: "autoscaling", Kind: "HorizontalPodAutoscaler", Version: version})
	if err := cli.List(context.TODO(), hpaList, &client.ListOptions{Namespace: object.GetNamespace()}); err != nil {
		klog.Warningf("Get HPA for workload %v failed, because %s", klog.KObj(object), err.Error())
		return nil
	}
	klog.Infof("Get %d HPA with %s in namespace %s in total", len(hpaList.Items), version, object.GetNamespace())
	workloadGVK := object.GetObjectKind().GroupVersionKind()
	for i := range hpaList.Items {
		scaleTargetRef, found, err := unstructured.NestedFieldCopy(hpaList.Items[i].Object, "spec", "scaleTargetRef")
		if err != nil || !found {
			continue
		}
		ref, ok := scaleTargetRef.(map[string]interface{})
		if !ok {
			// Malformed scaleTargetRef: skip rather than panic.
			continue
		}
		refName, _ := ref["name"].(string)
		refAPIVersion, _ := ref["apiVersion"].(string)
		refKind, _ := ref["kind"].(string)
		if refAPIVersion == workloadGVK.GroupVersion().String() &&
			refKind == workloadGVK.Kind &&
			removeSuffix(refName) == object.GetName() {
			return &hpaList.Items[i]
		}
	}
	klog.Infof("No HPA found for workload %v", klog.KObj(object))
	return nil
}
// addSuffix returns the HPA target-ref name with HPADisableSuffix appended;
// names already carrying the suffix are returned unchanged (idempotent).
func addSuffix(HPARefName string) string {
	name := HPARefName
	if !strings.HasSuffix(name, HPADisableSuffix) {
		name += HPADisableSuffix
	}
	return name
}
// removeSuffix strips every trailing occurrence of HPADisableSuffix from the
// HPA target-ref name, recovering the original workload name.
func removeSuffix(HPARefName string) string {
	name := HPARefName
	for strings.HasSuffix(name, HPADisableSuffix) {
		name = strings.TrimSuffix(name, HPADisableSuffix)
	}
	return name
}

View File

@ -0,0 +1,151 @@
package hpa
import (
"context"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
// "github.com/openkruise/rollouts/api/v1alpha1"
// import hpa v1
)
var (
	// scheme backs the fake client in this suite; nothing is registered on
	// it because every object here is built as unstructured.
	scheme = runtime.NewScheme()
)

// TestHPAPackage hooks the Ginkgo suite below into the standard `go test`
// runner.
func TestHPAPackage(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "HPA Package Suite")
}
// Exercises DisableHPA/RestoreHPA and the HPA lookup helpers against a fake
// client holding only unstructured objects.
var _ = Describe("HPA Operations", func() {
	var (
		cli    client.Client
		object *unstructured.Unstructured
	)
	// Fresh fake client plus an apps/v1 Deployment stand-in before each spec.
	BeforeEach(func() {
		object = &unstructured.Unstructured{}
		object.SetGroupVersionKind(schema.GroupVersionKind{
			Group:   "apps",
			Version: "v1",
			Kind:    "Deployment",
		})
		object.SetNamespace("default")
		object.SetName("my-deployment")
		cli = fake.NewClientBuilder().WithScheme(scheme).WithObjects(object).Build()
	})
	Context("when disabling and restoring HPA", func() {
		It("should disable and restore HPA successfully", func() {
			// Create a fake HPA
			hpa := &unstructured.Unstructured{}
			hpa.SetGroupVersionKind(schema.GroupVersionKind{
				Group:   "autoscaling",
				Version: "v2",
				Kind:    "HorizontalPodAutoscaler",
			})
			hpa.SetNamespace("default")
			hpa.SetName("my-hpa")
			unstructured.SetNestedField(hpa.Object, map[string]interface{}{
				"apiVersion": "apps/v1",
				"kind":       "Deployment",
				"name":       "my-deployment",
			}, "spec", "scaleTargetRef")
			Expect(cli.Create(context.TODO(), hpa)).To(Succeed())
			// Disable HPA
			// NOTE(review): the error returned by DisableHPA is ignored; the
			// assertions below would fail anyway if it did nothing, but an
			// explicit Expect(...).To(Succeed()) would be clearer.
			DisableHPA(cli, object)
			fetchedHPA := &unstructured.Unstructured{}
			fetchedHPA.SetGroupVersionKind(schema.GroupVersionKind{
				Group:   "autoscaling",
				Version: "v2",
				Kind:    "HorizontalPodAutoscaler",
			})
			Expect(cli.Get(context.TODO(), types.NamespacedName{
				Namespace: "default",
				Name:      "my-hpa",
			}, fetchedHPA)).To(Succeed())
			targetRef, found, err := unstructured.NestedFieldCopy(fetchedHPA.Object, "spec", "scaleTargetRef")
			Expect(err).NotTo(HaveOccurred())
			Expect(found).To(BeTrue())
			ref := targetRef.(map[string]interface{})
			// Disabling appends the suffix to the target ref name.
			Expect(ref["name"]).To(Equal("my-deployment" + HPADisableSuffix))
			// Restore HPA
			// NOTE(review): error return ignored here as well.
			RestoreHPA(cli, object)
			Expect(cli.Get(context.TODO(), types.NamespacedName{
				Namespace: "default",
				Name:      "my-hpa",
			}, fetchedHPA)).To(Succeed())
			targetRef, found, err = unstructured.NestedFieldCopy(fetchedHPA.Object, "spec", "scaleTargetRef")
			Expect(err).NotTo(HaveOccurred())
			Expect(found).To(BeTrue())
			ref = targetRef.(map[string]interface{})
			// Restoring strips the suffix again.
			Expect(ref["name"]).To(Equal("my-deployment"))
		})
	})
	Context("when finding HPA for workload", func() {
		It("should find the correct HPA", func() {
			// Create a fake HPA v2
			hpaV2 := &unstructured.Unstructured{}
			hpaV2.SetGroupVersionKind(schema.GroupVersionKind{
				Group:   "autoscaling",
				Version: "v2",
				Kind:    "HorizontalPodAutoscaler",
			})
			hpaV2.SetNamespace("default")
			hpaV2.SetName("my-hpa-v2")
			unstructured.SetNestedField(hpaV2.Object, map[string]interface{}{
				"apiVersion": "apps/v1",
				"kind":       "Deployment",
				"name":       "my-deployment",
			}, "spec", "scaleTargetRef")
			// Create a fake HPA v1
			hpaV1 := &unstructured.Unstructured{}
			hpaV1.SetGroupVersionKind(schema.GroupVersionKind{
				Group:   "autoscaling",
				Version: "v1",
				Kind:    "HorizontalPodAutoscaler",
			})
			hpaV1.SetNamespace("default")
			hpaV1.SetName("my-hpa-v1")
			unstructured.SetNestedField(hpaV1.Object, map[string]interface{}{
				"apiVersion": "apps/v1",
				"kind":       "Deployment",
				"name":       "my-deployment",
			}, "spec", "scaleTargetRef")
			Expect(cli.Create(context.TODO(), hpaV2)).To(Succeed())
			Expect(cli.Create(context.TODO(), hpaV1)).To(Succeed())
			// Test finding HPA for workload: the v2 API takes precedence.
			foundHPA := findHPAForWorkload(cli, object)
			Expect(foundHPA).NotTo(BeNil())
			Expect(foundHPA.GetName()).To(Equal("my-hpa-v2"))
			// Delete v2 HPA and check if v1 is found
			Expect(cli.Delete(context.TODO(), hpaV2)).To(Succeed())
			foundHPA = findHPAForWorkload(cli, object)
			Expect(foundHPA).NotTo(BeNil())
			Expect(foundHPA.GetName()).To(Equal("my-hpa-v1"))
		})
	})
})

View File

@ -0,0 +1,48 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bluegreenstyle
import (
"github.com/openkruise/rollouts/api/v1beta1"
batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context"
"github.com/openkruise/rollouts/pkg/util"
corev1 "k8s.io/api/core/v1"
)
// Interface abstracts the workload-specific operations a blue-green style
// batch-release controller must implement.
type Interface interface {
	// BuildController will get workload object and parse workload info,
	// and return an initialized controller for the workload.
	BuildController() (Interface, error)
	// GetWorkloadInfo returns workload information.
	GetWorkloadInfo() *util.WorkloadInfo
	// ListOwnedPods fetches the pods owned by the workload.
	// Note that we should list pods only if we really need them.
	// reserved for future use
	ListOwnedPods() ([]*corev1.Pod, error)
	// CalculateBatchContext calculates the current batch context
	// according to the release plan and current status of the workload.
	CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error)
	// Initialize does something before rolling out, for example:
	// - pause the workload
	// - update: MinReadySeconds, ProgressDeadlineSeconds, Strategy
	Initialize(release *v1beta1.BatchRelease) error
	// UpgradeBatch upgrades the workload according to the current batch context.
	UpgradeBatch(ctx *batchcontext.BatchContext) error
	// Finalize does something after rolling out, for example:
	// - set pause to false, restore the original setting, delete annotation
	Finalize(release *v1beta1.BatchRelease) error
}

View File

@ -21,8 +21,10 @@ import (
"fmt"
"strings"
appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
"github.com/openkruise/rollouts/api/v1beta1"
"github.com/openkruise/rollouts/pkg/util"
apps "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -63,6 +65,42 @@ func IsControlledByBatchRelease(release *v1beta1.BatchRelease, object client.Obj
return false
}
// ValidateReadyForBlueGreenRelease checks that the workload has been fully
// prepared for a blue-green release; only when it returns nil may the
// controller go on to the next batch. It requires:
//   - the batch-release control-info annotation to be present;
//   - Deployment: a RollingUpdate strategy with a non-nil rollingUpdate
//     block, and MinReadySeconds/ProgressDeadlineSeconds pinned to the
//     rollout sentinels (v1beta1.MaxReadySeconds/MaxProgressSeconds);
//   - CloneSet: a Recreate update strategy and MinReadySeconds pinned to
//     v1beta1.MaxReadySeconds.
//
// Panics on any other workload type — callers must pass only Deployments or
// CloneSets.
func ValidateReadyForBlueGreenRelease(object client.Object) error {
	// check the annotation
	if object.GetAnnotations()[util.BatchReleaseControlAnnotation] == "" {
		return fmt.Errorf("workload has no control info annotation")
	}
	switch o := object.(type) {
	case *apps.Deployment:
		// must be RollingUpdate
		if len(o.Spec.Strategy.Type) > 0 && o.Spec.Strategy.Type != apps.RollingUpdateDeploymentStrategyType {
			return fmt.Errorf("deployment strategy type is not RollingUpdate")
		}
		if o.Spec.Strategy.RollingUpdate == nil {
			return fmt.Errorf("deployment strategy rollingUpdate is nil")
		}
		// MinReadySeconds and ProgressDeadlineSeconds must be set
		if o.Spec.MinReadySeconds != v1beta1.MaxReadySeconds || o.Spec.ProgressDeadlineSeconds == nil || *o.Spec.ProgressDeadlineSeconds != v1beta1.MaxProgressSeconds {
			return fmt.Errorf("deployment strategy minReadySeconds or progressDeadlineSeconds is not MaxReadySeconds or MaxProgressSeconds")
		}
	case *appsv1alpha1.CloneSet:
		// must be ReCreate
		if len(o.Spec.UpdateStrategy.Type) > 0 && o.Spec.UpdateStrategy.Type != appsv1alpha1.RecreateCloneSetUpdateStrategyType {
			return fmt.Errorf("cloneSet strategy type is not ReCreate")
		}
		// MinReadySeconds must be set.
		// NOTE(review): unlike the Deployment branch, only MinReadySeconds is
		// checked here — per InitOriginalSetting, CloneSet does not expose a
		// progressDeadlineSeconds field yet.
		if o.Spec.MinReadySeconds != v1beta1.MaxReadySeconds {
			return fmt.Errorf("cloneSet strategy minReadySeconds is not MaxReadySeconds")
		}
	default:
		panic("unsupported workload type to ValidateReadyForBlueGreenRelease function")
	}
	return nil
}
// BuildReleaseControlInfo return a NewControllerRef of release with escaped `"`.
func BuildReleaseControlInfo(release *v1beta1.BatchRelease) string {
owner, _ := json.Marshal(metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind()))
@ -112,3 +150,101 @@ func IsCurrentMoreThanOrEqualToDesired(current, desired intstr.IntOrString) bool
desiredNum, _ := intstr.GetScaledValueFromIntOrPercent(&desired, 10000000, true)
return currentNum >= desiredNum
}
// GetOriginalSetting decodes the workload's original strategy settings saved
// under the annotation "rollouts.kruise.io/original-deployment-strategy"
// (v1beta1.OriginalDeploymentStrategyAnnotation). A missing or empty
// annotation yields a zero-valued setting and no error.
// (The previous comment named this function "GetDeploymentStrategy".)
func GetOriginalSetting(object client.Object) (OriginalDeploymentStrategy, error) {
	setting := OriginalDeploymentStrategy{}
	settingStr := object.GetAnnotations()[v1beta1.OriginalDeploymentStrategyAnnotation]
	if settingStr == "" {
		return setting, nil
	}
	err := json.Unmarshal([]byte(settingStr), &setting)
	return setting, err
}
// InitOriginalSetting fills any still-unset fields of the original-strategy
// record from the live workload object, so the settings can be restored when
// the release finishes.
// note: update the maxSurge and maxUnavailable only when MaxSurge and
// MaxUnavailable are nil, which means they should keep unchanged in
// continuous release (though continuous release isn't supported for now).
// MinReadySeconds uses 0 as its "unset" sentinel, so an explicit value of 0
// is indistinguishable from unset (both re-capture from the workload).
// Panics on workload types other than Deployment/CloneSet.
func InitOriginalSetting(setting *OriginalDeploymentStrategy, object client.Object) {
	var changeLogs []string
	switch o := object.(type) {
	case *apps.Deployment:
		if setting.MaxSurge == nil {
			setting.MaxSurge = getMaxSurgeFromDeployment(o.Spec.Strategy.RollingUpdate)
			changeLogs = append(changeLogs, fmt.Sprintf("maxSurge changed from nil to %s", setting.MaxSurge.String()))
		}
		if setting.MaxUnavailable == nil {
			setting.MaxUnavailable = getMaxUnavailableFromDeployment(o.Spec.Strategy.RollingUpdate)
			changeLogs = append(changeLogs, fmt.Sprintf("maxUnavailable changed from nil to %s", setting.MaxUnavailable.String()))
		}
		if setting.ProgressDeadlineSeconds == nil {
			setting.ProgressDeadlineSeconds = getIntPtrOrDefault(o.Spec.ProgressDeadlineSeconds, 600)
			changeLogs = append(changeLogs, fmt.Sprintf("progressDeadlineSeconds changed from nil to %d", *setting.ProgressDeadlineSeconds))
		}
		if setting.MinReadySeconds == 0 {
			setting.MinReadySeconds = o.Spec.MinReadySeconds
			changeLogs = append(changeLogs, fmt.Sprintf("minReadySeconds changed from 0 to %d", setting.MinReadySeconds))
		}
	case *appsv1alpha1.CloneSet:
		if setting.MaxSurge == nil {
			setting.MaxSurge = getMaxSurgeFromCloneset(o.Spec.UpdateStrategy)
			changeLogs = append(changeLogs, fmt.Sprintf("maxSurge changed from nil to %s", setting.MaxSurge.String()))
		}
		if setting.MaxUnavailable == nil {
			setting.MaxUnavailable = getMaxUnavailableFromCloneset(o.Spec.UpdateStrategy)
			changeLogs = append(changeLogs, fmt.Sprintf("maxUnavailable changed from nil to %s", setting.MaxUnavailable.String()))
		}
		if setting.ProgressDeadlineSeconds == nil {
			// cloneset is planned to support progressDeadlineSeconds field
		}
		if setting.MinReadySeconds == 0 {
			setting.MinReadySeconds = o.Spec.MinReadySeconds
			changeLogs = append(changeLogs, fmt.Sprintf("minReadySeconds changed from 0 to %d", setting.MinReadySeconds))
		}
	default:
		panic(fmt.Errorf("unsupported object type %T", o))
	}
	if len(changeLogs) == 0 {
		klog.InfoS("InitOriginalSetting: original setting unchanged", "object", object.GetName())
		return
	}
	klog.InfoS("InitOriginalSetting: original setting updated", "object", object.GetName(), "changes", strings.Join(changeLogs, ";"))
}
// getMaxSurgeFromDeployment returns the deployment's rolling-update maxSurge,
// falling back to "25%" when the strategy or the field is unset.
func getMaxSurgeFromDeployment(ru *apps.RollingUpdateDeployment) *intstr.IntOrString {
	if ru != nil && ru.MaxSurge != nil {
		return ru.MaxSurge
	}
	fallback := intstr.FromString("25%")
	return &fallback
}
// getMaxUnavailableFromDeployment returns the deployment's rolling-update
// maxUnavailable, falling back to "25%" when the strategy or the field is
// unset.
func getMaxUnavailableFromDeployment(ru *apps.RollingUpdateDeployment) *intstr.IntOrString {
	if ru != nil && ru.MaxUnavailable != nil {
		return ru.MaxUnavailable
	}
	fallback := intstr.FromString("25%")
	return &fallback
}
// getMaxSurgeFromCloneset returns the CloneSet's maxSurge, falling back to
// "0%" when the field is unset.
func getMaxSurgeFromCloneset(us appsv1alpha1.CloneSetUpdateStrategy) *intstr.IntOrString {
	if us.MaxSurge != nil {
		return us.MaxSurge
	}
	fallback := intstr.FromString("0%")
	return &fallback
}
// getMaxUnavailableFromCloneset returns the CloneSet's maxUnavailable,
// falling back to "20%" when the field is unset.
func getMaxUnavailableFromCloneset(us appsv1alpha1.CloneSetUpdateStrategy) *intstr.IntOrString {
	if us.MaxUnavailable != nil {
		return us.MaxUnavailable
	}
	fallback := intstr.FromString("20%")
	return &fallback
}
// getIntPtrOrDefault returns ptr unchanged when it is non-nil; otherwise it
// returns a pointer to a copy of defaultVal.
func getIntPtrOrDefault(ptr *int32, defaultVal int32) *int32 {
	if ptr != nil {
		return ptr
	}
	fallback := defaultVal
	return &fallback
}

View File

@ -392,7 +392,10 @@ func (m *blueGreenReleaseManager) syncBatchRelease(br *v1beta1.BatchRelease, blu
// TODO: optimize the logic to better understand
blueGreenStatus.Message = fmt.Sprintf("BatchRelease is at state %s, rollout-id %s, step %d",
br.Status.CanaryStatus.CurrentBatchState, br.Status.ObservedRolloutID, br.Status.CanaryStatus.CurrentBatch+1)
// br.Status.Message records messages that help users to understand what is going wrong
if len(br.Status.Message) > 0 {
blueGreenStatus.Message += fmt.Sprintf(", %s", br.Status.Message)
}
// sync rolloutId from blueGreenStatus to BatchRelease
if blueGreenStatus.ObservedRolloutID != br.Spec.ReleasePlan.RolloutID {
body := fmt.Sprintf(`{"spec":{"releasePlan":{"rolloutID":"%s"}}}`, blueGreenStatus.ObservedRolloutID)

View File

@ -457,6 +457,10 @@ func (m *canaryReleaseManager) syncBatchRelease(br *v1beta1.BatchRelease, canary
// TODO: optimize the logic to better understand
canaryStatus.Message = fmt.Sprintf("BatchRelease is at state %s, rollout-id %s, step %d",
br.Status.CanaryStatus.CurrentBatchState, br.Status.ObservedRolloutID, br.Status.CanaryStatus.CurrentBatch+1)
// br.Status.Message records messages that help users to understand what is going wrong
if len(br.Status.Message) > 0 {
canaryStatus.Message += fmt.Sprintf(", %s", br.Status.Message)
}
// sync rolloutId from canaryStatus to BatchRelease
if canaryStatus.ObservedRolloutID != br.Spec.ReleasePlan.RolloutID {

View File

@ -45,6 +45,16 @@ func GetRolloutCondition(status v1beta1.RolloutStatus, condType v1beta1.RolloutC
return nil
}
// GetBatchReleaseCondition returns a pointer to a copy of the condition with
// the given type from the BatchRelease status, or nil when no such condition
// exists. The returned pointer does not alias status.Conditions.
func GetBatchReleaseCondition(status v1beta1.BatchReleaseStatus, condType v1beta1.RolloutConditionType) *v1beta1.RolloutCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			cond := status.Conditions[i]
			return &cond
		}
	}
	return nil
}
// SetRolloutCondition updates the rollout to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason, then we are not going to update
// by returning false. Returns true if the condition was updated
@ -63,6 +73,21 @@ func SetRolloutCondition(status *v1beta1.RolloutStatus, condition v1beta1.Rollou
return true
}
// SetBatchReleaseCondition updates the BatchRelease status to include the
// provided condition. It returns false (no update) when an identical
// condition (same status, reason, and message) is already present; otherwise
// it replaces any existing condition of the same type and returns true.
func SetBatchReleaseCondition(status *v1beta1.BatchReleaseStatus, condition v1beta1.RolloutCondition) bool {
	existing := GetBatchReleaseCondition(*status, condition.Type)
	if existing != nil && existing.Status == condition.Status {
		if existing.Reason == condition.Reason && existing.Message == condition.Message {
			// Nothing changed; avoid a no-op status write.
			return false
		}
		// Only reason/message changed: keep the original transition time.
		condition.LastTransitionTime = existing.LastTransitionTime
	}
	status.Conditions = append(filterOutCondition(status.Conditions, condition.Type), condition)
	return true
}
// filterOutCondition returns a new slice of rollout conditions without conditions with the provided type.
func filterOutCondition(conditions []v1beta1.RolloutCondition, condType v1beta1.RolloutConditionType) []v1beta1.RolloutCondition {
var newConditions []v1beta1.RolloutCondition
@ -78,3 +103,7 @@ func filterOutCondition(conditions []v1beta1.RolloutCondition, condType v1beta1.
// RemoveRolloutCondition drops every condition of the given type from the
// Rollout status, leaving the remaining conditions in order.
func RemoveRolloutCondition(status *v1beta1.RolloutStatus, condType v1beta1.RolloutConditionType) {
	remaining := filterOutCondition(status.Conditions, condType)
	status.Conditions = remaining
}
// RemoveBatchReleaseCondition drops every condition of the given type from
// the BatchRelease status, leaving the remaining conditions in order.
func RemoveBatchReleaseCondition(status *v1beta1.BatchReleaseStatus, condType v1beta1.RolloutConditionType) {
	remaining := filterOutCondition(status.Conditions, condType)
	status.Conditions = remaining
}

70
pkg/util/errors/types.go Normal file
View File

@ -0,0 +1,70 @@
package errors
import (
"errors"
"fmt"
)
// BenignError represents a benign error that can be handled or ignored by the caller.
// It encapsulates information that is non-critical and does not require immediate attention.
type BenignError struct {
Err error
}
// Error implements the error interface for BenignError.
// It returns the error message of the encapsulated error or a default message.
func (e *BenignError) Error() string {
if e.Err != nil {
return fmt.Sprintf("[benign]: %s", e.Err.Error())
}
return "benign error"
}
// NewBenignError creates a new instance of BenignError.
// If the provided err is nil, it signifies a benign condition without a specific error message.
func NewBenignError(err error) *BenignError {
return &BenignError{Err: err}
}
func IsBenign(err error) bool {
var benignErr *BenignError
return errors.As(err, &benignErr)
}
func AsBenign(err error, target **BenignError) bool {
return errors.As(err, target)
}
// FatalError represents a fatal error that requires special handling.
// Such errors are critical and may necessitate logging, alerts, or even program termination.
type FatalError struct {
Err error
}
// Error implements the error interface for FatalError.
// It returns the error message of the encapsulated error or a default message.
func (e *FatalError) Error() string {
if e.Err != nil {
return e.Err.Error()
}
return "fatal error"
}
// NewFatalError creates a new instance of FatalError.
// It encapsulates the provided error, marking it as critical.
func NewFatalError(err error) *FatalError {
return &FatalError{Err: err}
}
// IsFatal checks whether the provided error is of type FatalError.
// It returns true if the error is a FatalError or wraps a FatalError, false otherwise.
func IsFatal(err error) bool {
var fatalErr *FatalError
return AsFatal(err, &fatalErr)
}
// AsFatal attempts to cast the provided error to a FatalError.
// It returns true if the casting is successful, allowing the caller to handle it accordingly.
func AsFatal(err error, target **FatalError) bool {
return errors.As(err, target)
}

View File

@ -23,6 +23,8 @@ import (
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@ -222,3 +224,156 @@ func (s *DeploymentPatch) UpdatePaused(paused bool) *DeploymentPatch {
}
return s
}
// UpdateMinReadySeconds records spec.minReadySeconds in the patch body for
// strategic-merge and merge patches; other patch types are left untouched.
// Returns the receiver for chaining.
func (s *DeploymentPatch) UpdateMinReadySeconds(seconds int32) *DeploymentPatch {
	if s.PatchType == types.StrategicMergePatchType || s.PatchType == types.MergePatchType {
		if _, exists := s.PatchData["spec"]; !exists {
			s.PatchData["spec"] = map[string]interface{}{}
		}
		s.PatchData["spec"].(map[string]interface{})["minReadySeconds"] = seconds
	}
	return s
}
// UpdateProgressDeadlineSeconds records spec.progressDeadlineSeconds in the
// patch body for strategic-merge and merge patches; other patch types are
// left untouched. A nil pointer is stored as-is (patched as null).
// Returns the receiver for chaining.
func (s *DeploymentPatch) UpdateProgressDeadlineSeconds(seconds *int32) *DeploymentPatch {
	if s.PatchType == types.StrategicMergePatchType || s.PatchType == types.MergePatchType {
		if _, exists := s.PatchData["spec"]; !exists {
			s.PatchData["spec"] = map[string]interface{}{}
		}
		s.PatchData["spec"].(map[string]interface{})["progressDeadlineSeconds"] = seconds
	}
	return s
}
// UpdateMaxSurge records spec.strategy.rollingUpdate.maxSurge in the patch
// body for strategic-merge and merge patches; other patch types are left
// untouched. Returns the receiver for chaining.
func (s *DeploymentPatch) UpdateMaxSurge(maxSurge *intstr.IntOrString) *DeploymentPatch {
	switch s.PatchType {
	case types.StrategicMergePatchType, types.MergePatchType:
		// ensure returns the child map at key, creating it when absent.
		ensure := func(parent map[string]interface{}, key string) map[string]interface{} {
			if _, ok := parent[key]; !ok {
				parent[key] = make(map[string]interface{})
			}
			return parent[key].(map[string]interface{})
		}
		rollingUpdate := ensure(ensure(ensure(s.PatchData, "spec"), "strategy"), "rollingUpdate")
		rollingUpdate["maxSurge"] = maxSurge
	}
	return s
}
// UpdateMaxUnavailable records spec.strategy.rollingUpdate.maxUnavailable in
// the patch body for strategic-merge and merge patches; other patch types are
// left untouched. Returns the receiver for chaining.
func (s *DeploymentPatch) UpdateMaxUnavailable(maxUnavailable *intstr.IntOrString) *DeploymentPatch {
	switch s.PatchType {
	case types.StrategicMergePatchType, types.MergePatchType:
		// ensure returns the child map at key, creating it when absent.
		ensure := func(parent map[string]interface{}, key string) map[string]interface{} {
			if _, ok := parent[key]; !ok {
				parent[key] = make(map[string]interface{})
			}
			return parent[key].(map[string]interface{})
		}
		rollingUpdate := ensure(ensure(ensure(s.PatchData, "spec"), "strategy"), "rollingUpdate")
		rollingUpdate["maxUnavailable"] = maxUnavailable
	}
	return s
}
// ClonesetPatch builds a patch body targeting a Kruise CloneSet.
// It embeds CommonPatch for the shared patch-type and patch-data bookkeeping.
type ClonesetPatch struct {
	CommonPatch
}
// NewClonesetPatch returns a ClonesetPatch initialized for a JSON merge patch
// with an empty patch body.
func NewClonesetPatch() *ClonesetPatch {
	p := &ClonesetPatch{}
	p.PatchType = types.MergePatchType
	p.PatchData = map[string]interface{}{}
	return p
}
// UpdateMinReadySeconds records spec.minReadySeconds in the patch body for
// strategic-merge and merge patches; other patch types are left untouched.
// Returns the receiver for chaining.
func (s *ClonesetPatch) UpdateMinReadySeconds(seconds int32) *ClonesetPatch {
	if s.PatchType == types.StrategicMergePatchType || s.PatchType == types.MergePatchType {
		klog.Infof("updateMinReadySeconds to %v", seconds)
		if _, exists := s.PatchData["spec"]; !exists {
			s.PatchData["spec"] = map[string]interface{}{}
		}
		s.PatchData["spec"].(map[string]interface{})["minReadySeconds"] = seconds
	}
	return s
}
// UpdatePaused records spec.updateStrategy.paused in the patch body for
// strategic-merge and merge patches; other patch types are left untouched.
// Returns the receiver for chaining.
func (s *ClonesetPatch) UpdatePaused(paused bool) *ClonesetPatch {
	switch s.PatchType {
	case types.StrategicMergePatchType, types.MergePatchType:
		klog.Infof("updatePaused to %v", paused)
		// ensure returns the child map at key, creating it when absent.
		ensure := func(parent map[string]interface{}, key string) map[string]interface{} {
			if _, ok := parent[key]; !ok {
				parent[key] = make(map[string]interface{})
			}
			return parent[key].(map[string]interface{})
		}
		ensure(ensure(s.PatchData, "spec"), "updateStrategy")["paused"] = paused
	}
	return s
}
// UpdatePartition records spec.updateStrategy.partition in the patch body for
// strategic-merge and merge patches; other patch types are left untouched.
// Returns the receiver for chaining.
func (s *ClonesetPatch) UpdatePartition(partition *intstr.IntOrString) *ClonesetPatch {
	switch s.PatchType {
	case types.StrategicMergePatchType, types.MergePatchType:
		klog.Infof("updatePartition to %v", partition)
		if _, ok := s.PatchData["spec"]; !ok {
			s.PatchData["spec"] = make(map[string]interface{})
		}
		spec := s.PatchData["spec"].(map[string]interface{})
		if _, ok := spec["updateStrategy"]; !ok {
			spec["updateStrategy"] = make(map[string]interface{})
		}
		updateStrategy := spec["updateStrategy"].(map[string]interface{})
		updateStrategy["partition"] = partition
	}
	return s
}

// UpdatePartiton is a misspelled alias kept so existing callers keep
// compiling.
//
// Deprecated: use UpdatePartition instead.
func (s *ClonesetPatch) UpdatePartiton(partition *intstr.IntOrString) *ClonesetPatch {
	return s.UpdatePartition(partition)
}
// UpdateMaxSurge records spec.updateStrategy.maxSurge in the patch body for
// strategic-merge and merge patches; other patch types are left untouched.
// Returns the receiver for chaining.
func (s *ClonesetPatch) UpdateMaxSurge(maxSurge *intstr.IntOrString) *ClonesetPatch {
	switch s.PatchType {
	case types.StrategicMergePatchType, types.MergePatchType:
		klog.Infof("updateMaxSurge to %v", maxSurge)
		// ensure returns the child map at key, creating it when absent.
		ensure := func(parent map[string]interface{}, key string) map[string]interface{} {
			if _, ok := parent[key]; !ok {
				parent[key] = make(map[string]interface{})
			}
			return parent[key].(map[string]interface{})
		}
		ensure(ensure(s.PatchData, "spec"), "updateStrategy")["maxSurge"] = maxSurge
	}
	return s
}
// UpdateMaxUnavailable records spec.updateStrategy.maxUnavailable in the
// patch body for strategic-merge and merge patches; other patch types are
// left untouched. Returns the receiver for chaining.
func (s *ClonesetPatch) UpdateMaxUnavailable(maxUnavailable *intstr.IntOrString) *ClonesetPatch {
	switch s.PatchType {
	case types.StrategicMergePatchType, types.MergePatchType:
		klog.Infof("updateMaxUnavailable to %v", maxUnavailable)
		// ensure returns the child map at key, creating it when absent.
		ensure := func(parent map[string]interface{}, key string) map[string]interface{} {
			if _, ok := parent[key]; !ok {
				parent[key] = make(map[string]interface{})
			}
			return parent[key].(map[string]interface{})
		}
		ensure(ensure(s.PatchData, "spec"), "updateStrategy")["maxUnavailable"] = maxUnavailable
	}
	return s
}

View File

@ -154,7 +154,7 @@ func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int32) string {
// SafeEncodeString maps every rune of s onto the `alphanums` alphabet,
// producing an output of one byte per input rune.
func SafeEncodeString(s string) string {
	runes := []rune(s)
	// Size the buffer by rune count, not byte count: for multi-byte input,
	// len(s) > len(runes) and the original sizing left trailing NUL bytes
	// in the result. For pure-ASCII input the two sizes are identical.
	r := make([]byte, len(runes))
	for i, b := range runes {
		r[i] = alphanums[int(b)%len(alphanums)]
	}
	return string(r)
}
@ -329,11 +329,11 @@ func IsWorkloadType(object client.Object, t WorkloadType) bool {
// DeploymentMaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
func DeploymentMaxUnavailable(deployment *apps.Deployment) int32 {
strategy := deployment.Spec.Strategy
if strategy.Type != apps.RollingUpdateDeploymentStrategyType || *(deployment.Spec.Replicas) == 0 {
if strategy.Type != apps.RollingUpdateDeploymentStrategyType || *deployment.Spec.Replicas == 0 {
return int32(0)
}
// Error caught by validation
_, maxUnavailable, _ := resolveFenceposts(strategy.RollingUpdate.MaxSurge, strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
_, maxUnavailable, _ := resolveFenceposts(strategy.RollingUpdate.MaxSurge, strategy.RollingUpdate.MaxUnavailable, *deployment.Spec.Replicas)
if maxUnavailable > *deployment.Spec.Replicas {
return *deployment.Spec.Replicas
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,12 @@
# HorizontalPodAutoscaler (autoscaling/v1) for the blue-green e2e tests:
# scales the echoserver Deployment between 2 and 6 replicas.
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-dp
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: echoserver
  minReplicas: 2
  maxReplicas: 6
  # Deliberately tiny CPU target so the HPA scales up almost immediately
  # under any load during the test run.
  targetCPUUtilizationPercentage: 1

View File

@ -0,0 +1,24 @@
# HorizontalPodAutoscaler (autoscaling/v2) for the blue-green e2e tests:
# scales the echoserver Deployment between 2 and 6 replicas on CPU usage.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-dp
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: echoserver
  behavior:
    scaleDown:
      # Short stabilization window so scale-down reacts quickly in tests.
      stabilizationWindowSeconds: 10
      # selectPolicy: Disabled
    # scaleUp:
    #   selectPolicy: Disabled
  minReplicas: 2
  maxReplicas: 6
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: AverageValue
          # Deliberately tiny (1 milli-core) so the HPA scales up
          # almost immediately under any load.
          averageValue: '1m'

View File

@ -10,21 +10,15 @@ spec:
strategy:
blueGreen:
steps:
- traffic: 20%
replicas: 20%
- replicas: 50%
traffic: 0%
pause: {}
- traffic: 40%
replicas: 40%
pause: {duration: 10}
- traffic: 60%
replicas: 60%
pause: {duration: 10}
- traffic: 80%
replicas: 80%
pause: {duration: 10}
- traffic: 100%
replicas: 100%
pause: {duration: 0}
- replicas: 100%
traffic: 0%
- replicas: 100%
traffic: 50%
- replicas: 100%
traffic: 100%
trafficRoutings:
- service: echoserver
ingress:

View File

@ -0,0 +1,24 @@
# Blue-green Rollout over a Kruise CloneSet (e2e test fixture):
# brings up 100% new-version replicas with no traffic, then shifts
# ingress traffic 0% -> 50% -> 100%.
apiVersion: rollouts.kruise.io/v1beta1 # we use v1beta1
kind: Rollout
metadata:
  name: rollouts-demo
spec:
  workloadRef:
    apiVersion: apps.kruise.io/v1alpha1
    kind: CloneSet
    name: echoserver
  strategy:
    blueGreen:
      steps:
        # Full green environment with no traffic; empty pause has no
        # duration — presumably waits for manual confirmation (verify
        # against the rollout controller's pause semantics).
        - replicas: 100%
          traffic: 0%
          pause: {}
        # Shift half of the traffic to the new version.
        - replicas: 100%
          traffic: 50%
        # Send all traffic to the new version.
        - replicas: 100%
          traffic: 100%
      trafficRoutings:
        - service: echoserver
          ingress:
            classType: nginx
            name: echoserver