support cloneset & statefulset rollback in batches (#54)
Signed-off-by: mingzhou.swx <mingzhou.swx@alibaba-inc.com>
Parent: c322b09f96 · Commit: 65b75a6615
@@ -1,4 +1,4 @@
-name: E2E-1.19
+name: E2E-CloneSet-1.19
 
 on:
   push:
@@ -95,7 +95,7 @@ jobs:
           export KUBECONFIG=/home/runner/.kube/config
           make ginkgo
           set +e
-          ./bin/ginkgo -timeout 60m -v --skip='Canary rollout with Gateway API' --focus='\[rollouts\] (Rollout)' test/e2e
+          ./bin/ginkgo -timeout 60m -v --focus='CloneSet canary rollout with Ingress' test/e2e
          retVal=$?
           # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
           restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
@@ -107,4 +107,4 @@ jobs:
               kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
               exit 1
           fi
-          exit $retVal
+          exit $retVal
@@ -1,4 +1,4 @@
-name: E2E-1.23
+name: E2E-CloneSet-1.23
 
 on:
   push:
@@ -95,7 +95,7 @@ jobs:
           export KUBECONFIG=/home/runner/.kube/config
           make ginkgo
           set +e
-          ./bin/ginkgo -timeout 60m -v --skip='Canary rollout with Gateway API' --focus='\[rollouts\] (Rollout)' test/e2e
+          ./bin/ginkgo -timeout 60m -v --focus='CloneSet canary rollout with Ingress' test/e2e
           retVal=$?
           # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
           restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
@@ -0,0 +1,110 @@
+name: E2E-Deployment-1.19
+
+on:
+  push:
+    branches:
+      - master
+      - release-*
+  pull_request: {}
+  workflow_dispatch: {}
+
+env:
+  # Common versions
+  GO_VERSION: '1.17'
+  KIND_IMAGE: 'kindest/node:v1.19.16'
+  KIND_CLUSTER_NAME: 'ci-testing'
+
+jobs:
+
+  rollout:
+    runs-on: ubuntu-18.04
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - name: Setup Kind Cluster
+        uses: helm/kind-action@v1.2.0
+        with:
+          node_image: ${{ env.KIND_IMAGE }}
+          cluster_name: ${{ env.KIND_CLUSTER_NAME }}
+          config: ./test/kind-conf.yaml
+      - name: Build image
+        run: |
+          export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}"
+          docker build --pull --no-cache . -t $IMAGE
+          kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
+      - name: Install Kruise
+        run: |
+          set -ex
+          kubectl cluster-info
+          make helm
+          helm repo add openkruise https://openkruise.github.io/charts/
+          helm repo update
+          helm install kruise openkruise/kruise
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+            set -e
+            if [ "$PODS" -eq "2" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+          set -e
+          if [ "$PODS" -eq "2" ]; then
+            echo "Wait for kruise-manager ready successfully"
+          else
+            echo "Timeout to wait for kruise-manager ready"
+            exit 1
+          fi
+      - name: Install Kruise Rollout
+        run: |
+          set -ex
+          kubectl cluster-info
+          IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+            set -e
+            if [ "$PODS" -eq "1" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+          kubectl get node -o yaml
+          kubectl get all -n kruise-rollout -o yaml
+          set -e
+          if [ "$PODS" -eq "1" ]; then
+            echo "Wait for kruise-rollout ready successfully"
+          else
+            echo "Timeout to wait for kruise-rollout ready"
+            exit 1
+          fi
+      - name: Run E2E Tests
+        run: |
+          export KUBECONFIG=/home/runner/.kube/config
+          make ginkgo
+          set +e
+          ./bin/ginkgo -timeout 60m -v --focus='Deployment canary rollout with Ingress' test/e2e
+          retVal=$?
+          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
+          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
+          if [ "${restartCount}" -eq "0" ];then
+              echo "Kruise-rollout has not restarted"
+          else
+              kubectl get pod -n kruise-rollout --no-headers
+              echo "Kruise-rollout has restarted, abort!!!"
+              kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
+              exit 1
+          fi
+          exit $retVal
@@ -0,0 +1,110 @@
+name: E2E-Deployment-1.23
+
+on:
+  push:
+    branches:
+      - master
+      - release-*
+  pull_request: {}
+  workflow_dispatch: {}
+
+env:
+  # Common versions
+  GO_VERSION: '1.17'
+  KIND_IMAGE: 'kindest/node:v1.23.3'
+  KIND_CLUSTER_NAME: 'ci-testing'
+
+jobs:
+
+  rollout:
+    runs-on: ubuntu-18.04
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - name: Setup Kind Cluster
+        uses: helm/kind-action@v1.2.0
+        with:
+          node_image: ${{ env.KIND_IMAGE }}
+          cluster_name: ${{ env.KIND_CLUSTER_NAME }}
+          config: ./test/kind-conf.yaml
+      - name: Build image
+        run: |
+          export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}"
+          docker build --pull --no-cache . -t $IMAGE
+          kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
+      - name: Install Kruise
+        run: |
+          set -ex
+          kubectl cluster-info
+          make helm
+          helm repo add openkruise https://openkruise.github.io/charts/
+          helm repo update
+          helm install kruise openkruise/kruise
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+            set -e
+            if [ "$PODS" -eq "2" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+          set -e
+          if [ "$PODS" -eq "2" ]; then
+            echo "Wait for kruise-manager ready successfully"
+          else
+            echo "Timeout to wait for kruise-manager ready"
+            exit 1
+          fi
+      - name: Install Kruise Rollout
+        run: |
+          set -ex
+          kubectl cluster-info
+          IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+            set -e
+            if [ "$PODS" -eq "1" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+          kubectl get node -o yaml
+          kubectl get all -n kruise-rollout -o yaml
+          set -e
+          if [ "$PODS" -eq "1" ]; then
+            echo "Wait for kruise-rollout ready successfully"
+          else
+            echo "Timeout to wait for kruise-rollout ready"
+            exit 1
+          fi
+      - name: Run E2E Tests
+        run: |
+          export KUBECONFIG=/home/runner/.kube/config
+          make ginkgo
+          set +e
+          ./bin/ginkgo -timeout 60m -v --focus='Deployment canary rollout with Ingress' test/e2e
+          retVal=$?
+          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
+          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
+          if [ "${restartCount}" -eq "0" ];then
+              echo "Kruise-rollout has not restarted"
+          else
+              kubectl get pod -n kruise-rollout --no-headers
+              echo "Kruise-rollout has restarted, abort!!!"
+              kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
+              exit 1
+          fi
+          exit $retVal
@@ -1,4 +1,4 @@
-name: e2e-gateway
+name: E2E-Gateway
 
 on:
   push:
@@ -0,0 +1,110 @@
+name: E2E-Others-1.19
+
+on:
+  push:
+    branches:
+      - master
+      - release-*
+  pull_request: {}
+  workflow_dispatch: {}
+
+env:
+  # Common versions
+  GO_VERSION: '1.17'
+  KIND_IMAGE: 'kindest/node:v1.19.16'
+  KIND_CLUSTER_NAME: 'ci-testing'
+
+jobs:
+
+  rollout:
+    runs-on: ubuntu-18.04
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - name: Setup Kind Cluster
+        uses: helm/kind-action@v1.2.0
+        with:
+          node_image: ${{ env.KIND_IMAGE }}
+          cluster_name: ${{ env.KIND_CLUSTER_NAME }}
+          config: ./test/kind-conf.yaml
+      - name: Build image
+        run: |
+          export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}"
+          docker build --pull --no-cache . -t $IMAGE
+          kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
+      - name: Install Kruise
+        run: |
+          set -ex
+          kubectl cluster-info
+          make helm
+          helm repo add openkruise https://openkruise.github.io/charts/
+          helm repo update
+          helm install kruise openkruise/kruise
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+            set -e
+            if [ "$PODS" -eq "2" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+          set -e
+          if [ "$PODS" -eq "2" ]; then
+            echo "Wait for kruise-manager ready successfully"
+          else
+            echo "Timeout to wait for kruise-manager ready"
+            exit 1
+          fi
+      - name: Install Kruise Rollout
+        run: |
+          set -ex
+          kubectl cluster-info
+          IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+            set -e
+            if [ "$PODS" -eq "1" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+          kubectl get node -o yaml
+          kubectl get all -n kruise-rollout -o yaml
+          set -e
+          if [ "$PODS" -eq "1" ]; then
+            echo "Wait for kruise-rollout ready successfully"
+          else
+            echo "Timeout to wait for kruise-rollout ready"
+            exit 1
+          fi
+      - name: Run E2E Tests
+        run: |
+          export KUBECONFIG=/home/runner/.kube/config
+          make ginkgo
+          set +e
+          ./bin/ginkgo -timeout 60m -v --focus='Others' test/e2e
+          retVal=$?
+          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
+          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
+          if [ "${restartCount}" -eq "0" ];then
+              echo "Kruise-rollout has not restarted"
+          else
+              kubectl get pod -n kruise-rollout --no-headers
+              echo "Kruise-rollout has restarted, abort!!!"
+              kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
+              exit 1
+          fi
+          exit $retVal
@@ -0,0 +1,110 @@
+name: E2E-Others-1.23
+
+on:
+  push:
+    branches:
+      - master
+      - release-*
+  pull_request: {}
+  workflow_dispatch: {}
+
+env:
+  # Common versions
+  GO_VERSION: '1.17'
+  KIND_IMAGE: 'kindest/node:v1.23.3'
+  KIND_CLUSTER_NAME: 'ci-testing'
+
+jobs:
+
+  rollout:
+    runs-on: ubuntu-18.04
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - name: Setup Kind Cluster
+        uses: helm/kind-action@v1.2.0
+        with:
+          node_image: ${{ env.KIND_IMAGE }}
+          cluster_name: ${{ env.KIND_CLUSTER_NAME }}
+          config: ./test/kind-conf.yaml
+      - name: Build image
+        run: |
+          export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}"
+          docker build --pull --no-cache . -t $IMAGE
+          kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
+      - name: Install Kruise
+        run: |
+          set -ex
+          kubectl cluster-info
+          make helm
+          helm repo add openkruise https://openkruise.github.io/charts/
+          helm repo update
+          helm install kruise openkruise/kruise
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+            set -e
+            if [ "$PODS" -eq "2" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+          set -e
+          if [ "$PODS" -eq "2" ]; then
+            echo "Wait for kruise-manager ready successfully"
+          else
+            echo "Timeout to wait for kruise-manager ready"
+            exit 1
+          fi
+      - name: Install Kruise Rollout
+        run: |
+          set -ex
+          kubectl cluster-info
+          IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+            set -e
+            if [ "$PODS" -eq "1" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+          kubectl get node -o yaml
+          kubectl get all -n kruise-rollout -o yaml
+          set -e
+          if [ "$PODS" -eq "1" ]; then
+            echo "Wait for kruise-rollout ready successfully"
+          else
+            echo "Timeout to wait for kruise-rollout ready"
+            exit 1
+          fi
+      - name: Run E2E Tests
+        run: |
+          export KUBECONFIG=/home/runner/.kube/config
+          make ginkgo
+          set +e
+          ./bin/ginkgo -timeout 60m -v --focus='Others' test/e2e
+          retVal=$?
+          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
+          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
+          if [ "${restartCount}" -eq "0" ];then
+              echo "Kruise-rollout has not restarted"
+          else
+              kubectl get pod -n kruise-rollout --no-headers
+              echo "Kruise-rollout has restarted, abort!!!"
+              kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
+              exit 1
+          fi
+          exit $retVal
@@ -0,0 +1,110 @@
+name: E2E-StatefulSet-1.19
+
+on:
+  push:
+    branches:
+      - master
+      - release-*
+  pull_request: {}
+  workflow_dispatch: {}
+
+env:
+  # Common versions
+  GO_VERSION: '1.17'
+  KIND_IMAGE: 'kindest/node:v1.19.16'
+  KIND_CLUSTER_NAME: 'ci-testing'
+
+jobs:
+
+  rollout:
+    runs-on: ubuntu-18.04
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - name: Setup Kind Cluster
+        uses: helm/kind-action@v1.2.0
+        with:
+          node_image: ${{ env.KIND_IMAGE }}
+          cluster_name: ${{ env.KIND_CLUSTER_NAME }}
+          config: ./test/kind-conf.yaml
+      - name: Build image
+        run: |
+          export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}"
+          docker build --pull --no-cache . -t $IMAGE
+          kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
+      - name: Install Kruise
+        run: |
+          set -ex
+          kubectl cluster-info
+          make helm
+          helm repo add openkruise https://openkruise.github.io/charts/
+          helm repo update
+          helm install kruise openkruise/kruise
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+            set -e
+            if [ "$PODS" -eq "2" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+          set -e
+          if [ "$PODS" -eq "2" ]; then
+            echo "Wait for kruise-manager ready successfully"
+          else
+            echo "Timeout to wait for kruise-manager ready"
+            exit 1
+          fi
+      - name: Install Kruise Rollout
+        run: |
+          set -ex
+          kubectl cluster-info
+          IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+            set -e
+            if [ "$PODS" -eq "1" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+          kubectl get node -o yaml
+          kubectl get all -n kruise-rollout -o yaml
+          set -e
+          if [ "$PODS" -eq "1" ]; then
+            echo "Wait for kruise-rollout ready successfully"
+          else
+            echo "Timeout to wait for kruise-rollout ready"
+            exit 1
+          fi
+      - name: Run E2E Tests
+        run: |
+          export KUBECONFIG=/home/runner/.kube/config
+          make ginkgo
+          set +e
+          ./bin/ginkgo -timeout 60m -v --focus='StatefulSet canary rollout with Ingress' test/e2e
+          retVal=$?
+          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
+          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
+          if [ "${restartCount}" -eq "0" ];then
+              echo "Kruise-rollout has not restarted"
+          else
+              kubectl get pod -n kruise-rollout --no-headers
+              echo "Kruise-rollout has restarted, abort!!!"
+              kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
+              exit 1
+          fi
+          exit $retVal
@@ -0,0 +1,110 @@
+name: E2E-StatefulSet-1.23
+
+on:
+  push:
+    branches:
+      - master
+      - release-*
+  pull_request: {}
+  workflow_dispatch: {}
+
+env:
+  # Common versions
+  GO_VERSION: '1.17'
+  KIND_IMAGE: 'kindest/node:v1.23.3'
+  KIND_CLUSTER_NAME: 'ci-testing'
+
+jobs:
+
+  rollout:
+    runs-on: ubuntu-18.04
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - name: Setup Kind Cluster
+        uses: helm/kind-action@v1.2.0
+        with:
+          node_image: ${{ env.KIND_IMAGE }}
+          cluster_name: ${{ env.KIND_CLUSTER_NAME }}
+          config: ./test/kind-conf.yaml
+      - name: Build image
+        run: |
+          export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}"
+          docker build --pull --no-cache . -t $IMAGE
+          kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
+      - name: Install Kruise
+        run: |
+          set -ex
+          kubectl cluster-info
+          make helm
+          helm repo add openkruise https://openkruise.github.io/charts/
+          helm repo update
+          helm install kruise openkruise/kruise
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+            set -e
+            if [ "$PODS" -eq "2" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
+          set -e
+          if [ "$PODS" -eq "2" ]; then
+            echo "Wait for kruise-manager ready successfully"
+          else
+            echo "Timeout to wait for kruise-manager ready"
+            exit 1
+          fi
+      - name: Install Kruise Rollout
+        run: |
+          set -ex
+          kubectl cluster-info
+          IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+            set -e
+            if [ "$PODS" -eq "1" ]; then
+              break
+            fi
+            sleep 3
+          done
+          set +e
+          PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l)
+          kubectl get node -o yaml
+          kubectl get all -n kruise-rollout -o yaml
+          set -e
+          if [ "$PODS" -eq "1" ]; then
+            echo "Wait for kruise-rollout ready successfully"
+          else
+            echo "Timeout to wait for kruise-rollout ready"
+            exit 1
+          fi
+      - name: Run E2E Tests
+        run: |
+          export KUBECONFIG=/home/runner/.kube/config
+          make ginkgo
+          set +e
+          ./bin/ginkgo -timeout 60m -v --focus='StatefulSet canary rollout with Ingress' test/e2e
+          retVal=$?
+          # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout
+          restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}')
+          if [ "${restartCount}" -eq "0" ];then
+              echo "Kruise-rollout has not restarted"
+          else
+              kubectl get pod -n kruise-rollout --no-headers
+              echo "Kruise-rollout has restarted, abort!!!"
+              kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout
+              exit 1
+          fi
+          exit $retVal
@@ -42,6 +42,8 @@ type ReleasePlan struct {
     // BatchPartition start from 0.
     // +optional
     BatchPartition *int32 `json:"batchPartition,omitempty"`
+    // RolloutID indicates an id for each rollout progress
+    RolloutID string `json:"rolloutID,omitempty"`
 }
 
 // ReleaseBatch is used to describe how each batch release should be
@@ -50,9 +52,6 @@ type ReleaseBatch struct {
     // it can be an absolute number (ex: 5) or a percentage of workload replicas.
     // batches[i].canaryReplicas should less than or equal to batches[j].canaryReplicas if i < j.
     CanaryReplicas intstr.IntOrString `json:"canaryReplicas"`
-    // The wait time, in seconds, between instances batches, default = 0
-    // +optional
-    PauseSeconds int64 `json:"pauseSeconds,omitempty"`
 }
 
 // BatchReleaseStatus defines the observed state of a release plan
@@ -98,6 +97,8 @@ type BatchReleaseCanaryStatus struct {
     UpdatedReplicas int32 `json:"updatedReplicas,omitempty"`
     // UpdatedReadyReplicas is the number upgraded Pods that have a Ready Condition.
     UpdatedReadyReplicas int32 `json:"updatedReadyReplicas,omitempty"`
+    // the number of pods that no need to rollback in rollback scene.
+    NoNeedUpdateReplicas *int32 `json:"noNeedUpdateReplicas,omitempty"`
 }
 
 type BatchReleaseBatchStateType string
@@ -111,6 +112,17 @@ const (
     ReadyBatchState BatchReleaseBatchStateType = "Ready"
 )
 
+const (
+    // RolloutPhaseCancelled indicates a rollout is cancelled
+    RolloutPhaseCancelled RolloutPhase = "Cancelled"
+    // RolloutPhaseFinalizing indicates a rollout is finalizing
+    RolloutPhaseFinalizing RolloutPhase = "Finalizing"
+    // RolloutPhaseCompleted indicates a rollout is completed
+    RolloutPhaseCompleted RolloutPhase = "Completed"
+    // RolloutPhasePreparing indicates a rollout is preparing for next progress.
+    RolloutPhasePreparing RolloutPhase = "Preparing"
+)
+
 const (
     // VerifyingBatchReleaseCondition indicates the controller is verifying whether workload
     // is ready to do rollout.
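Taken together, the API now expresses pacing through BatchPartition rather than a per-batch timer, and ties rollback bookkeeping to a RolloutID. A minimal sketch of a plan built against these fields (the concrete values are illustrative, not from this commit):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/intstr"
        "k8s.io/utils/pointer"

        "github.com/openkruise/rollouts/api/v1alpha1"
    )

    func main() {
        // Three batches; note there is no PauseSeconds anymore. Progression
        // is gated by BatchPartition instead of a timer.
        plan := v1alpha1.ReleasePlan{
            Batches: []v1alpha1.ReleaseBatch{
                {CanaryReplicas: intstr.FromString("10%")},
                {CanaryReplicas: intstr.FromString("50%")},
                {CanaryReplicas: intstr.FromString("80%")},
            },
            // Hold the release at batch 0 until the partition is raised.
            BatchPartition: pointer.Int32(0),
            // RolloutID ties the no-need-update pod labels to this rollout.
            RolloutID: "rollout-demo-1",
        }
        fmt.Printf("%+v\n", plan)
    }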
@@ -104,8 +104,6 @@ type CanaryStep struct {
     // SetWeight sets what percentage of the canary pods should receive
 
     // +optional
-    // +kubebuilder:validation:Minimum=1
-    // +kubebuilder:validation:Maximum=100
     Weight *int32 `json:"weight,omitempty"`
     // Replicas is the number of expected canary pods in this batch
     // it can be an absolute number (ex: 5) or a percentage of total pods.
@@ -120,7 +118,6 @@ type CanaryStep struct {
 type RolloutPause struct {
     // Duration the amount of time to wait before moving to the next step.
     // +optional
-    // +kubebuilder:validation:Minimum=0
     Duration *int32 `json:"duration,omitempty"`
 }
 
@@ -129,7 +126,6 @@ type TrafficRouting struct {
     // Service holds the name of a service which selects pods with stable version and don't select any pods with canary version.
     Service string `json:"service"`
     // Optional duration in seconds the traffic provider(e.g. nginx ingress controller) consumes the service, ingress configuration changes gracefully.
-    // +kubebuilder:validation:Minimum=0
     GracePeriodSeconds int32 `json:"gracePeriodSeconds,omitempty"`
     // Ingress holds Ingress specific configuration to route traffic, e.g. Nginx, Alb.
     Ingress *IngressTrafficRouting `json:"ingress,omitempty"`
 
@@ -271,18 +267,10 @@ const (
     RolloutPhaseInitial RolloutPhase = "Initial"
     // RolloutPhaseHealthy indicates a rollout is healthy
     RolloutPhaseHealthy RolloutPhase = "Healthy"
-    // RolloutPhasePreparing indicates a rollout is preparing for next progress.
-    RolloutPhasePreparing RolloutPhase = "Preparing"
     // RolloutPhaseProgressing indicates a rollout is not yet healthy but still making progress towards a healthy state
     RolloutPhaseProgressing RolloutPhase = "Progressing"
-    // RolloutPhaseFinalizing indicates a rollout is finalizing
-    RolloutPhaseFinalizing RolloutPhase = "Finalizing"
     // RolloutPhaseTerminating indicates a rollout is terminated
     RolloutPhaseTerminating RolloutPhase = "Terminating"
-    // RolloutPhaseCompleted indicates a rollout is completed
-    RolloutPhaseCompleted RolloutPhase = "Completed"
-    // RolloutPhaseCancelled indicates a rollout is cancelled
-    RolloutPhaseCancelled RolloutPhase = "Cancelled"
 )
 
 // +genclient
@@ -60,6 +60,11 @@ func (in *BatchReleaseCanaryStatus) DeepCopyInto(out *BatchReleaseCanaryStatus)
         in, out := &in.BatchReadyTime, &out.BatchReadyTime
         *out = (*in).DeepCopy()
     }
+    if in.NoNeedUpdateReplicas != nil {
+        in, out := &in.NoNeedUpdateReplicas, &out.NoNeedUpdateReplicas
+        *out = new(int32)
+        **out = **in
+    }
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchReleaseCanaryStatus.
@@ -88,15 +88,13 @@ spec:
                         should less than or equal to batches[j].canaryReplicas
                         if i < j.'
                       x-kubernetes-int-or-string: true
-                    pauseSeconds:
-                      description: The wait time, in seconds, between instances
-                        batches, default = 0
-                      format: int64
-                      type: integer
                   required:
                   - canaryReplicas
                   type: object
                 type: array
+              rolloutID:
+                description: RolloutID indicates an id for each rollout progress
+                type: string
             type: object
           targetReference:
             description: TargetRef contains the GVK and name of the workload that
@@ -148,6 +146,11 @@ spec:
                     it starts from 0
                   format: int32
                   type: integer
+                noNeedUpdateReplicas:
+                  description: the number of pods that no need to rollback in rollback
+                    scene.
+                  format: int32
+                  type: integer
                 updatedReadyReplicas:
                   description: UpdatedReadyReplicas is the number upgraded Pods
                     that have a Ready Condition.
@@ -106,7 +106,6 @@ spec:
                       description: Duration the amount of time to wait
                         before moving to the next step.
                       format: int32
-                      minimum: 0
                       type: integer
                     type: object
                   replicas:
@@ -119,8 +118,6 @@ spec:
                       x-kubernetes-int-or-string: true
                     weight:
                       format: int32
-                      maximum: 100
-                      minimum: 1
                       type: integer
                   type: object
                 type: array
@@ -149,7 +146,6 @@ spec:
                     provider(e.g. nginx ingress controller) consumes the
                     service, ingress configuration changes gracefully.
                   format: int32
-                  minimum: 0
                   type: integer
                 ingress:
                   description: Ingress holds Ingress specific configuration
go.sum
@@ -404,6 +404,8 @@ github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 github.com/openkruise/kruise-api v1.0.0 h1:ScA0LxRRNBsgbcyLhTzR9B+KpGNWsIMptzzmjTqfYQo=
 github.com/openkruise/kruise-api v1.0.0/go.mod h1:kxV/UA/vrf/hz3z+kL21c0NOawC6K1ZjaKcJFgiOwsE=
+github.com/openkruise/kruise-api v1.2.0 h1:MhoQtYT2tRdjrpb51xhn3lhEDWSlRGiMYQQ0Sh3zCkk=
+github.com/openkruise/kruise-api v1.2.0/go.mod h1:BKMffjLFufZkj/yVpF5TjXG9gMU3Y9A3FxrVOJ5LJUI=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -974,6 +976,7 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 k8s.io/api v0.20.10/go.mod h1:0kei3F6biGjtRQBo5dUeujq6Ji3UCh9aOSfp/THYd7I=
+k8s.io/api v0.20.15/go.mod h1:X3JDf1BiTRQQ6xNAxTuhgi6yL2dHc6fSr9LGzE+Z3YU=
 k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg=
 k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
 k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
@@ -984,6 +987,7 @@ k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQR
 k8s.io/apiextensions-apiserver v0.22.6 h1:TH+9+EGtoVzzbrlfSDnObzFTnyXKqw1NBfT5XFATeJI=
 k8s.io/apiextensions-apiserver v0.22.6/go.mod h1:wNsLwy8mfIkGThiv4Qq/Hy4qRazViKXqmH5pfYiRKyY=
 k8s.io/apimachinery v0.20.10/go.mod h1:kQa//VOAwyVwJ2+L9kOREbsnryfsGSkSM1przND4+mw=
+k8s.io/apimachinery v0.20.15/go.mod h1:4KFiDSxCoGviCiRk9kTXIROsIf4VSGkVYjVJjJln3pg=
 k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
 k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
 k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
@@ -993,12 +997,14 @@ k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU=
 k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI=
 k8s.io/apiserver v0.22.6/go.mod h1:OlL1rGa2kKWGj2JEXnwBcul/BwC9Twe95gm4ohtiIIs=
 k8s.io/client-go v0.20.10/go.mod h1:fFg+aLoasv/R+xiVaWjxeqGFYltzgQcOQzkFaSRfnJ0=
+k8s.io/client-go v0.20.15/go.mod h1:q/vywQFfGT3jw+lXQGA9sEJDH0QEX7XUT2PwrQ2qm/I=
 k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU=
 k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
 k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
 k8s.io/client-go v0.22.6 h1:ugAXeC312xeGXsn7zTRz+btgtLBnW3qYhtUUpVQL7YE=
 k8s.io/client-go v0.22.6/go.mod h1:TffU4AV2idZGeP+g3kdFZP+oHVHWPL1JYFySOALriw0=
 k8s.io/code-generator v0.20.10/go.mod h1:i6FmG+QxaLxvJsezvZp0q/gAEzzOz3U53KFibghWToU=
+k8s.io/code-generator v0.20.15/go.mod h1:MW85KuhTjX9nzhFYpRqUOYh4et0xeEBHTEjwBzFYGaM=
 k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo=
 k8s.io/code-generator v0.22.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
 k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
@@ -1025,6 +1031,8 @@ k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iL
 k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
 k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
 k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211110013926-83f114cd0513 h1:pbudjNtv90nOgR0/DUhPwKHnQ55Khz8+sNhJBIK7A5M=
+k8s.io/kube-openapi v0.0.0-20211110013926-83f114cd0513/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
 k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@@ -102,6 +102,10 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
                 klog.V(3).Infof("Observed updated Spec for BatchRelease: %s/%s", newObject.Namespace, newObject.Name)
                 return true
             }
+            if len(oldObject.Annotations) != len(newObject.Annotations) || !reflect.DeepEqual(oldObject.Annotations, newObject.Annotations) {
+                klog.V(3).Infof("Observed updated Annotation for BatchRelease: %s/%s", newObject.Namespace, newObject.Name)
+                return true
+            }
             return false
         },
     })
@@ -71,15 +71,12 @@ var (
             Batches: []v1alpha1.ReleaseBatch{
                 {
                     CanaryReplicas: intstr.FromString("10%"),
-                    PauseSeconds:   100,
                 },
                 {
                     CanaryReplicas: intstr.FromString("50%"),
-                    PauseSeconds:   100,
                 },
                 {
                     CanaryReplicas: intstr.FromString("80%"),
-                    PauseSeconds:   100,
                 },
             },
         },
@@ -153,15 +150,12 @@ var (
             Batches: []v1alpha1.ReleaseBatch{
                 {
                     CanaryReplicas: intstr.FromString("10%"),
-                    PauseSeconds:   100,
                 },
                 {
                     CanaryReplicas: intstr.FromString("50%"),
-                    PauseSeconds:   100,
                 },
                 {
                     CanaryReplicas: intstr.FromString("80%"),
-                    PauseSeconds:   100,
                 },
             },
         },
@@ -536,7 +530,6 @@ func TestReconcile_CloneSet(t *testing.T) {
             Client:   cli,
             recorder: rec,
             Scheme:   scheme,
             executor: NewReleasePlanExecutor(cli, rec),
         }
 
         key := client.ObjectKeyFromObject(release)
@@ -815,7 +808,6 @@ func TestReconcile_Deployment(t *testing.T) {
             Client:   cli,
             recorder: rec,
             Scheme:   scheme,
             executor: NewReleasePlanExecutor(cli, rec),
         }
 
         key := client.ObjectKeyFromObject(release)
@@ -22,6 +22,7 @@ import (
     "reflect"
 
     kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
+    kruiseappsv1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
     "github.com/openkruise/rollouts/api/v1alpha1"
     "github.com/openkruise/rollouts/pkg/util"
     utilclient "github.com/openkruise/rollouts/pkg/util/client"
@@ -122,13 +123,17 @@ func (w workloadEventHandler) Create(evt event.CreateEvent, q workqueue.RateLimi
 
 func (w workloadEventHandler) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
     var gvk schema.GroupVersionKind
-    switch evt.ObjectNew.(type) {
-    case *appsv1.Deployment:
-        gvk = util.ControllerKindDep
+    switch obj := evt.ObjectNew.(type) {
     case *kruiseappsv1alpha1.CloneSet:
         gvk = util.ControllerKruiseKindCS
+    case *appsv1.Deployment:
+        gvk = util.ControllerKindDep
+    case *appsv1.StatefulSet:
+        gvk = util.ControllerKindSts
+    case *kruiseappsv1beta1.StatefulSet:
+        gvk = util.ControllerKruiseKindSts
     case *unstructured.Unstructured:
-        gvk = evt.ObjectNew.(*unstructured.Unstructured).GroupVersionKind()
+        gvk = obj.GroupVersionKind()
     default:
         return
     }
@@ -168,13 +173,17 @@ func (w workloadEventHandler) Generic(evt event.GenericEvent, q workqueue.RateLi
 
 func (w *workloadEventHandler) handleWorkload(q workqueue.RateLimitingInterface, obj client.Object, action EventAction) {
     var gvk schema.GroupVersionKind
-    switch obj.(type) {
+    switch o := obj.(type) {
     case *kruiseappsv1alpha1.CloneSet:
         gvk = util.ControllerKruiseKindCS
     case *appsv1.Deployment:
         gvk = util.ControllerKindDep
+    case *appsv1.StatefulSet:
+        gvk = util.ControllerKindSts
+    case *kruiseappsv1beta1.StatefulSet:
+        gvk = util.ControllerKruiseKindSts
     case *unstructured.Unstructured:
-        gvk = obj.(*unstructured.Unstructured).GroupVersionKind()
+        gvk = o.GroupVersionKind()
     default:
         return
     }
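The rewritten switches use Go's typed type-switch binding: `switch obj := evt.ObjectNew.(type)` gives each case a value already asserted to that case's type, which is why the unstructured branch can call GroupVersionKind() directly instead of repeating the assertion. A self-contained sketch of the same pattern (the GVKs are written out literally here for illustration; the controller itself uses the util.Controller* constants):

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"
    )

    // gvkOf mirrors the handler's type switch: known workload types map to a
    // static GVK, while unstructured objects carry their own.
    func gvkOf(obj runtime.Object) (schema.GroupVersionKind, bool) {
        switch o := obj.(type) {
        case *appsv1.Deployment:
            return schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, true
        case *appsv1.StatefulSet:
            return schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"}, true
        case *unstructured.Unstructured:
            // o is already typed; no second assertion is needed.
            return o.GroupVersionKind(), true
        default:
            return schema.GroupVersionKind{}, false
        }
    }

    func main() {
        u := &unstructured.Unstructured{}
        u.SetAPIVersion("apps.kruise.io/v1alpha1")
        u.SetKind("CloneSet")
        gvk, _ := gvkOf(u)
        fmt.Println(gvk.String())
    }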
@@ -37,7 +37,7 @@ import (
 )
 
 const (
-    DefaultDuration = (50 * 1000) * time.Microsecond
+    DefaultDuration = 2 * time.Second
 )
 
 // Executor is the controller that controls the release plan resource
@@ -103,14 +103,18 @@ func (r *Executor) executeBatchReleasePlan(release *v1alpha1.BatchRelease, newSt
     case v1alpha1.RolloutPhasePreparing:
         // prepare and initialize something before progressing in this state.
         var preparedDone bool
-        preparedDone, err = workloadController.PrepareBeforeProgress()
+        var replicasNoNeedToRollback *int32
+        preparedDone, replicasNoNeedToRollback, err = workloadController.PrepareBeforeProgress()
         switch {
         case err != nil:
             setCondition(newStatus, v1alpha1.PreparingBatchReleaseCondition, v1.ConditionFalse, v1alpha1.FailedBatchReleaseConditionReason, err.Error())
         case preparedDone:
             newStatus.Phase = v1alpha1.RolloutPhaseProgressing
             result = reconcile.Result{RequeueAfter: DefaultDuration}
+            newStatus.CanaryStatus.NoNeedUpdateReplicas = replicasNoNeedToRollback
             setCondition(newStatus, v1alpha1.ProgressingBatchReleaseCondition, v1.ConditionTrue, "", "BatchRelease is progressing")
         default:
             result = reconcile.Result{RequeueAfter: DefaultDuration}
         }
 
     case v1alpha1.RolloutPhaseProgressing:
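The Preparing branch now expects every workload controller to report the rollback-context replica count alongside readiness. The WorkloadController contract implied by this call site is presumably along these lines (a reconstruction from the call site, not copied from the commit):

    // WorkloadController is a sketch of the contract implied by the call
    // above; the real interface in the batchrelease package may differ.
    type WorkloadController interface {
        // PrepareBeforeProgress returns:
        //   done         - whether preparation (including rollback labeling) finished,
        //   noNeedUpdate - how many pods can be skipped in a rollback-in-batch, nil otherwise,
        //   err          - any error that flips the Preparing condition to False.
        PrepareBeforeProgress() (done bool, noNeedUpdate *int32, err error)
    }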
@@ -189,7 +193,6 @@ func (r *Executor) progressBatches(release *v1alpha1.BatchRelease, newStatus *v1
     }
 
     case v1alpha1.VerifyingBatchState:
-        // TODO: metrics analysis
         // replicas/partition has been modified, should wait pod ready in this state.
         verified, verifiedErr := workloadController.CheckOneBatchReady()
         switch {
@@ -207,20 +210,10 @@ func (r *Executor) progressBatches(release *v1alpha1.BatchRelease, newStatus *v1
 
     case v1alpha1.ReadyBatchState:
         if !IsPartitioned(release) {
-            currentTimestamp := time.Now()
-            currentBatch := release.Spec.ReleasePlan.Batches[release.Status.CanaryStatus.CurrentBatch]
-            waitDuration := time.Duration(currentBatch.PauseSeconds) * time.Second
-            if waitDuration > 0 && release.Status.CanaryStatus.BatchReadyTime.Time.Add(waitDuration).After(currentTimestamp) {
-                restDuration := release.Status.CanaryStatus.BatchReadyTime.Time.Add(waitDuration).Sub(currentTimestamp)
-                result = reconcile.Result{RequeueAfter: restDuration}
-                setCondition(newStatus, "Progressing", v1.ConditionFalse, "Paused", fmt.Sprintf("BatchRelease will resume after %v", restDuration))
-                klog.Infof("BatchRelease (%v) paused and will continue to reconcile after %v", klog.KObj(release), restDuration)
-            } else {
-                // expected pods in the batch are upgraded and the state is ready, then try to move to the next batch
-                progressDone = r.moveToNextBatch(release, newStatus)
-                result = reconcile.Result{RequeueAfter: DefaultDuration}
-                setCondition(newStatus, v1alpha1.ProgressingBatchReleaseCondition, v1.ConditionTrue, "", "BatchRelease is progressing")
-            }
+            // expected pods in the batch are upgraded and the state is ready, then try to move to the next batch
+            progressDone = r.moveToNextBatch(release, newStatus)
+            result = reconcile.Result{RequeueAfter: DefaultDuration}
+            setCondition(newStatus, v1alpha1.ProgressingBatchReleaseCondition, v1.ConditionTrue, "", "BatchRelease is progressing")
         } else {
             setCondition(newStatus, "Progressing", v1.ConditionFalse, "Paused", fmt.Sprintf("BatchRelease is partitioned in %v-th batch", newStatus.CanaryStatus.CurrentBatch))
         }
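With PauseSeconds gone, a Ready batch either advances immediately or holds because the plan is partitioned; there is no timer in between. IsPartitioned itself is not shown in this diff, but given how BatchPartition is documented above it plausibly reduces to the following (hypothetical reconstruction, not the committed code):

    // isPartitioned: the release is "partitioned" when the user pinned
    // BatchPartition at (or below) the batch that just became ready.
    func isPartitioned(batchPartition *int32, currentBatch int32) bool {
        return batchPartition != nil && *batchPartition <= currentBatch
    }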
@@ -111,8 +111,8 @@ func (r *Executor) syncStatusBeforeExecuting(release *v1alpha1.BatchRelease, new
         // the workload scaling event, otherwise this event may be lost.
         newStatus.ObservedWorkloadReplicas = *workloadInfo.Replicas
 
-    case isWorkloadChanged(workloadEvent, release):
-        // handle the case of continuous release v1 -> v2 -> v3
+    case isWorkloadRevisionChanged(workloadEvent, release):
+        // handle the case of continuous release
         reason = "TargetRevisionChanged"
         message = "workload revision was changed, then abort"
         signalFinalize(newStatus)
@@ -128,6 +128,19 @@ func (r *Executor) syncStatusBeforeExecuting(release *v1alpha1.BatchRelease, new
         reason = "WorkloadNotStable"
         message = "workload status is not stable, then wait"
         needStopThisRound = true
+
+    case isWorkloadRollbackInBatch(workloadEvent, release):
+        // handle the case of rollback in batches
+        if isRollbackInBatchSatisfied(workloadInfo, release) {
+            reason = "RollbackInBatch"
+            message = "workload is rollback in batch"
+            signalRePrepareRollback(newStatus)
+            newStatus.UpdateRevision = workloadInfo.Status.UpdateRevision
+        } else {
+            reason = "Rollback"
+            message = "workload is preparing rollback, wait condition to be satisfied"
+            needStopThisRound = true
+        }
     }
 
     // log the special event info
@@ -202,10 +215,15 @@ func isWorkloadScaling(event workloads.WorkloadEventType, release *v1alpha1.Batc
     return event == workloads.WorkloadReplicasChanged && release.Status.Phase == v1alpha1.RolloutPhaseProgressing
 }
 
-func isWorkloadChanged(event workloads.WorkloadEventType, release *v1alpha1.BatchRelease) bool {
+func isWorkloadRevisionChanged(event workloads.WorkloadEventType, release *v1alpha1.BatchRelease) bool {
     return event == workloads.WorkloadPodTemplateChanged && release.Status.Phase == v1alpha1.RolloutPhaseProgressing
 }
 
+func isWorkloadRollbackInBatch(event workloads.WorkloadEventType, release *v1alpha1.BatchRelease) bool {
+    return (event == workloads.WorkloadRollbackInBatch || release.Annotations[util.RollbackInBatchAnnotation] != "") &&
+        release.Status.CanaryStatus.NoNeedUpdateReplicas == nil && release.Status.Phase == v1alpha1.RolloutPhaseProgressing
+}
+
 func isWorkloadUnhealthy(event workloads.WorkloadEventType, release *v1alpha1.BatchRelease) bool {
     return event == workloads.WorkloadUnHealthy && release.Status.Phase == v1alpha1.RolloutPhaseProgressing
 }
@@ -213,3 +231,10 @@ func isWorkloadUnhealthy(event workloads.WorkloadEventType, release *v1alpha1.Ba
 func isWorkloadUnstable(event workloads.WorkloadEventType, _ *v1alpha1.BatchRelease) bool {
     return event == workloads.WorkloadStillReconciling
 }
+
+func isRollbackInBatchSatisfied(workloadInfo *util.WorkloadInfo, release *v1alpha1.BatchRelease) bool {
+    if workloadInfo.Status == nil {
+        return false
+    }
+    return workloadInfo.Status.StableRevision == workloadInfo.Status.UpdateRevision && release.Annotations[util.RollbackInBatchAnnotation] != ""
+}
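Read together, the two new predicates gate the rollback path twice: isWorkloadRollbackInBatch fires only while the release is Progressing and before a no-need-update count has been recorded, while isRollbackInBatchSatisfied additionally requires the workload to have converged so that its update revision equals its stable revision. Condensed into one hypothetical helper (illustration only, not committed code):

    // shouldRePrepareForRollback folds isWorkloadRollbackInBatch and
    // isRollbackInBatchSatisfied into a single boolean.
    func shouldRePrepareForRollback(stableRev, updateRev, annotation string, noNeedUpdate *int32, progressing bool) bool {
        triggered := annotation != "" && noNeedUpdate == nil && progressing
        satisfied := stableRev == updateRev
        return triggered && satisfied
    }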
@@ -44,7 +44,14 @@ func getInitializedStatus(status *v1alpha1.BatchReleaseStatus) *v1alpha1.BatchRe
     return newStatus
 }
 
+func signalRePrepareRollback(newStatus *v1alpha1.BatchReleaseStatus) {
+    newStatus.Phase = v1alpha1.RolloutPhasePreparing
+    newStatus.CanaryStatus.BatchReadyTime = nil
+    newStatus.CanaryStatus.CurrentBatchState = v1alpha1.UpgradingBatchState
+}
+
 func signalReinitializeBatch(status *v1alpha1.BatchReleaseStatus) {
     status.CanaryStatus.BatchReadyTime = nil
     status.CanaryStatus.CurrentBatchState = v1alpha1.UpgradingBatchState
 }
 
@@ -74,6 +81,7 @@ func signalRecalculate(release *v1alpha1.BatchRelease, newStatus *v1alpha1.Batch
 
     klog.Infof("BatchRelease(%v) canary batch changed from %v to %v when the release plan changed",
         client.ObjectKeyFromObject(release), newStatus.CanaryStatus.CurrentBatch, currentBatch)
+    newStatus.CanaryStatus.BatchReadyTime = nil
     newStatus.CanaryStatus.CurrentBatch = currentBatch
     newStatus.CanaryStatus.CurrentBatchState = v1alpha1.UpgradingBatchState
     newStatus.ObservedReleasePlanHash = util.HashReleasePlanBatches(&release.Spec.ReleasePlan)
@@ -29,6 +29,7 @@ import (
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/client-go/tools/record"
     "k8s.io/klog/v2"
+    "k8s.io/utils/pointer"
     "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -84,7 +85,7 @@ func (c *CloneSetRolloutController) VerifyWorkload() (bool, error) {
     }
 
     // if the cloneSet is not paused and is not under our control
-    if !c.clone.Spec.UpdateStrategy.Paused {
+    if !(c.clone.Spec.UpdateStrategy.Paused || c.clone.Spec.UpdateStrategy.Partition.IntVal > *c.clone.Spec.Replicas || c.clone.Spec.UpdateStrategy.Partition.StrVal == "100%") {
         message = fmt.Sprintf("CloneSet(%v) should be paused before execute the release plan", c.targetNamespacedName)
         return false, nil
     }
@@ -93,22 +94,85 @@ func (c *CloneSetRolloutController) VerifyWorkload() (bool, error) {
     return true, nil
 }
 
+// prepareBeforeRollback makes sure that the updated pods have been patched no-need-update label.
+// return values:
+// - bool: whether all updated pods have been patched no-need-update label;
+// - *int32: how many pods have been patched;
+// - err: whether error occurs.
+func (c *CloneSetRolloutController) prepareBeforeRollback() (bool, *int32, error) {
+    if c.release.Annotations[util.RollbackInBatchAnnotation] != "true" {
+        return true, nil, nil
+    }
+
+    noNeedRollbackReplicas := int32(0)
+    rolloutID := c.release.Spec.ReleasePlan.RolloutID
+    if rolloutID == "" {
+        return true, &noNeedRollbackReplicas, nil
+    }
+
+    pods, err := util.ListOwnedPods(c.client, c.clone)
+    if err != nil {
+        klog.Errorf("Failed to list pods for CloneSet %v", c.targetNamespacedName)
+        return false, nil, err
+    }
+
+    updateRevision := c.clone.Status.UpdateRevision
+    var filterPods []*v1.Pod
+    for i := range pods {
+        if !pods[i].DeletionTimestamp.IsZero() {
+            continue
+        }
+        if !util.IsConsistentWithRevision(pods[i], updateRevision) {
+            continue
+        }
+        if id, ok := pods[i].Labels[util.NoNeedUpdatePodLabel]; ok && id == rolloutID {
+            noNeedRollbackReplicas++
+            continue
+        }
+        filterPods = append(filterPods, pods[i])
+    }
+
+    if len(filterPods) == 0 {
+        return true, &noNeedRollbackReplicas, nil
+    }
+
+    for _, pod := range filterPods {
+        podClone := pod.DeepCopy()
+        body := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, util.NoNeedUpdatePodLabel, rolloutID)
+        err = c.client.Patch(context.TODO(), podClone, client.RawPatch(types.StrategicMergePatchType, []byte(body)))
+        if err != nil {
+            klog.Errorf("Failed to patch rollback labels[%s]=%s to pod %v", util.NoNeedUpdatePodLabel, rolloutID, client.ObjectKeyFromObject(pod))
+            return false, &noNeedRollbackReplicas, err
+        } else {
+            klog.Infof("Succeeded to patch rollback labels[%s]=%s to pod %v", util.NoNeedUpdatePodLabel, rolloutID, client.ObjectKeyFromObject(pod))
+        }
+        noNeedRollbackReplicas++
+    }
+    klog.Infof("BatchRelease(%v) find %v replicas no need to rollback", c.releasePlanKey, noNeedRollbackReplicas)
+    return false, &noNeedRollbackReplicas, nil
+}
+
 // PrepareBeforeProgress makes sure that the source and target CloneSet is under our control
-func (c *CloneSetRolloutController) PrepareBeforeProgress() (bool, error) {
+func (c *CloneSetRolloutController) PrepareBeforeProgress() (bool, *int32, error) {
     if err := c.fetchCloneSet(); err != nil {
-        return false, err
+        return false, nil, err
     }
 
+    done, noNeedRollbackReplicas, err := c.prepareBeforeRollback()
+    if err != nil || !done {
+        return false, noNeedRollbackReplicas, err
+    }
+
     // claim the cloneSet is under our control
     if _, err := c.claimCloneSet(c.clone); err != nil {
-        return false, err
+        return false, noNeedRollbackReplicas, err
     }
 
     // record revisions and replicas info to BatchRelease.Status
     c.recordCloneSetRevisionAndReplicas()
 
     c.recorder.Event(c.release, v1.EventTypeNormal, "InitializedSuccessfully", "Rollout resource are initialized")
-    return true, nil
+    return true, noNeedRollbackReplicas, nil
 }
 
 // UpgradeOneBatch calculates the number of pods we can upgrade once according to the rollout spec
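The pod label patch above is a plain strategic-merge patch whose body is a small JSON document. A standalone sketch of what gets sent (the literal label key here is an assumption; the controller reads it from util.NoNeedUpdatePodLabel):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // Assumed literal for util.NoNeedUpdatePodLabel; the real constant
        // lives in pkg/util and may differ.
        const noNeedUpdatePodLabel = "rollouts.kruise.io/no-need-update"
        rolloutID := "rollout-demo-1"

        body := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, noNeedUpdatePodLabel, rolloutID)
        fmt.Println(body, json.Valid([]byte(body))) // prints the patch body and "true"
    }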
@@ -128,55 +192,64 @@ func (c *CloneSetRolloutController) UpgradeOneBatch() (bool, error) {
         return false, nil
     }
 
-    currentBatch := c.release.Status.CanaryStatus.CurrentBatch
-    // the number of canary pods should have in current batch
-    canaryGoal := c.calculateCurrentCanary(c.newStatus.ObservedWorkloadReplicas)
-    // the number of stable pods should have in current batch
-    stableGoal := c.calculateCurrentStable(c.newStatus.ObservedWorkloadReplicas)
-    // the number of canary pods now we have in current state
-    currentCanaryReplicas := c.clone.Status.UpdatedReplicas
-    // workload partition calculated
-    workloadPartition, _ := intstr.GetValueFromIntOrPercent(c.clone.Spec.UpdateStrategy.Partition,
-        int(c.newStatus.ObservedWorkloadReplicas), true)
+    pods, err := util.ListOwnedPods(c.client, c.clone)
+    if err != nil {
+        klog.Errorf("Failed to list pods for CloneSet %v", c.targetNamespacedName)
+        return false, err
+    }
+
+    var noNeedRollbackReplicas int32
+    if c.newStatus.CanaryStatus.NoNeedUpdateReplicas != nil {
+        noNeedRollbackReplicas = countNoNeedRollbackReplicas(pods, c.newStatus.UpdateRevision, c.release.Spec.ReleasePlan.RolloutID)
+        c.newStatus.CanaryStatus.NoNeedUpdateReplicas = pointer.Int32(noNeedRollbackReplicas)
+    }
+
+    updatedReplicas := c.clone.Status.UpdatedReplicas
+    replicas := c.newStatus.ObservedWorkloadReplicas
+    currentBatch := c.newStatus.CanaryStatus.CurrentBatch
+    partitionedStableReplicas, _ := intstr.GetValueFromIntOrPercent(c.clone.Spec.UpdateStrategy.Partition, int(replicas), true)
+
+    // the number of canary pods should have in current batch in plan
+    plannedBatchCanaryReplicas := c.calculateCurrentCanary(c.newStatus.ObservedWorkloadReplicas)
+    // the number of canary pods that consider rollback context and other real-world situations
+    expectedBatchCanaryReplicas := c.calculateCurrentCanary(replicas - noNeedRollbackReplicas)
+    // the number of stable pods that consider rollback context and other real-world situations
+    expectedBatchStableReplicas := replicas - noNeedRollbackReplicas - expectedBatchCanaryReplicas
 
     // if canaryReplicas is int, then we use int;
     // if canaryReplicas is percentage, then we use percentage.
-    var partitionGoal intstr.IntOrString
-    canaryIntOrStr := c.release.Spec.ReleasePlan.Batches[c.newStatus.CanaryStatus.CurrentBatch].CanaryReplicas
+    var expectedPartition intstr.IntOrString
+    canaryIntOrStr := c.release.Spec.ReleasePlan.Batches[currentBatch].CanaryReplicas
     if canaryIntOrStr.Type == intstr.Int {
-        partitionGoal = intstr.FromInt(int(stableGoal))
+        expectedPartition = intstr.FromInt(int(expectedBatchStableReplicas))
     } else if c.newStatus.ObservedWorkloadReplicas > 0 {
-        partitionGoal = ParseIntegerAsPercentageIfPossible(stableGoal, c.newStatus.ObservedWorkloadReplicas, &canaryIntOrStr)
+        expectedPartition = ParseIntegerAsPercentageIfPossible(expectedBatchStableReplicas, c.newStatus.ObservedWorkloadReplicas, &canaryIntOrStr)
     }
 
     klog.V(3).InfoS("upgraded one batch, current info:",
         "BatchRelease", c.releasePlanKey,
-        "current-batch", currentBatch,
-        "canary-goal", canaryGoal,
-        "stable-goal", stableGoal,
-        "partition-goal", partitionGoal,
-        "partition-current", workloadPartition,
-        "canary-replicas", currentCanaryReplicas)
+        "currentBatch", currentBatch,
+        "replicas", replicas,
+        "updatedReplicas", updatedReplicas,
+        "noNeedRollbackReplicas", noNeedRollbackReplicas,
+        "partitionedStableReplicas", partitionedStableReplicas,
+        "plannedBatchCanaryReplicas", plannedBatchCanaryReplicas,
+        "expectedBatchCanaryReplicas", expectedBatchCanaryReplicas,
+        "expectedBatchStableReplicas", expectedBatchStableReplicas,
+        "expectedPartition", expectedPartition)
 
     // in case of no need to upgrade pods
-    IsUpgradedDone := func() bool {
-        return currentCanaryReplicas >= canaryGoal && int32(workloadPartition) <= stableGoal
-    }
-
-    if !IsUpgradedDone() {
-        if err := c.patchCloneSetPartition(c.clone, &partitionGoal); err != nil {
-            return false, err
-        }
-    }
+    // 1. the number of upgrade pod satisfied; 2. partition has been satisfied
+    IsWorkloadUpgraded := updatedReplicas >= expectedBatchCanaryReplicas && int32(partitionedStableReplicas) <= expectedBatchStableReplicas
+    if !IsWorkloadUpgraded {
+        return false, c.patchCloneSetPartition(c.clone, &expectedPartition)
+    }
 
     // patch current batch label to pods
-    patchDone, err := c.patchPodBatchLabel(canaryGoal)
+    patchDone, err := c.patchPodBatchLabel(pods, plannedBatchCanaryReplicas, expectedBatchStableReplicas)
     if !patchDone || err != nil {
         return false, err
     }
 
-    c.recorder.Eventf(c.release, v1.EventTypeNormal, "SetBatchDone",
-        "Finished submitting all upgrade quests for batch %d", c.newStatus.CanaryStatus.CurrentBatch)
+    c.recorder.Eventf(c.release, v1.EventTypeNormal, "SetBatchDone", "Finished submitting all upgrade quests for batch %d", c.newStatus.CanaryStatus.CurrentBatch)
     return true, nil
 }
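A worked example of the partition arithmetic above: with 10 replicas, 4 of which already carry the no-need-update label, a 50% batch targets 3 canary pods and leaves 3 stable pods to be partitioned. The sketch below reproduces that calculation with the same apimachinery helper; whether calculateCurrentCanary rounds up exactly like this is an assumption:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        replicas := 10
        noNeedRollback := 4
        batch := intstr.FromString("50%") // canaryReplicas of the current batch

        // 50% of the pods that actually need updating (10 - 4 = 6) -> 3.
        canary, _ := intstr.GetValueFromIntOrPercent(&batch, replicas-noNeedRollback, true)
        stable := replicas - noNeedRollback - canary

        fmt.Println(canary, stable) // 3 3
    }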
@@ -191,60 +264,53 @@ func (c *CloneSetRolloutController) CheckOneBatchReady() (bool, error) {
		return false, nil
	}

	// the number of canary pods now we have in current state
	canaryReplicas := c.clone.Status.UpdatedReplicas
	// the number of stable pods now we have in current state
	stableReplicas := c.clone.Status.Replicas - canaryReplicas
	// the number of canary pods that have been ready in current state
	canaryReadyReplicas := c.clone.Status.UpdatedReadyReplicas
	// the number of expected stable pods we should have in the current batch, but this number may
	// be inconsistent with the real canary goal due to the accuracy of percent-type partition
	expectedStableGoal := c.calculateCurrentStable(c.newStatus.ObservedWorkloadReplicas)
	// the number of the real canary pods we should have in the current batch
	originalGoal := &c.release.Spec.ReleasePlan.Batches[c.newStatus.CanaryStatus.CurrentBatch].CanaryReplicas
	canaryGoal := CalculateRealCanaryReplicasGoal(expectedStableGoal, c.newStatus.ObservedWorkloadReplicas, originalGoal)
	// the number of the real stable pods we should have in the current batch
	stableGoal := c.newStatus.ObservedWorkloadReplicas - canaryGoal
	// the number of max unavailable canary pods allowed by this workload
	maxUnavailable := 0
	var noNeedRollbackReplicas int32
	if c.newStatus.CanaryStatus.NoNeedUpdateReplicas != nil {
		noNeedRollbackReplicas = *c.newStatus.CanaryStatus.NoNeedUpdateReplicas
	}

	replicas := *c.clone.Spec.Replicas
	// the number of updated pods
	updatedReplicas := c.clone.Status.UpdatedReplicas
	// the number of updated ready pods
	updatedReadyReplicas := c.clone.Status.UpdatedReadyReplicas

	// current batch id
	currentBatch := c.newStatus.CanaryStatus.CurrentBatch
	// the number of pods that will be partitioned by the CloneSet
	partitionedStableReplicas, _ := intstr.GetValueFromIntOrPercent(c.clone.Spec.UpdateStrategy.Partition, int(replicas), true)
	// the number of canary pods that consider rollback context and other real-world situations
	expectedBatchCanaryReplicas := c.calculateCurrentCanary(replicas - noNeedRollbackReplicas)
	// the number of stable pods that consider rollback context and other real-world situations
	expectedBatchStableReplicas := replicas - noNeedRollbackReplicas - expectedBatchCanaryReplicas
	// the number of canary pods that the CloneSet will upgrade
	realNeedUpgradeCanaryReplicas := CalculateRealCanaryReplicasGoal(expectedBatchStableReplicas, replicas, &c.release.Spec.ReleasePlan.Batches[currentBatch].CanaryReplicas)

	var maxUnavailableReplicas int
	if c.clone.Spec.UpdateStrategy.MaxUnavailable != nil {
		maxUnavailable, _ = intstr.GetValueFromIntOrPercent(c.clone.Spec.UpdateStrategy.MaxUnavailable, int(canaryGoal), true)
		maxUnavailableReplicas, _ = intstr.GetValueFromIntOrPercent(c.clone.Spec.UpdateStrategy.MaxUnavailable, int(realNeedUpgradeCanaryReplicas), true)
	}

	klog.InfoS("checking the batch releasing progress",
	klog.V(3).InfoS("check one batch, current info:",
		"BatchRelease", c.releasePlanKey,
		"current-batch", c.newStatus.CanaryStatus.CurrentBatch,
		"canary-goal", canaryGoal,
		"stable-goal", stableGoal,
		"stable-replicas", stableReplicas,
		"max-unavailable", maxUnavailable,
		"canary-ready-replicas", canaryReadyReplicas)
		"currentBatch", currentBatch,
		"replicas", replicas,
		"updatedReplicas", updatedReplicas,
		"noNeedRollbackReplicas", noNeedRollbackReplicas,
		"maxUnavailableReplicas", maxUnavailableReplicas,
		"partitionedStableReplicas", partitionedStableReplicas,
		"expectedBatchCanaryReplicas", expectedBatchCanaryReplicas,
		"expectedBatchStableReplicas", expectedBatchStableReplicas)

	// maybe the workload replicas were scaled; we should requeue and handle the workload scaling event
	if c.clone.Status.Replicas != c.newStatus.ObservedWorkloadReplicas {
		err := fmt.Errorf("CloneSet(%v) replicas don't match ObservedWorkloadReplicas, workload status replicas: %v, observed workload replicas: %v",
			c.targetNamespacedName, c.clone.Status.Replicas, c.newStatus.ObservedWorkloadReplicas)
		klog.ErrorS(err, "the batch is not valid", "current-batch", c.newStatus.CanaryStatus.CurrentBatch)
	currentBatchIsReady := updatedReplicas >= realNeedUpgradeCanaryReplicas && // 1. the number of upgraded pods achieved the goal
		updatedReadyReplicas+int32(maxUnavailableReplicas) >= realNeedUpgradeCanaryReplicas && // 2. the number of upgraded available pods achieved the goal
		(realNeedUpgradeCanaryReplicas == 0 || updatedReadyReplicas >= 1) // 3. make sure that at least one upgraded pod is available

	if !currentBatchIsReady {
		klog.InfoS("the batch is not ready yet", "BatchRelease", c.releasePlanKey, "current-batch", c.newStatus.CanaryStatus.CurrentBatch)
		return false, nil
	}

	currentBatchIsNotReadyYet := func() bool {
		// the number of upgraded pods does not achieve the goal
		return canaryGoal > canaryReplicas ||
			// the number of upgraded available pods does not achieve the goal
			canaryReadyReplicas+int32(maxUnavailable) < canaryGoal ||
			// make sure that at least one upgraded pod is available
			(canaryGoal > 0 && canaryReadyReplicas == 0)
	}

	if currentBatchIsNotReadyYet() {
		klog.InfoS("the batch is not ready yet", "BatchRelease",
			c.releasePlanKey, "current-batch", c.newStatus.CanaryStatus.CurrentBatch)
		return false, nil
	}

	klog.Infof("All pods of CloneSet(%v) in current batch are ready, BatchRelease(%v), current-batch=%v",
		c.targetNamespacedName, c.releasePlanKey, c.newStatus.CanaryStatus.CurrentBatch)
	c.recorder.Eventf(c.release, v1.EventTypeNormal, "BatchAvailable", "Batch %d is available", c.newStatus.CanaryStatus.CurrentBatch)
	return true, nil
}
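The three readiness conditions above compose into a single predicate; extracted here as a standalone helper purely for illustration (the name and signature are mine, not part of this diff):

	// isBatchReady mirrors the conditions above: enough pods were upgraded,
	// enough of them are available (allowing maxUnavailable slack), and at
	// least one upgraded pod is ready whenever the goal is non-zero.
	func isBatchReady(updated, updatedReady, goal, maxUnavailable int32) bool {
		return updated >= goal &&
			updatedReady+maxUnavailable >= goal &&
			(goal == 0 || updatedReady >= 1)
	}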
@@ -288,6 +354,8 @@ func (c *CloneSetRolloutController) SyncWorkloadInfo() (WorkloadEventType, *util
		Status: &util.WorkloadStatus{
			UpdatedReplicas:      c.clone.Status.UpdatedReplicas,
			UpdatedReadyReplicas: c.clone.Status.UpdatedReadyReplicas,
			UpdateRevision:       c.clone.Status.UpdateRevision,
			StableRevision:       c.clone.Status.CurrentRevision,
		},
	}

@@ -304,6 +372,16 @@ func (c *CloneSetRolloutController) SyncWorkloadInfo() (WorkloadEventType, *util
		return WorkloadReplicasChanged, workloadInfo, nil
	}

	// updateRevision == CurrentRevision means the CloneSet is rolling back or newly-created.
	if c.clone.Status.UpdateRevision == c.clone.Status.CurrentRevision &&
		// stableRevision == UpdateRevision means the CloneSet is rolling back instead of newly-created.
		c.newStatus.StableRevision == c.clone.Status.UpdateRevision &&
		// StableRevision != observed UpdateRevision means the rollback event has not been observed.
		c.newStatus.StableRevision != c.newStatus.UpdateRevision {
		klog.Warningf("CloneSet(%v) is rolling back in batches", c.targetNamespacedName)
		return WorkloadRollbackInBatch, workloadInfo, nil
	}

	// in case the workload was changed
	if c.clone.Status.UpdateRevision != c.newStatus.UpdateRevision {
		klog.Warningf("CloneSet(%v) updateRevision changed during releasing, should try to restart the release plan, "+
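For readers tracing the rollback detection, the three revision comparisons reduce to a small pure function (a sketch with descriptive parameter names; not code from this diff):

	// detectRollbackInBatch reports whether the workload has been reverted to
	// the stable revision while the controller still remembers a different
	// update revision, i.e. a rollback that the release plan has not observed.
	func detectRollbackInBatch(statusUpdate, statusCurrent, observedStable, observedUpdate string) bool {
		return statusUpdate == statusCurrent && // workload converged on one revision
			observedStable == statusUpdate && // and it is the recorded stable one
			observedStable != observedUpdate // yet an update is still in flight
	}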
@@ -336,19 +414,16 @@ func (c *CloneSetRolloutController) recordCloneSetRevisionAndReplicas() {
	c.newStatus.UpdateRevision = c.clone.Status.UpdateRevision
}

func (c *CloneSetRolloutController) patchPodBatchLabel(canaryGoal int32) (bool, error) {
	rolloutID, exist := c.release.Labels[util.RolloutIDLabel]
	if !exist || rolloutID == "" {
func (c *CloneSetRolloutController) patchPodBatchLabel(pods []*v1.Pod, plannedBatchCanaryReplicas, expectedBatchStableReplicas int32) (bool, error) {
	rolloutID := c.release.Spec.ReleasePlan.RolloutID
	if rolloutID == "" {
		return true, nil
	}

	pods, err := util.ListOwnedPods(c.client, c.clone)
	if err != nil {
		klog.Errorf("Failed to list pods for CloneSet %v", c.targetNamespacedName)
		return false, err
	}

	batchID := c.release.Status.CanaryStatus.CurrentBatch + 1
	updateRevision := c.release.Status.UpdateRevision
	return util.PatchPodBatchLabel(c.client, pods, rolloutID, batchID, updateRevision, canaryGoal, c.releasePlanKey)
	batchID := c.release.Status.CanaryStatus.CurrentBatch + 1
	if c.newStatus.CanaryStatus.NoNeedUpdateReplicas != nil {
		pods = filterPodsForUnorderedRollback(pods, plannedBatchCanaryReplicas, expectedBatchStableReplicas, c.release.Status.ObservedWorkloadReplicas, rolloutID, updateRevision)
	}
	return patchPodBatchLabel(c.client, pods, rolloutID, batchID, updateRevision, plannedBatchCanaryReplicas, c.releasePlanKey)
}

@@ -163,7 +163,7 @@ func (c *cloneSetController) patchCloneSetPartition(clone *kruiseappsv1alpha1.Cl

// the canary workload size for the current batch
func (c *cloneSetController) calculateCurrentCanary(totalSize int32) int32 {
	targetSize := int32(util.CalculateNewBatchTarget(&c.release.Spec.ReleasePlan, int(totalSize), int(c.newStatus.CanaryStatus.CurrentBatch)))
	targetSize := int32(calculateNewBatchTarget(&c.release.Spec.ReleasePlan, int(totalSize), int(c.newStatus.CanaryStatus.CurrentBatch)))
	klog.InfoS("Calculated the number of pods in the target CloneSet after current batch",
		"CloneSet", c.targetNamespacedName, "BatchRelease", c.releasePlanKey,
		"current batch", c.newStatus.CanaryStatus.CurrentBatch, "workload updateRevision size", targetSize)
@@ -66,15 +66,12 @@ var (
			Batches: []v1alpha1.ReleaseBatch{
				{
					CanaryReplicas: intstr.FromString("10%"),
					PauseSeconds:   100,
				},
				{
					CanaryReplicas: intstr.FromString("50%"),
					PauseSeconds:   100,
				},
				{
					CanaryReplicas: intstr.FromString("80%"),
					PauseSeconds:   100,
				},
			},
		},

@@ -228,12 +225,3 @@ func TestParseIntegerAsPercentage(t *testing.T) {
		}
	}
}

func containers(version string) []corev1.Container {
	return []corev1.Container{
		{
			Name:  "busybox",
			Image: fmt.Sprintf("busybox:%v", version),
		},
	}
}
@@ -0,0 +1,245 @@
package workloads

import (
	"context"
	"encoding/json"
	"fmt"
	"sort"
	"strconv"
	"strings"

	"github.com/openkruise/rollouts/api/v1alpha1"
	"github.com/openkruise/rollouts/pkg/util"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/klog/v2"
	"k8s.io/utils/integer"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func filterPodsForUnorderedRollback(pods []*corev1.Pod, plannedBatchCanaryReplicas, expectedBatchStableReplicas, replicas int32, rolloutID, updateRevision string) []*corev1.Pod {
	var noNeedRollbackReplicas int32
	var realNeedRollbackReplicas int32
	var expectedRollbackReplicas int32 // total number of pods that need rollback

	var terminatingPods []*corev1.Pod
	var needRollbackPods []*corev1.Pod
	var noNeedRollbackPods []*corev1.Pod

	for _, pod := range pods {
		if !pod.DeletionTimestamp.IsZero() {
			terminatingPods = append(terminatingPods, pod)
			continue
		}
		if !util.IsConsistentWithRevision(pod, updateRevision) {
			continue
		}
		podRolloutID := pod.Labels[util.RolloutIDLabel]
		podRollbackID := pod.Labels[util.NoNeedUpdatePodLabel]
		if podRollbackID == rolloutID && podRolloutID != rolloutID {
			noNeedRollbackReplicas++
			noNeedRollbackPods = append(noNeedRollbackPods, pod)
		} else {
			needRollbackPods = append(needRollbackPods, pod)
		}
	}

	expectedRollbackReplicas = replicas - expectedBatchStableReplicas
	realNeedRollbackReplicas = expectedRollbackReplicas - noNeedRollbackReplicas
	if realNeedRollbackReplicas <= 0 { // may never occur
		return pods
	}

	diff := plannedBatchCanaryReplicas - realNeedRollbackReplicas
	if diff <= 0 {
		return append(needRollbackPods, terminatingPods...)
	}

	lastIndex := integer.Int32Min(diff, int32(len(noNeedRollbackPods)))
	return append(append(needRollbackPods, noNeedRollbackPods[:lastIndex]...), terminatingPods...)
}
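A standalone recomputation of the first unit-test case in the test file added below (replicas=10, noNeedRollback=5, stepCanary=20%, so planned=2 and expectedStable=4); numbers only, no controller state assumed:

	expectedRollback := int32(10) - int32(4) // replicas - expectedBatchStableReplicas = 6
	realNeedRollback := expectedRollback - 5 // minus noNeedRollbackReplicas = 1
	diff := int32(2) - realNeedRollback      // plannedBatchCanaryReplicas - realNeedRollback = 1
	// diff > 0, so the filter returns all pods that still need rollback plus
	// one already-labeled pod: 5 without the label + 1 with it.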
// TODO: support advanced statefulSet reserveOrdinal feature
func filterPodsForOrderedRollback(pods []*corev1.Pod, plannedBatchCanaryReplicas, expectedBatchStableReplicas, replicas int32, rolloutID, updateRevision string) []*corev1.Pod {
	var terminatingPods []*corev1.Pod
	var needRollbackPods []*corev1.Pod
	var noNeedRollbackPods []*corev1.Pod

	sortPodsByOrdinal(pods)
	for _, pod := range pods {
		if !pod.DeletionTimestamp.IsZero() {
			terminatingPods = append(terminatingPods, pod)
			continue
		}
		if !util.IsConsistentWithRevision(pod, updateRevision) {
			continue
		}
		if getPodOrdinal(pod) >= int(expectedBatchStableReplicas) {
			needRollbackPods = append(needRollbackPods, pod)
		} else {
			noNeedRollbackPods = append(noNeedRollbackPods, pod)
		}
	}
	realNeedRollbackReplicas := replicas - expectedBatchStableReplicas
	if realNeedRollbackReplicas <= 0 { // may never occur
		return pods
	}

	diff := plannedBatchCanaryReplicas - realNeedRollbackReplicas
	if diff <= 0 {
		return append(needRollbackPods, terminatingPods...)
	}

	lastIndex := integer.Int32Min(diff, int32(len(noNeedRollbackPods)))
	return append(append(needRollbackPods, noNeedRollbackPods[:lastIndex]...), terminatingPods...)
}

func countNoNeedRollbackReplicas(pods []*corev1.Pod, updateRevision, rolloutID string) int32 {
	noNeedRollbackReplicas := int32(0)
	for _, pod := range pods {
		if !pod.DeletionTimestamp.IsZero() {
			continue
		}
		if !util.IsConsistentWithRevision(pod, updateRevision) {
			continue
		}
		id, ok := pod.Labels[util.NoNeedUpdatePodLabel]
		if ok && id == rolloutID {
			noNeedRollbackReplicas++
		}
	}
	return noNeedRollbackReplicas
}

// patchPodBatchLabel will patch rollout-id && batch-id to pods
func patchPodBatchLabel(c client.Client, pods []*corev1.Pod, rolloutID string, batchID int32, updateRevision string, replicas int32, logKey types.NamespacedName) (bool, error) {
	// the number of active pods that have been patched successfully
	patchedUpdatedReplicas := int32(0)
	for _, pod := range pods {
		if !util.IsConsistentWithRevision(pod, updateRevision) {
			continue
		}

		podRolloutID := pod.Labels[util.RolloutIDLabel]
		if pod.DeletionTimestamp.IsZero() && podRolloutID == rolloutID {
			patchedUpdatedReplicas++
		}
	}

	for _, pod := range pods {
		podRolloutID := pod.Labels[util.RolloutIDLabel]
		if pod.DeletionTimestamp.IsZero() {
			// we don't patch the label for active old-revision pods
			if !util.IsConsistentWithRevision(pod, updateRevision) {
				continue
			}
			// we don't continue to patch if the goal is met
			if patchedUpdatedReplicas >= replicas {
				continue
			}
		}

		// if it has been patched already, just ignore it
		if podRolloutID == rolloutID {
			continue
		}

		podClone := pod.DeepCopy()
		by := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s","%s":"%d"}}}`, util.RolloutIDLabel, rolloutID, util.RolloutBatchIDLabel, batchID)
		err := c.Patch(context.TODO(), podClone, client.RawPatch(types.StrategicMergePatchType, []byte(by)))
		if err != nil {
			klog.Errorf("Failed to patch Pod(%v) batchID, err: %v", client.ObjectKeyFromObject(pod), err)
			return false, err
		}
		klog.Infof("Succeeded to patch Pod(%v) batchID", client.ObjectKeyFromObject(pod))

		if pod.DeletionTimestamp.IsZero() {
			patchedUpdatedReplicas++
		}
	}

	klog.V(3).Infof("Patched %v pods with batchID for batchRelease %v, goal is %d pods", patchedUpdatedReplicas, logKey, replicas)
	return patchedUpdatedReplicas >= replicas, nil
}
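The strategic-merge body sent per pod above is a plain label write; a sketch of what it expands to, with illustrative label keys standing in for util.RolloutIDLabel and util.RolloutBatchIDLabel:

	body := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s","%s":"%d"}}}`,
		"rollouts.kruise.io/rollout-id", "release-2", // hypothetical key and value
		"rollouts.kruise.io/rollout-batch-id", 1)     // hypothetical key; batch IDs are 1-based
	// => {"metadata":{"labels":{"rollouts.kruise.io/rollout-id":"release-2","rollouts.kruise.io/rollout-batch-id":"1"}}}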
func releaseWorkload(c client.Client, object client.Object) error {
	_, found := object.GetAnnotations()[util.BatchReleaseControlAnnotation]
	if !found {
		klog.V(3).Infof("Workload(%v) is already released", client.ObjectKeyFromObject(object))
		return nil
	}

	clone := object.DeepCopyObject().(client.Object)
	patchByte := []byte(fmt.Sprintf(`{"metadata":{"annotations":{"%s":null}}}`, util.BatchReleaseControlAnnotation))
	return c.Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, patchByte))
}

func claimWorkload(c client.Client, planController *v1alpha1.BatchRelease, object client.Object, patchUpdateStrategy map[string]interface{}) error {
	if controlInfo, ok := object.GetAnnotations()[util.BatchReleaseControlAnnotation]; ok && controlInfo != "" {
		ref := &metav1.OwnerReference{}
		err := json.Unmarshal([]byte(controlInfo), ref)
		if err == nil && ref.UID == planController.UID {
			klog.V(3).Infof("Workload(%v) has been controlled by this BatchRelease(%v), no need to claim again",
				client.ObjectKeyFromObject(object), client.ObjectKeyFromObject(planController))
			return nil
		}
		klog.Errorf("Failed to parse controller info from Workload(%v) annotation, error: %v, controller info: %+v",
			client.ObjectKeyFromObject(object), err, *ref)
	}

	controlInfo, _ := json.Marshal(metav1.NewControllerRef(planController, planController.GetObjectKind().GroupVersionKind()))
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]string{
				util.BatchReleaseControlAnnotation: string(controlInfo),
			},
		},
		"spec": map[string]interface{}{
			"updateStrategy": patchUpdateStrategy,
		},
	}

	patchByte, _ := json.Marshal(patch)
	clone := object.DeepCopyObject().(client.Object)
	return c.Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, patchByte))
}

func patchSpec(c client.Client, object client.Object, spec map[string]interface{}) error {
	patchByte, err := json.Marshal(map[string]interface{}{"spec": spec})
	if err != nil {
		return err
	}
	clone := object.DeepCopyObject().(client.Object)
	return c.Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, patchByte))
}

func calculateNewBatchTarget(rolloutSpec *v1alpha1.ReleasePlan, workloadReplicas, currentBatch int) int {
	batchSize, _ := intstr.GetValueFromIntOrPercent(&rolloutSpec.Batches[currentBatch].CanaryReplicas, workloadReplicas, true)
	if batchSize > workloadReplicas {
		klog.Warningf("releasePlan has wrong batch replicas, batches[%d].replicas %v is more than workload.replicas %v", currentBatch, batchSize, workloadReplicas)
		batchSize = workloadReplicas
	} else if batchSize < 0 {
		klog.Warningf("releasePlan has wrong batch replicas, batches[%d].replicas %v is less than 0", currentBatch, batchSize)
		batchSize = 0
	}

	klog.V(3).InfoS("calculated the number of new pod size", "current batch", currentBatch, "new pod target", batchSize)
	return batchSize
}

func sortPodsByOrdinal(pods []*corev1.Pod) {
	// sort pods by ordinal in descending order, e.g. pod-9, pod-8, ..., pod-0
	sort.Slice(pods, func(i, j int) bool {
		return getPodOrdinal(pods[i]) > getPodOrdinal(pods[j])
	})
}

func getPodOrdinal(pod *corev1.Pod) int {
	ord, _ := strconv.Atoi(pod.Name[strings.LastIndex(pod.Name, "-")+1:])
	return ord
}
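A quick usage sketch of the ordinal helpers (pod names are hypothetical; StatefulSet-style "name-ordinal" naming is assumed):

	pods := []*corev1.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "web-0"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "web-2"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "web-1"}},
	}
	sortPodsByOrdinal(pods)
	// pods are now web-2, web-1, web-0: ordered rollback walks ordinals from the
	// top down, mirroring how a StatefulSet partition gates updates by ordinal.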
@@ -0,0 +1,291 @@
package workloads

import (
	"fmt"
	"math/rand"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/openkruise/rollouts/pkg/util"
	apps "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestFilterPodsForUnorderedRollback(t *testing.T) {
	RegisterFailHandler(Fail)

	cases := []struct {
		Name                        string
		GetPods                     func() []*corev1.Pod
		ExpectWithLabels            int
		ExpectWithoutLabels         int
		Replicas                    int32
		NoNeedRollbackReplicas      int32
		PlannedBatchCanaryReplicas  int32
		ExpectedBatchStableReplicas int32
	}{
		{
			Name: "replicas=10, updatedReplicas=10, noNeedRollback=5, stepCanary=20%, realCanary=6",
			GetPods: func() []*corev1.Pod {
				return generatePods(10, 5)
			},
			Replicas:                    10,
			NoNeedRollbackReplicas:      5,
			PlannedBatchCanaryReplicas:  2,
			ExpectedBatchStableReplicas: 4,
			ExpectWithoutLabels:         5,
			ExpectWithLabels:            1,
		},
		{
			Name: "replicas=10, updatedReplicas=10, noNeedRollback=5, stepCanary=60%, realCanary=8",
			GetPods: func() []*corev1.Pod {
				return generatePods(10, 5)
			},
			Replicas:                    10,
			NoNeedRollbackReplicas:      5,
			PlannedBatchCanaryReplicas:  6,
			ExpectedBatchStableReplicas: 2,
			ExpectWithoutLabels:         5,
			ExpectWithLabels:            3,
		},
		{
			Name: "replicas=10, updatedReplicas=10, noNeedRollback=5, stepCanary=100%, realCanary=10",
			GetPods: func() []*corev1.Pod {
				return generatePods(10, 5)
			},
			Replicas:                    10,
			NoNeedRollbackReplicas:      5,
			PlannedBatchCanaryReplicas:  10,
			ExpectedBatchStableReplicas: 0,
			ExpectWithoutLabels:         5,
			ExpectWithLabels:            5,
		},
		{
			Name: "replicas=10, updatedReplicas=9, noNeedRollback=7, stepCanary=20%, realCanary=6",
			GetPods: func() []*corev1.Pod {
				return generatePods(9, 7)
			},
			Replicas:                    10,
			NoNeedRollbackReplicas:      7,
			PlannedBatchCanaryReplicas:  2,
			ExpectedBatchStableReplicas: 2,
			ExpectWithoutLabels:         2,
			ExpectWithLabels:            1,
		},
		{
			Name: "replicas=10, updatedReplicas=9, noNeedRollback=7, stepCanary=60%, realCanary=8",
			GetPods: func() []*corev1.Pod {
				return generatePods(9, 7)
			},
			Replicas:                    10,
			NoNeedRollbackReplicas:      7,
			PlannedBatchCanaryReplicas:  6,
			ExpectedBatchStableReplicas: 1,
			ExpectWithoutLabels:         2,
			ExpectWithLabels:            4,
		},
		{
			Name: "replicas=10, updatedReplicas=9, noNeedRollback=7, stepCanary=100%, realCanary=10",
			GetPods: func() []*corev1.Pod {
				return generatePods(9, 7)
			},
			Replicas:                    10,
			NoNeedRollbackReplicas:      7,
			PlannedBatchCanaryReplicas:  10,
			ExpectedBatchStableReplicas: 0,
			ExpectWithoutLabels:         2,
			ExpectWithLabels:            7,
		},
		{
			Name: "replicas=10, updatedReplicas=6, noNeedRollback=5, stepCanary=20%, realCanary=6",
			GetPods: func() []*corev1.Pod {
				return generatePods(6, 5)
			},
			Replicas:                    10,
			NoNeedRollbackReplicas:      5,
			PlannedBatchCanaryReplicas:  2,
			ExpectedBatchStableReplicas: 4,
			ExpectWithoutLabels:         1,
			ExpectWithLabels:            1,
		},
		{
			Name: "replicas=10, updatedReplicas=6, noNeedRollback=5, stepCanary=60%, realCanary=8",
			GetPods: func() []*corev1.Pod {
				return generatePods(6, 5)
			},
			Replicas:                    10,
			NoNeedRollbackReplicas:      5,
			PlannedBatchCanaryReplicas:  6,
			ExpectedBatchStableReplicas: 2,
			ExpectWithoutLabels:         1,
			ExpectWithLabels:            3,
		},
	}

	check := func(pods []*corev1.Pod, expectWith, expectWithout int) bool {
		var with, without int
		for _, pod := range pods {
			if pod.Labels[util.NoNeedUpdatePodLabel] == "0x1" {
				with++
			} else {
				without++
			}
		}
		return with == expectWith && without == expectWithout
	}

	for _, cs := range cases {
		t.Run(cs.Name, func(t *testing.T) {
			pods := cs.GetPods()
			for i := 0; i < 10; i++ {
				rand.Shuffle(len(pods), func(i, j int) {
					pods[i], pods[j] = pods[j], pods[i]
				})
				filteredPods := filterPodsForUnorderedRollback(pods, cs.PlannedBatchCanaryReplicas, cs.ExpectedBatchStableReplicas, cs.Replicas, "0x1", "version-1")
				var podName []string
				for i := range filteredPods {
					podName = append(podName, filteredPods[i].Name)
				}
				fmt.Println(podName)
				Expect(check(filteredPods, cs.ExpectWithLabels, cs.ExpectWithoutLabels)).To(BeTrue())
			}
		})
	}
}

func TestFilterPodsForOrderedRollback(t *testing.T) {
	RegisterFailHandler(Fail)

	cases := []struct {
		Name                        string
		GetPods                     func() []*corev1.Pod
		ExpectWithLabels            int
		ExpectWithoutLabels         int
		Replicas                    int32
		PlannedBatchCanaryReplicas  int32
		ExpectedBatchStableReplicas int32
	}{
		{
			Name: "replicas=10, updatedReplicas=10, stepCanary=40%, realCanary=2",
			GetPods: func() []*corev1.Pod {
				return generatePods(10, 8)
			},
			Replicas:                    10,
			PlannedBatchCanaryReplicas:  4,
			ExpectedBatchStableReplicas: 8,
			ExpectWithoutLabels:         2,
			ExpectWithLabels:            2,
		},
		{
			Name: "replicas=10, updatedReplicas=10, stepCanary=60%, realCanary=2",
			GetPods: func() []*corev1.Pod {
				return generatePods(10, 8)
			},
			Replicas:                    10,
			PlannedBatchCanaryReplicas:  6,
			ExpectedBatchStableReplicas: 8,
			ExpectWithoutLabels:         2,
			ExpectWithLabels:            4,
		},
		{
			Name: "replicas=10, updatedReplicas=10, stepCanary=100%, realCanary=10",
			GetPods: func() []*corev1.Pod {
				return generatePods(10, 0)
			},
			Replicas:                    10,
			PlannedBatchCanaryReplicas:  10,
			ExpectedBatchStableReplicas: 0,
			ExpectWithoutLabels:         10,
			ExpectWithLabels:            0,
		},
		{
			Name: "replicas=10, updatedReplicas=9, stepCanary=20%, realCanary=2",
			GetPods: func() []*corev1.Pod {
				return generatePods(9, 8)
			},
			Replicas:                    10,
			PlannedBatchCanaryReplicas:  2,
			ExpectedBatchStableReplicas: 8,
			ExpectWithoutLabels:         1,
			ExpectWithLabels:            0,
		},
	}

	check := func(pods []*corev1.Pod, expectWith, expectWithout int) bool {
		var with, without int
		for _, pod := range pods {
			if pod.Labels[util.NoNeedUpdatePodLabel] == "0x1" {
				with++
			} else {
				without++
			}
		}
		return with == expectWith && without == expectWithout
	}

	for _, cs := range cases {
		t.Run(cs.Name, func(t *testing.T) {
			pods := cs.GetPods()
			for i := 0; i < 10; i++ {
				rand.Shuffle(len(pods), func(i, j int) {
					pods[i], pods[j] = pods[j], pods[i]
				})
				filteredPods := filterPodsForOrderedRollback(pods, cs.PlannedBatchCanaryReplicas, cs.ExpectedBatchStableReplicas, cs.Replicas, "0x1", "version-1")
				var podName []string
				for i := range filteredPods {
					podName = append(podName, filteredPods[i].Name)
				}
				fmt.Println(podName)
				Expect(check(filteredPods, cs.ExpectWithLabels, cs.ExpectWithoutLabels)).To(BeTrue())
			}
		})
	}
}

func TestSortPodsByOrdinal(t *testing.T) {
	RegisterFailHandler(Fail)

	pods := generatePods(100, 10)
	rand.Shuffle(len(pods), func(i, j int) {
		pods[i], pods[j] = pods[j], pods[i]
	})
	sortPodsByOrdinal(pods)
	for i, pod := range pods {
		expectedName := fmt.Sprintf("pod-name-%d", 99-i)
		Expect(pod.Name == expectedName).Should(BeTrue())
	}
}

func generatePods(updatedReplicas, noNeedRollbackReplicas int) []*corev1.Pod {
	podsNoNeed := generatePodsWith(map[string]string{
		util.NoNeedUpdatePodLabel:           "0x1",
		apps.ControllerRevisionHashLabelKey: "version-1",
	}, noNeedRollbackReplicas, 0)
	return append(generatePodsWith(map[string]string{
		apps.ControllerRevisionHashLabelKey: "version-1",
	}, updatedReplicas-noNeedRollbackReplicas, noNeedRollbackReplicas), podsNoNeed...)
}

func generatePodsWith(labels map[string]string, replicas int, beginOrder int) []*corev1.Pod {
	pods := make([]*corev1.Pod, replicas)
	for i := 0; i < replicas; i++ {
		pods[i] = &corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:   fmt.Sprintf("pod-name-%d", beginOrder+i),
				Labels: labels,
			},
		}
	}
	return pods
}

func containers(version string) []corev1.Container {
	return []corev1.Container{
		{
			Name:  "busybox",
			Image: fmt.Sprintf("busybox:%v", version),
		},
	}
}
@@ -38,6 +38,8 @@ const (
	WorkloadHasGone WorkloadEventType = "workload-has-gone"
	// WorkloadUnHealthy means the workload is in some unexpected state that our controller cannot handle; we should stop reconciling.
	WorkloadUnHealthy WorkloadEventType = "workload-is-unhealthy"
	// WorkloadRollbackInBatch means the workload is rolling back according to the BatchRelease batch plan.
	WorkloadRollbackInBatch WorkloadEventType = "workload-rollback-in-batch"
)

type workloadController struct {

@@ -60,7 +62,7 @@ type WorkloadController interface {
	// it returns 'true' if the preparation has succeeded.
	// it returns 'false' if the preparation should be retried.
	// it returns a non-empty error if the preparation went wrong, and it should not be retried.
	PrepareBeforeProgress() (bool, error)
	PrepareBeforeProgress() (bool, *int32, error)

	// UpgradeOneBatch tries to upgrade old replicas following the release plan.
	// it will upgrade the old replicas as the release plan allows in the current batch.
@@ -110,15 +110,15 @@ func (c *DeploymentsRolloutController) VerifyWorkload() (bool, error) {
}

// PrepareBeforeProgress makes sure that the Deployment is under our control
func (c *DeploymentsRolloutController) PrepareBeforeProgress() (bool, error) {
func (c *DeploymentsRolloutController) PrepareBeforeProgress() (bool, *int32, error) {
	// the workload is verified, and we should record revision and replicas info before progressing
	if err := c.recordDeploymentRevisionAndReplicas(); err != nil {
		klog.Errorf("Failed to record deployment(%v) revision and replicas info, error: %v", c.stableNamespacedName, err)
		return false, err
		return false, nil, err
	}

	c.recorder.Event(c.release, v1.EventTypeNormal, "Initialized", "Rollout resource are initialized")
	return true, nil
	return true, nil, nil
}

// UpgradeOneBatch calculates the number of pods we can upgrade once

@@ -371,8 +371,8 @@ func (c *DeploymentsRolloutController) recordDeploymentRevisionAndReplicas() err
}

func (c *DeploymentsRolloutController) patchPodBatchLabel(canaryGoal int32) (bool, error) {
	rolloutID, exist := c.release.Labels[util.RolloutIDLabel]
	if !exist || rolloutID == "" || c.canary == nil {
	rolloutID := c.release.Spec.ReleasePlan.RolloutID
	if rolloutID == "" || c.canary == nil {
		return true, nil
	}

@@ -384,5 +384,5 @@ func (c *DeploymentsRolloutController) patchPodBatchLabel(canaryGoal int32) (boo

	batchID := c.release.Status.CanaryStatus.CurrentBatch + 1
	updateRevision := c.release.Status.UpdateRevision
	return util.PatchPodBatchLabel(c.client, pods, rolloutID, batchID, updateRevision, canaryGoal, c.releaseKey)
	return patchPodBatchLabel(c.client, pods, rolloutID, batchID, updateRevision, canaryGoal, c.releaseKey)
}

@@ -295,7 +295,7 @@ func (c *deploymentController) listCanaryDeployment(options ...client.ListOption

// the target workload size for the current batch
func (c *deploymentController) calculateCurrentCanary(totalSize int32) int32 {
	targetSize := int32(util.CalculateNewBatchTarget(&c.release.Spec.ReleasePlan, int(totalSize), int(c.newStatus.CanaryStatus.CurrentBatch)))
	targetSize := int32(calculateNewBatchTarget(&c.release.Spec.ReleasePlan, int(totalSize), int(c.newStatus.CanaryStatus.CurrentBatch)))
	klog.InfoS("Calculated the number of pods in the canary Deployment after current batch",
		"Deployment", c.stableNamespacedName, "BatchRelease", c.releaseKey,
		"current batch", c.newStatus.CanaryStatus.CurrentBatch, "workload updateRevision size", targetSize)

@@ -60,15 +60,12 @@ var (
			Batches: []v1alpha1.ReleaseBatch{
				{
					CanaryReplicas: intstr.FromString("10%"),
					PauseSeconds:   100,
				},
				{
					CanaryReplicas: intstr.FromString("50%"),
					PauseSeconds:   100,
				},
				{
					CanaryReplicas: intstr.FromString("80%"),
					PauseSeconds:   100,
				},
			},
		},
@@ -24,7 +24,6 @@ import (
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"

@@ -39,7 +38,7 @@ type StatefulSetLikeController struct {
	recorder       record.EventRecorder
	planController *appsv1alpha1.BatchRelease
	namespacedName types.NamespacedName
	workloadObj    *unstructured.Unstructured
	workloadObj    client.Object
	gvk            schema.GroupVersionKind
	pods           []*v1.Pod
}

@@ -54,13 +53,16 @@ func NewStatefulSetLikeController(c client.Client, r record.EventRecorder, p *ap
	}
}

func (c *StatefulSetLikeController) GetWorkloadObject() (*unstructured.Unstructured, error) {
func (c *StatefulSetLikeController) GetWorkloadObject() (client.Object, error) {
	if c.workloadObj == nil {
		c.workloadObj = &unstructured.Unstructured{}
		c.workloadObj.SetGroupVersionKind(c.gvk)
		if err := c.Get(context.TODO(), c.namespacedName, c.workloadObj); err != nil {
		workloadObj := util.GetEmptyWorkloadObject(c.gvk)
		if workloadObj == nil {
			return nil, errors.NewNotFound(schema.GroupResource{Group: c.gvk.Group, Resource: c.gvk.Kind}, c.namespacedName.Name)
		}
		if err := c.Get(context.TODO(), c.namespacedName, workloadObj); err != nil {
			return nil, err
		}
		c.workloadObj = workloadObj
	}
	return c.workloadObj, nil
}
@@ -90,10 +92,10 @@ func (c *StatefulSetLikeController) ClaimWorkload() (bool, error) {
		return false, err
	}

	err = util.ClaimWorkload(c.Client, c.planController, set, map[string]interface{}{
	err = claimWorkload(c.Client, c.planController, set, map[string]interface{}{
		"type": apps.RollingUpdateStatefulSetStrategyType,
		"rollingUpdate": map[string]interface{}{
			"partition": pointer.Int32(util.ParseReplicasFrom(set)),
			"partition": pointer.Int32(util.GetReplicas(set)),
		},
	})
	if err != nil {

@@ -113,7 +115,7 @@ func (c *StatefulSetLikeController) ReleaseWorkload(cleanup bool) (bool, error)
		return false, err
	}

	err = util.ReleaseWorkload(c.Client, set)
	err = releaseWorkload(c.Client, set)
	if err != nil {
		c.recorder.Eventf(c.planController, v1.EventTypeWarning, "ReleaseFailed", err.Error())
		return false, err

@@ -130,11 +132,12 @@ func (c *StatefulSetLikeController) UpgradeBatch(canaryReplicasGoal, stableRepli
	}

	// if there is no need to patch the partition
	if isStatefulSetUpgradedDone(set, canaryReplicasGoal, stableReplicasGoal) {
	partition := util.GetStatefulSetPartition(set)
	if partition <= stableReplicasGoal {
		return true, nil
	}

	err = util.PatchSpec(c.Client, set, map[string]interface{}{
	err = patchSpec(c.Client, set, map[string]interface{}{
		"updateStrategy": map[string]interface{}{
			"rollingUpdate": map[string]interface{}{
				"partition": pointer.Int32(stableReplicasGoal),

@@ -149,6 +152,15 @@ func (c *StatefulSetLikeController) UpgradeBatch(canaryReplicasGoal, stableRepli
	return true, nil
}

func (c *StatefulSetLikeController) IsOrderedUpdate() (bool, error) {
	set, err := c.GetWorkloadObject()
	if err != nil {
		return false, err
	}

	return !util.IsStatefulSetUnorderedUpdate(set), nil
}

func (c *StatefulSetLikeController) IsBatchReady(canaryReplicasGoal, stableReplicasGoal int32) (bool, error) {
	workloadInfo, err := c.GetWorkloadInfo()
	if err != nil {

@@ -217,13 +229,3 @@ func (c *StatefulSetLikeController) countUpdatedReadyPods(updateRevision string)
	}
	return updatedReadyReplicas, nil
}

func isStatefulSetUpgradedDone(set *unstructured.Unstructured, canaryReplicasGoal, stableReplicasGoal int32) bool {
	partition := util.GetStatefulSetPartition(set)
	if partition <= stableReplicasGoal {
		return true
	}
	updatedReplicas := util.ParseStatusIntFrom(set, "updatedReplicas")
	observedGeneration := util.ParseStatusIntFrom(set, "observedGeneration")
	return set.GetGeneration() == observedGeneration && int(updatedReplicas) >= int(canaryReplicasGoal)
}
@@ -17,6 +17,7 @@ limitations under the License.
package workloads

import (
	"context"
	"fmt"

	"github.com/openkruise/rollouts/api/v1alpha1"

@@ -25,9 +26,9 @@ import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

@@ -38,6 +39,7 @@ type UnifiedWorkloadController interface {
	UpgradeBatch(canaryReplicasGoal, stableReplicasGoal int32) (bool, error)
	IsBatchReady(canaryReplicasGoal, stableReplicasGoal int32) (bool, error)
	ListOwnedPods() ([]*v1.Pod, error)
	IsOrderedUpdate() (bool, error)
}

// UnifiedWorkloadRolloutControlPlane is responsible for handling rollout StatefulSet type of workloads

@@ -80,7 +82,7 @@ func (c *UnifiedWorkloadRolloutControlPlane) VerifyWorkload() (bool, error) {
	}

	// If the workload status is untrustworthy
	if workloadInfo.Status.ObservedGeneration != workloadInfo.Metadata.Generation {
	if workloadInfo.Status.ObservedGeneration != workloadInfo.Generation {
		message = fmt.Sprintf("%v is still reconciling, wait for it to be done", workloadInfo.GVKWithName)
		return false, nil
	}
@@ -101,26 +103,95 @@
	return true, nil
}

// prepareBeforeRollback makes sure that the updated pods have been patched with the no-need-update label.
// return values:
// - bool: whether all updated pods have been patched with the no-need-update label;
// - *int32: how many pods have been patched;
// - error: whether an error occurred.
func (c *UnifiedWorkloadRolloutControlPlane) prepareBeforeRollback() (bool, *int32, error) {
	if c.release.Annotations[util.RollbackInBatchAnnotation] == "" {
		return true, nil, nil
	}

	noNeedRollbackReplicas := int32(0)
	rolloutID := c.release.Spec.ReleasePlan.RolloutID
	if rolloutID == "" {
		return true, &noNeedRollbackReplicas, nil
	}

	workloadInfo, err := c.GetWorkloadInfo()
	if err != nil {
		return false, &noNeedRollbackReplicas, nil
	}

	pods, err := c.ListOwnedPods()
	if err != nil {
		klog.Errorf("Failed to list pods for %v", workloadInfo.GVKWithName)
		return false, &noNeedRollbackReplicas, err
	}

	updateRevision := workloadInfo.Status.UpdateRevision
	var filterPods []*v1.Pod
	for i := range pods {
		if !pods[i].DeletionTimestamp.IsZero() {
			continue
		}
		if !util.IsConsistentWithRevision(pods[i], updateRevision) {
			continue
		}
		if id, ok := pods[i].Labels[util.NoNeedUpdatePodLabel]; ok && id == rolloutID {
			noNeedRollbackReplicas++
			continue
		}
		filterPods = append(filterPods, pods[i])
	}

	if len(filterPods) == 0 {
		return true, &noNeedRollbackReplicas, nil
	}

	for _, pod := range filterPods {
		podClone := pod.DeepCopy()
		body := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, util.NoNeedUpdatePodLabel, rolloutID)
		err = c.client.Patch(context.TODO(), podClone, client.RawPatch(types.StrategicMergePatchType, []byte(body)))
		if err != nil {
			klog.Errorf("Failed to patch rollback labels[%s]=%s to pod %v", util.NoNeedUpdatePodLabel, rolloutID, client.ObjectKeyFromObject(pod))
			return false, &noNeedRollbackReplicas, err
		}
		klog.Infof("Succeeded to patch rollback labels[%s]=%s to pod %v", util.NoNeedUpdatePodLabel, rolloutID, client.ObjectKeyFromObject(pod))
		noNeedRollbackReplicas++
	}
	klog.Infof("BatchRelease(%v) found %v replicas that need no rollback", client.ObjectKeyFromObject(c.release), noNeedRollbackReplicas)
	return false, &noNeedRollbackReplicas, nil
}
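The per-pod marking above is a single strategic-merge label write; as a minimal standalone sketch (the label key is an illustrative stand-in for util.NoNeedUpdatePodLabel):

	// markPodNoNeedUpdate labels one pod as already being at the rollback
	// target revision so that later batches can skip it. Sketch only.
	func markPodNoNeedUpdate(ctx context.Context, c client.Client, pod *corev1.Pod, rolloutID string) error {
		body := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`,
			"rollouts.kruise.io/no-need-update", rolloutID) // hypothetical key
		return c.Patch(ctx, pod.DeepCopy(), client.RawPatch(types.StrategicMergePatchType, []byte(body)))
	}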
// PrepareBeforeProgress makes sure that the source and target workloads are under our control
func (c *UnifiedWorkloadRolloutControlPlane) PrepareBeforeProgress() (bool, error) {
func (c *UnifiedWorkloadRolloutControlPlane) PrepareBeforeProgress() (bool, *int32, error) {
	done, noNeedRollbackReplicas, err := c.prepareBeforeRollback()
	if err != nil || !done {
		return false, nil, err
	}

	// claim that the workload is under our control
	done, err := c.ClaimWorkload()
	done, err = c.ClaimWorkload()
	if !done || err != nil {
		return false, err
		return false, noNeedRollbackReplicas, err
	}

	// record revisions and replicas info to BatchRelease.Status
	err = c.RecordWorkloadRevisionAndReplicas()
	if err != nil {
		return false, err
		return false, noNeedRollbackReplicas, err
	}

	c.recorder.Event(c.release, v1.EventTypeNormal, "InitializedSuccessfully", "Rollout resource are initialized")
	return true, nil
	return true, noNeedRollbackReplicas, nil
}

// UpgradeOneBatch calculates the number of pods we can upgrade once according to the rollout spec
// and then sets the partition accordingly
// TODO: support advanced statefulSet reserveOrdinal feature
func (c *UnifiedWorkloadRolloutControlPlane) UpgradeOneBatch() (bool, error) {
	workloadInfo, err := c.GetWorkloadInfo()
	if err != nil {
@@ -133,32 +204,53 @@ func (c *UnifiedWorkloadRolloutControlPlane) UpgradeOneBatch() (bool, error) {
	}

	// if the workload status is untrustworthy
	if workloadInfo.Status.ObservedGeneration != workloadInfo.Metadata.Generation {
	if workloadInfo.Status.ObservedGeneration != workloadInfo.Generation {
		return false, nil
	}

	pods, err := c.ListOwnedPods()
	if err != nil {
		return false, err
	}

	var noNeedRollbackReplicas int32
	if c.newStatus.CanaryStatus.NoNeedUpdateReplicas != nil {
		rolloutID := c.release.Spec.ReleasePlan.RolloutID
		noNeedRollbackReplicas = countNoNeedRollbackReplicas(pods, c.newStatus.UpdateRevision, rolloutID)
		c.newStatus.CanaryStatus.NoNeedUpdateReplicas = pointer.Int32(noNeedRollbackReplicas)
	}
	replicas := c.newStatus.ObservedWorkloadReplicas
	currentBatch := c.newStatus.CanaryStatus.CurrentBatch
	// the number of canary pods we should have in the current batch
	canaryGoal := c.calculateCurrentCanary(c.release.Status.ObservedWorkloadReplicas)
	// the number of stable pods we should have in the current batch
	stableGoal := c.calculateCurrentStable(c.release.Status.ObservedWorkloadReplicas)
	// the number of canary pods now we have in current state
	currentCanaryReplicas := workloadInfo.Status.UpdatedReplicas

	// in case there is no need to upgrade pods
	klog.V(3).InfoS("upgraded one batch, status info:",
	// the number of canary pods that the plan expects in the current batch
	plannedBatchCanaryReplicas := c.calculateCurrentCanary(c.newStatus.ObservedWorkloadReplicas)
	// the number of canary pods that consider rollback context and other real-world situations
	expectedBatchCanaryReplicas := c.calculateCurrentCanary(replicas - noNeedRollbackReplicas)
	// the number of stable pods that consider rollback context and other real-world situations
	expectedBatchStableReplicas := replicas - expectedBatchCanaryReplicas

	// if ordered update, the partition is related to pod ordinals;
	// if unordered update, the partition behaves just like a cloneSet partition
	orderedUpdate, _ := c.IsOrderedUpdate()
	if !orderedUpdate {
		expectedBatchStableReplicas -= noNeedRollbackReplicas
	}

	klog.V(3).InfoS("upgrade one batch, current info:",
		"BatchRelease", client.ObjectKeyFromObject(c.release),
		"current-batch", currentBatch,
		"canary-goal", canaryGoal,
		"stable-goal", stableGoal,
		"canary-replicas", currentCanaryReplicas)
		"currentBatch", currentBatch,
		"replicas", replicas,
		"noNeedRollbackReplicas", noNeedRollbackReplicas,
		"plannedBatchCanaryReplicas", plannedBatchCanaryReplicas,
		"expectedBatchCanaryReplicas", expectedBatchCanaryReplicas,
		"expectedBatchStableReplicas", expectedBatchStableReplicas)

	isUpgradedDone, err := c.UpgradeBatch(canaryGoal, stableGoal)
	isUpgradedDone, err := c.UpgradeBatch(expectedBatchCanaryReplicas, expectedBatchStableReplicas)
	if err != nil || !isUpgradedDone {
		return false, nil
	}

	isPatchedDone, err := c.patchPodBatchLabel(workloadInfo, canaryGoal)
	isPatchedDone, err := c.patchPodBatchLabel(pods, plannedBatchCanaryReplicas, expectedBatchStableReplicas)
	if err != nil || !isPatchedDone {
		return false, err
	}
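The ordered/unordered branch above only changes the stable-side goal; a hedged arithmetic sketch with hypothetical numbers:

	replicas, noNeedRollback := int32(10), int32(3)
	expectedCanary := int32(4)                  // e.g. ceil(50% of the 7 pods that must move)
	expectedStable := replicas - expectedCanary // 6: for ordered updates this is an ordinal boundary
	// Unordered workloads have no fixed ordinals, so pods that need no rollback
	// are subtracted from the stable side as well, exactly as in the code above:
	expectedStable -= noNeedRollback // 3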
@@ -181,51 +273,47 @@ func (c *UnifiedWorkloadRolloutControlPlane) CheckOneBatchReady() (bool, error)
	}

	// if the workload status is untrustworthy
	if workloadInfo.Status.ObservedGeneration != workloadInfo.Metadata.Generation {
	if workloadInfo.Status.ObservedGeneration != workloadInfo.Generation {
		return false, nil
	}

	// the number of canary pods now we have in current state
	canaryReplicas := workloadInfo.Status.UpdatedReplicas
	// the number of stable pods now we have in current state
	stableReplicas := workloadInfo.Status.Replicas - canaryReplicas
	// the number of canary pods that have been ready in current state
	canaryReadyReplicas := workloadInfo.Status.UpdatedReadyReplicas
	// the number of the real canary pods we should have in the current batch
	canaryGoal := c.calculateCurrentCanary(c.release.Status.ObservedWorkloadReplicas)
	// the number of the real stable pods we should have in the current batch
	stableGoal := c.calculateCurrentStable(c.release.Status.ObservedWorkloadReplicas)
	// the number of max unavailable canary pods allowed by this workload
	maxUnavailable := 0
	if workloadInfo.MaxUnavailable != nil {
		maxUnavailable, _ = intstr.GetValueFromIntOrPercent(workloadInfo.MaxUnavailable, int(c.release.Status.ObservedWorkloadReplicas), true)
	var noNeedRollbackReplicas int32
	if c.newStatus.CanaryStatus.NoNeedUpdateReplicas != nil {
		noNeedRollbackReplicas = *c.newStatus.CanaryStatus.NoNeedUpdateReplicas
	}

	klog.InfoS("checking the batch releasing progress",
	replicas := c.newStatus.ObservedWorkloadReplicas
	currentBatch := c.newStatus.CanaryStatus.CurrentBatch

	// the number of canary pods that the plan expects in the current batch
	plannedBatchCanaryReplicas := c.calculateCurrentCanary(c.newStatus.ObservedWorkloadReplicas)
	// the number of canary pods that consider rollback context and other real-world situations
	expectedBatchCanaryReplicas := c.calculateCurrentCanary(replicas - noNeedRollbackReplicas)
	// the number of stable pods that consider rollback context and other real-world situations
	expectedBatchStableReplicas := replicas - expectedBatchCanaryReplicas

	// if ordered update, the partition is related to pod ordinals;
	// if unordered update, the partition behaves just like a cloneSet partition
	orderedUpdate, _ := c.IsOrderedUpdate()
	if !orderedUpdate {
		expectedBatchStableReplicas -= noNeedRollbackReplicas
	}

	klog.V(3).InfoS("check one batch, current info:",
		"BatchRelease", client.ObjectKeyFromObject(c.release),
		"current-batch", c.release.Status.CanaryStatus.CurrentBatch,
		"canary-goal", canaryGoal,
		"stable-goal", stableGoal,
		"stable-replicas", stableReplicas,
		"canary-ready-replicas", canaryReadyReplicas,
		"maxUnavailable", maxUnavailable)
		"currentBatch", currentBatch,
		"replicas", replicas,
		"noNeedRollbackReplicas", noNeedRollbackReplicas,
		"plannedBatchCanaryReplicas", plannedBatchCanaryReplicas,
		"expectedBatchCanaryReplicas", expectedBatchCanaryReplicas,
		"expectedBatchStableReplicas", expectedBatchStableReplicas)

	// maybe the workload replicas were scaled; we should requeue and handle the workload scaling event
	if workloadInfo.Status.Replicas != c.release.Status.ObservedWorkloadReplicas {
		err := fmt.Errorf("%v replicas don't match ObservedWorkloadReplicas, workload status replicas: %v, observed workload replicas: %v",
			workloadInfo.GVKWithName, workloadInfo.Status.Replicas, c.release.Status.ObservedWorkloadReplicas)
		klog.ErrorS(err, "the batch is not valid", "current-batch", c.release.Status.CanaryStatus.CurrentBatch)
		return false, err
	}

	if ready, err := c.IsBatchReady(canaryGoal, stableGoal); err != nil || !ready {
	if ready, err := c.IsBatchReady(expectedBatchCanaryReplicas, expectedBatchStableReplicas); err != nil || !ready {
		klog.InfoS("the batch is not ready yet", "Workload", workloadInfo.GVKWithName,
			"ReleasePlan", client.ObjectKeyFromObject(c.release), "current-batch", c.release.Status.CanaryStatus.CurrentBatch)
			"BatchRelease", client.ObjectKeyFromObject(c.release), "current-batch", c.release.Status.CanaryStatus.CurrentBatch)
		return false, nil
	}

	klog.Infof("All pods of %v in current batch are ready, BatchRelease(%v), current-batch=%v",
		workloadInfo.GVKWithName, client.ObjectKeyFromObject(c.release), c.release.Status.CanaryStatus.CurrentBatch)
	c.recorder.Eventf(c.release, v1.EventTypeNormal, "BatchAvailable", "Batch %d is available", c.release.Status.CanaryStatus.CurrentBatch)
	return true, nil
}
@@ -255,9 +343,9 @@ func (c *UnifiedWorkloadRolloutControlPlane) SyncWorkloadInfo() (WorkloadEventTy
	}

	// in case the workload status is untrustworthy
	if workloadInfo.Status.ObservedGeneration != workloadInfo.Metadata.Generation {
	if workloadInfo.Status.ObservedGeneration != workloadInfo.Generation {
		klog.Warningf("%v is still reconciling, waiting for it to complete, generation: %v, observed: %v",
			workloadInfo.GVKWithName, workloadInfo.Metadata.Generation, workloadInfo.Status.ObservedGeneration)
			workloadInfo.GVKWithName, workloadInfo.Generation, workloadInfo.Status.ObservedGeneration)
		return WorkloadStillReconciling, nil, nil
	}

@@ -273,6 +361,16 @@ func (c *UnifiedWorkloadRolloutControlPlane) SyncWorkloadInfo() (WorkloadEventTy
	return WorkloadReplicasChanged, workloadInfo, nil
}

	// updateRevision == CurrentRevision means the workload is rolling back or newly-created.
	if workloadInfo.Status.UpdateRevision == workloadInfo.Status.StableRevision &&
		// stableRevision == UpdateRevision means the workload is rolling back instead of newly-created.
		c.newStatus.StableRevision == workloadInfo.Status.UpdateRevision &&
		// StableRevision != observed UpdateRevision means the rollback event has not been observed.
		c.newStatus.StableRevision != c.newStatus.UpdateRevision {
		klog.Warningf("Workload(%v) is rolling back in batches", workloadInfo.GVKWithName)
		return WorkloadRollbackInBatch, workloadInfo, nil
	}

	// in case the workload was changed
	if workloadInfo.Status.UpdateRevision != c.release.Status.UpdateRevision {
		klog.Warningf("%v updateRevision changed during releasing, should try to restart the release plan, "+

@@ -285,7 +383,7 @@ func (c *UnifiedWorkloadRolloutControlPlane) SyncWorkloadInfo() (WorkloadEventTy

// the canary workload size for the current batch
func (c *UnifiedWorkloadRolloutControlPlane) calculateCurrentCanary(totalSize int32) int32 {
	canaryGoal := int32(util.CalculateNewBatchTarget(&c.release.Spec.ReleasePlan, int(totalSize), int(c.release.Status.CanaryStatus.CurrentBatch)))
	canaryGoal := int32(calculateNewBatchTarget(&c.release.Spec.ReleasePlan, int(totalSize), int(c.release.Status.CanaryStatus.CurrentBatch)))
	klog.InfoS("Calculated the number of pods in the target workload after current batch", "BatchRelease", client.ObjectKeyFromObject(c.release),
		"current batch", c.release.Status.CanaryStatus.CurrentBatch, "canary replicas goal", canaryGoal)
	return canaryGoal
@@ -311,19 +409,21 @@ func (c *UnifiedWorkloadRolloutControlPlane) RecordWorkloadRevisionAndReplicas()
	return nil
}

-func (c *UnifiedWorkloadRolloutControlPlane) patchPodBatchLabel(workloadInfo *util.WorkloadInfo, canaryGoal int32) (bool, error) {
-	rolloutID, exist := c.release.Labels[util.RolloutIDLabel]
-	if !exist || rolloutID == "" {
+func (c *UnifiedWorkloadRolloutControlPlane) patchPodBatchLabel(pods []*v1.Pod, plannedBatchCanaryReplicas, expectedBatchStableReplicas int32) (bool, error) {
+	rolloutID := c.release.Spec.ReleasePlan.RolloutID
+	if rolloutID == "" {
		return true, nil
	}

-	pods, err := c.ListOwnedPods()
-	if err != nil {
-		klog.Errorf("Failed to list pods for %v", workloadInfo.GVKWithName)
-		return false, err
-	}
-
	batchID := c.release.Status.CanaryStatus.CurrentBatch + 1
	updateRevision := c.release.Status.UpdateRevision
-	return util.PatchPodBatchLabel(c.client, pods, rolloutID, batchID, updateRevision, canaryGoal, client.ObjectKeyFromObject(c.release))
+	if c.newStatus.CanaryStatus.NoNeedUpdateReplicas != nil {
+		orderedUpdate, _ := c.IsOrderedUpdate()
+		if orderedUpdate {
+			pods = filterPodsForOrderedRollback(pods, plannedBatchCanaryReplicas, expectedBatchStableReplicas, c.release.Status.ObservedWorkloadReplicas, rolloutID, updateRevision)
+		} else {
+			pods = filterPodsForUnorderedRollback(pods, plannedBatchCanaryReplicas, expectedBatchStableReplicas, c.release.Status.ObservedWorkloadReplicas, rolloutID, updateRevision)
+		}
+	}
+	return patchPodBatchLabel(c.client, pods, rolloutID, batchID, updateRevision, plannedBatchCanaryReplicas, client.ObjectKeyFromObject(c.release))
}
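The labels written by this helper make every batch queryable after the fact. A hedged sketch of listing the pods of one batch via the apps.kruise.io/rollout-id and apps.kruise.io/rollout-batch-id keys defined later in this commit; the namespace and id values are placeholders:

    package main

    import (
    	"context"

    	corev1 "k8s.io/api/core/v1"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // listBatchPods lists the pods that were upgraded in a given batch of a
    // given rollout, by selecting on the two labels patched during release.
    func listBatchPods(c client.Client, namespace, rolloutID, batchID string) (*corev1.PodList, error) {
    	pods := &corev1.PodList{}
    	err := c.List(context.TODO(), pods,
    		client.InNamespace(namespace),
    		client.MatchingLabels{
    			"apps.kruise.io/rollout-id":       rolloutID, // Rollout.Spec.RolloutID
    			"apps.kruise.io/rollout-batch-id": batchID,   // 1-based batch index
    		})
    	return pods, err
    }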
@@ -27,7 +27,7 @@ type BatchRelease interface {

	// 1. Promote release workload in step(index), 1<=index<=len(step)
	// 2. Promote will resume stable workload if the last batch(index=-1) is finished
-	Promote(index int32, checkReady bool) (bool, error)
+	Promote(index int32, isRollback, checkReady bool) (bool, error)

	// FetchBatchRelease fetch batchRelease
	FetchBatchRelease() (*rolloutv1alpha1.BatchRelease, error)
@@ -118,7 +118,7 @@ func (r *innerBatchRelease) FetchBatchRelease() (*rolloutv1alpha1.BatchRelease,
	return batch, nil
}

-func (r *innerBatchRelease) Promote(index int32, checkReady bool) (bool, error) {
+func (r *innerBatchRelease) Promote(index int32, isRollback, checkReady bool) (bool, error) {
	// Promote will resume stable workload if the last batch(index=-1) is finished
	if index == -1 {
		return r.resumeStableWorkload(checkReady)
@@ -132,11 +132,23 @@ func (r *innerBatchRelease) Promote(index int32, checkReady bool) (bool, error)
		klog.Errorf("error getting updated BatchRelease(%s/%s) from client", batch.Namespace, batch.Name)
		return err
	}
-	if !batch.Spec.Paused && *batch.Spec.ReleasePlan.BatchPartition == index {
+	if IsPromoted(r.rollout, batch, isRollback) {
		return nil
	}
-	batch.Spec.ReleasePlan.BatchPartition = utilpointer.Int32Ptr(index)
+	if isRollback && len(r.rollout.Spec.Strategy.Canary.TrafficRoutings) == 0 {
+		if batch.Annotations == nil {
+			batch.Annotations = map[string]string{}
+		}
+		// only rollback case should update this rollout id for BatchRelease.
+		batch.Spec.ReleasePlan.RolloutID = r.rollout.Spec.RolloutID
+		batch.Annotations[util.RollbackInBatchAnnotation] = r.rollout.Annotations[util.RollbackInBatchAnnotation]
+	}
	batch.Spec.Paused = false
+	if batch.Labels == nil {
+		batch.Labels = map[string]string{}
+	}
+	batch.Spec.ReleasePlan.BatchPartition = utilpointer.Int32Ptr(index)
	if err := r.Client.Update(context.TODO(), batch); err != nil {
		return err
	}
@@ -318,14 +330,11 @@ func createBatchRelease(rollout *rolloutv1alpha1.Rollout, batchName string) *rol
			},
			ReleasePlan: rolloutv1alpha1.ReleasePlan{
				Batches:        batches,
+				RolloutID:      rollout.Spec.RolloutID,
				BatchPartition: utilpointer.Int32Ptr(0),
			},
		},
	}
-
-	if rollout.Spec.RolloutID != "" {
-		br.Labels[util.RolloutIDLabel] = rollout.Spec.RolloutID
-	}
	return br
}
@@ -333,3 +342,22 @@ func createBatchRelease(rollout *rolloutv1alpha1.Rollout, batchName string) *rol
func rolloutBatchName(rollout *rolloutv1alpha1.Rollout) string {
	return rollout.Name
}
+
+// IsPromoted returns true if the current batch has been promoted:
+// - 1. BatchRelease BatchPartition == Rollout currentStepIndex-1;
+// - 2. Rollback annotation has been patched to BatchRelease when rolling back.
+func IsPromoted(rollout *rolloutv1alpha1.Rollout, batch *rolloutv1alpha1.BatchRelease, isRollback bool) bool {
+	currentBatch := int32(0)
+	if rollout.Status.CanaryStatus != nil {
+		currentBatch = rollout.Status.CanaryStatus.CurrentStepIndex - 1
+	}
+
+	if batch.Spec.ReleasePlan.BatchPartition == nil || *batch.Spec.ReleasePlan.BatchPartition != currentBatch {
+		return false
+	}
+
+	if isRollback && batch.Annotations[util.RollbackInBatchAnnotation] != rollout.Annotations[util.RollbackInBatchAnnotation] {
+		return false
+	}
+	return true
+}
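Concretely, with a Rollout at CurrentStepIndex == 3, the corresponding batch index is 2, and promotion is acknowledged only once BatchPartition points there and, for rollbacks, the annotation has propagated. A worked sketch with invented values:

    package main

    import "fmt"

    func main() {
    	// Invented values for illustration only.
    	currentStepIndex := int32(3)
    	currentBatch := currentStepIndex - 1 // batches are zero-based: 2

    	batchPartition := int32(2)  // BatchRelease.Spec.ReleasePlan.BatchPartition
    	rollbackAnnoMatches := true // rollback annotation already copied over
    	isRollback := true

    	promoted := batchPartition == currentBatch && (!isRollback || rollbackAnnoMatches)
    	fmt.Println(promoted) // true
    }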
@@ -22,6 +22,7 @@ import (
	"time"

	rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
+	"github.com/openkruise/rollouts/pkg/controller/rollout/batchrelease"
	"github.com/openkruise/rollouts/pkg/util"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -161,10 +162,10 @@ func (r *rolloutContext) doCanaryUpgrade() (bool, error) {
	cond.Message = fmt.Sprintf("Rollout is in step(%d/%d), and upgrade workload new versions", canaryStatus.CurrentStepIndex, steps)
	r.newStatus.Message = cond.Message
	// promote workload next batch release
-	if *batch.Spec.ReleasePlan.BatchPartition+1 < canaryStatus.CurrentStepIndex {
+	if !batchrelease.IsPromoted(r.rollout, batch, r.workload.IsInRollback) {
		r.recorder.Eventf(r.rollout, corev1.EventTypeNormal, "Progressing", fmt.Sprintf("start upgrade step(%d) canary pods with new versions", canaryStatus.CurrentStepIndex))
		klog.Infof("rollout(%s/%s) will promote batch from(%d) -> to(%d)", r.rollout.Namespace, r.rollout.Name, *batch.Spec.ReleasePlan.BatchPartition+1, canaryStatus.CurrentStepIndex)
-		return r.batchControl.Promote(canaryStatus.CurrentStepIndex, false)
+		return r.batchControl.Promote(canaryStatus.CurrentStepIndex, r.workload.IsInRollback, false)
	}

	// check whether batchRelease is ready
@@ -238,7 +239,7 @@ func (r *rolloutContext) doCanaryFinalising() (bool, error) {
	// isComplete indicates whether rollout progressing complete, and wait for all pods are ready
	// else indicates rollout is canceled
	klog.Infof("rollout(%s/%s) in finalizing: upgrade stable workload", r.rollout.Namespace, r.rollout.Name)
-	done, err = r.batchControl.Promote(-1, r.isComplete)
+	done, err = r.batchControl.Promote(-1, false, r.isComplete)
	if err != nil || !done {
		return done, err
	}
@@ -28,7 +28,6 @@ import (
	"github.com/openkruise/rollouts/pkg/util"
-	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/klog/v2"
@@ -73,65 +72,12 @@ func (r *RolloutReconciler) reconcileRolloutProgressing(rollout *rolloutv1alpha1
	}

	case rolloutv1alpha1.ProgressingReasonInRolling:
-		// rollout canceled, indicates rollback(v1 -> v2 -> v1)
-		if workload.IsInRollback {
-			newStatus.CanaryStatus.CanaryRevision = workload.CanaryRevision
-			r.Recorder.Eventf(rollout, corev1.EventTypeNormal, "Progressing", "workload has been rollback, then rollout is canceled")
-			klog.Infof("rollout(%s/%s) workload has been rollback, then rollout canceled", rollout.Namespace, rollout.Name)
-			progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonCancelling, "The workload has been rolled back and the rollout process will be cancelled")
-			// paused rollout progress
-		} else if rollout.Spec.Strategy.Paused {
-			klog.Infof("rollout(%s/%s) is Progressing, but paused", rollout.Namespace, rollout.Name)
-			progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonPaused, "Rollout has been paused, you can resume it by kube-cli")
-			// In case of continuous publishing(v1 -> v2 -> v3), then restart publishing
-		} else if newStatus.CanaryStatus.CanaryRevision != "" && workload.CanaryRevision != newStatus.CanaryStatus.CanaryRevision {
-			r.Recorder.Eventf(rollout, corev1.EventTypeNormal, "Progressing", "workload continuous publishing canaryRevision, then restart publishing")
-			klog.Infof("rollout(%s/%s) workload continuous publishing canaryRevision from(%s) -> to(%s), then restart publishing",
-				rollout.Namespace, rollout.Name, newStatus.CanaryStatus.CanaryRevision, workload.CanaryRevision)
-			done, err := r.doProgressingReset(rollout, newStatus)
-			if err != nil {
-				klog.Errorf("rollout(%s/%s) doProgressingReset failed: %s", rollout.Namespace, rollout.Name, err.Error())
-				return nil, err
-			} else if done {
-				progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonInitializing, "Workload is continuous release")
-				klog.Infof("rollout(%s/%s) workload is continuous publishing, reset complete", rollout.Namespace, rollout.Name)
-			} else {
-				// Incomplete, recheck
-				expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
-				recheckTime = &expectedTime
-				klog.Infof("rollout(%s/%s) workload is continuous publishing, reset incomplete, and recheck(%s)", rollout.Namespace, rollout.Name, expectedTime.String())
-			}
-			// rollout canary steps configuration change
-		} else if newStatus.CanaryStatus.RolloutHash != "" && newStatus.CanaryStatus.RolloutHash != rollout.Annotations[util.RolloutHashAnnotation] {
-			batchControl := batchrelease.NewInnerBatchController(r.Client, rollout)
-			newStepIndex, err := r.reCalculateCanaryStepIndex(rollout, batchControl)
-			if err != nil {
-				klog.Errorf("rollout(%s/%s) reCalculate Canary StepIndex failed: %s", rollout.Namespace, rollout.Name, err.Error())
-				return nil, err
-			}
-			// canary step configuration change causes current step index change
-			newStatus.CanaryStatus.CurrentStepIndex = newStepIndex
-			newStatus.CanaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateUpgrade
-			newStatus.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
-			newStatus.CanaryStatus.RolloutHash = rollout.Annotations[util.RolloutHashAnnotation]
-			klog.Infof("rollout(%s/%s) canary step configuration change, and stepIndex(%d) state(%s)",
-				rollout.Namespace, rollout.Name, newStatus.CanaryStatus.CurrentStepIndex, newStatus.CanaryStatus.CurrentStepState)
-		} else {
-			klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
-			//check if canary is done
-			if newStatus.CanaryStatus.CurrentStepState == rolloutv1alpha1.CanaryStepStateCompleted {
-				klog.Infof("rollout(%s/%s) progressing rolling done", rollout.Namespace, rollout.Name)
-				progressingStateTransition(newStatus, corev1.ConditionTrue, rolloutv1alpha1.ProgressingReasonFinalising, "Rollout has been completed and some closing work is being done")
-			} else { // rollout is in rolling
-				newStatus.CanaryStatus.PodTemplateHash = workload.PodTemplateHash
-				recheckTime, err = r.doProgressingInRolling(rollout, newStatus)
-				if err != nil {
-					return nil, err
-				}
-			}
-		}
+		klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
+		recheckTime, err = r.doProgressingInRolling(rollout, workload, newStatus)
+		if err != nil {
+			return nil, err
+		}

	// after the normal completion of rollout, enter into the Finalising process
	case rolloutv1alpha1.ProgressingReasonFinalising:
		klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason)
		var done bool
@@ -144,7 +90,6 @@ func (r *RolloutReconciler) reconcileRolloutProgressing(rollout *rolloutv1alpha1
	}

	case rolloutv1alpha1.ProgressingReasonPaused:
-		// rollout canceled, indicates rollback(v1 -> v2 -> v1)
		if workload.IsInRollback {
			newStatus.CanaryStatus.CanaryRevision = workload.CanaryRevision
			r.Recorder.Eventf(rollout, corev1.EventTypeNormal, "Progressing", "workload has been rollback, then rollout is canceled")
@@ -199,27 +144,6 @@ func (r *RolloutReconciler) doProgressingInitializing(rollout *rolloutv1alpha1.R
	return r.verifyCanaryStrategy(rollout, newStatus)
}

-func (r *RolloutReconciler) doProgressingInRolling(rollout *rolloutv1alpha1.Rollout, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
-	// fetch target workload
-	workload, err := r.Finder.GetWorkloadForRef(rollout.Namespace, rollout.Spec.ObjectRef.WorkloadRef)
-	if err != nil {
-		klog.Errorf("rollout(%s/%s) GetWorkloadForRef failed: %s", rollout.Namespace, rollout.Name, err.Error())
-		return nil, err
-	} else if workload == nil {
-		expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
-		klog.Warningf("rollout(%s/%s) Fetch workload Not Found, and recheck(%s)", rollout.Namespace, rollout.Name, expectedTime.String())
-		return &expectedTime, nil
-	}
-
-	rolloutCon := newRolloutContext(r.Client, r.Recorder, rollout, newStatus, workload)
-	err = rolloutCon.reconcile()
-	if err != nil {
-		klog.Errorf("rollout(%s/%s) Progressing failed: %s", rollout.Namespace, rollout.Name, err.Error())
-		return nil, err
-	}
-	return rolloutCon.recheckTime, nil
-}
-
func (r *RolloutReconciler) doProgressingReset(rollout *rolloutv1alpha1.Rollout, newStatus *rolloutv1alpha1.RolloutStatus) (bool, error) {
	rolloutCon := newRolloutContext(r.Client, r.Recorder, rollout, newStatus, nil)
	if rolloutCon.rollout.Spec.Strategy.Canary.TrafficRoutings != nil {
@@ -0,0 +1,159 @@
package rollout

import (
	"time"

	rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
	"github.com/openkruise/rollouts/pkg/controller/rollout/batchrelease"
	"github.com/openkruise/rollouts/pkg/util"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func (r *RolloutReconciler) doProgressingInRolling(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
	// Handle the 5 special cases first; the order of the following cases must be kept:
	switch {
	// 1. In case of rollback in a quick way, un-pause and just use the workload rolling strategy
	case isRollingBackDirectly(rollout, workload):
		return r.handleRollbackDirectly(rollout, workload, newStatus)

	// 2. In case of rollout paused, just stop reconciling
	case isRolloutPaused(rollout):
		return r.handleRolloutPaused(rollout, newStatus)

	// 3. In case of rollback in a batch way, use the rollout step strategy
	case isRollingBackInBatches(rollout, workload):
		return r.handleRollbackInBatches(rollout, workload, newStatus)

	// 4. In case of continuous publishing(v1 -> v2 -> v3), restart publishing
	case isContinuousRelease(rollout, workload):
		return r.handleContinuousRelease(rollout, workload, newStatus)

	// 5. In case of rollout plan changed, recalculate and continue publishing
	case isRolloutPlanChanged(rollout):
		return r.handleRolloutPlanChanged(rollout, newStatus)
	}

	return r.handleNormalRolling(rollout, workload, newStatus)
}

func (r *RolloutReconciler) handleRolloutPaused(rollout *rolloutv1alpha1.Rollout, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
	klog.Infof("rollout(%s/%s) is Progressing, but paused", rollout.Namespace, rollout.Name)
	progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonPaused, "Rollout has been paused, you can resume it by kube-cli")
	return nil, nil
}

func (r *RolloutReconciler) handleContinuousRelease(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
	r.Recorder.Eventf(rollout, corev1.EventTypeNormal, "Progressing", "workload continuous publishing canaryRevision, then restart publishing")
	klog.Infof("rollout(%s/%s) workload continuous publishing canaryRevision from(%s) -> to(%s), then restart publishing",
		rollout.Namespace, rollout.Name, newStatus.CanaryStatus.CanaryRevision, workload.CanaryRevision)

	var recheckTime *time.Time
	done, err := r.doProgressingReset(rollout, newStatus)
	if err != nil {
		klog.Errorf("rollout(%s/%s) doProgressingReset failed: %s", rollout.Namespace, rollout.Name, err.Error())
		return nil, err
	} else if done {
		progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonInitializing, "Workload is continuous release")
		klog.Infof("rollout(%s/%s) workload is continuous publishing, reset complete", rollout.Namespace, rollout.Name)
	} else {
		// Incomplete, recheck
		expectedTime := time.Now().Add(time.Duration(defaultGracePeriodSeconds) * time.Second)
		recheckTime = &expectedTime
		klog.Infof("rollout(%s/%s) workload is continuous publishing, reset incomplete, and recheck(%s)", rollout.Namespace, rollout.Name, expectedTime.String())
	}
	return recheckTime, nil
}

func (r *RolloutReconciler) handleRollbackDirectly(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
	newStatus.CanaryStatus.CanaryRevision = workload.CanaryRevision
	r.Recorder.Eventf(rollout, corev1.EventTypeNormal, "Progressing", "workload has been rollback, then rollout is canceled")
	klog.Infof("rollout(%s/%s) workload has been rollback directly, then rollout canceled", rollout.Namespace, rollout.Name)
	progressingStateTransition(newStatus, corev1.ConditionFalse, rolloutv1alpha1.ProgressingReasonCancelling, "The workload has been rolled back and the rollout process will be cancelled")
	return nil, nil
}

func (r *RolloutReconciler) handleRollbackInBatches(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
	// restart from the beginning
	newStatus.CanaryStatus.CurrentStepIndex = 1
	newStatus.CanaryStatus.CanaryRevision = workload.CanaryRevision
	newStatus.CanaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateUpgrade
	newStatus.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
	newStatus.CanaryStatus.RolloutHash = rollout.Annotations[util.RolloutHashAnnotation]
	klog.Infof("rollout(%s/%s) workload has been rollback in batches, then restart from beginning", rollout.Namespace, rollout.Name)
	return nil, nil
}

func (r *RolloutReconciler) handleRolloutPlanChanged(rollout *rolloutv1alpha1.Rollout, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
	batchControl := batchrelease.NewInnerBatchController(r.Client, rollout)
	newStepIndex, err := r.reCalculateCanaryStepIndex(rollout, batchControl)
	if err != nil {
		klog.Errorf("rollout(%s/%s) reCalculate Canary StepIndex failed: %s", rollout.Namespace, rollout.Name, err.Error())
		return nil, err
	}
	// canary step configuration change causes current step index change
	newStatus.CanaryStatus.CurrentStepIndex = newStepIndex
	newStatus.CanaryStatus.CurrentStepState = rolloutv1alpha1.CanaryStepStateUpgrade
	newStatus.CanaryStatus.LastUpdateTime = &metav1.Time{Time: time.Now()}
	newStatus.CanaryStatus.RolloutHash = rollout.Annotations[util.RolloutHashAnnotation]
	klog.Infof("rollout(%s/%s) canary step configuration change, and stepIndex(%d) state(%s)",
		rollout.Namespace, rollout.Name, newStatus.CanaryStatus.CurrentStepIndex, newStatus.CanaryStatus.CurrentStepState)
	return nil, nil
}

func (r *RolloutReconciler) handleNormalRolling(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
	// check if canary is done
	if newStatus.CanaryStatus.CurrentStepState == rolloutv1alpha1.CanaryStepStateCompleted {
		klog.Infof("rollout(%s/%s) progressing rolling done", rollout.Namespace, rollout.Name)
		progressingStateTransition(newStatus, corev1.ConditionTrue, rolloutv1alpha1.ProgressingReasonFinalising, "Rollout has been completed and some closing work is being done")
	} else { // rollout is in rolling
		newStatus.CanaryStatus.PodTemplateHash = workload.PodTemplateHash
		return r.doNormalRolling(rollout, workload, newStatus)
	}
	return nil, nil
}

func (r *RolloutReconciler) doNormalRolling(rollout *rolloutv1alpha1.Rollout, workload *util.Workload, newStatus *rolloutv1alpha1.RolloutStatus) (*time.Time, error) {
	rolloutCon := newRolloutContext(r.Client, r.Recorder, rollout, newStatus, workload)
	err := rolloutCon.reconcile()
	if err != nil {
		klog.Errorf("rollout(%s/%s) Progressing failed: %s", rollout.Namespace, rollout.Name, err.Error())
		return nil, err
	}
	return rolloutCon.recheckTime, nil
}

/* **********************************************************************
	help functions
*********************************************************************** */
func isRolloutPaused(rollout *rolloutv1alpha1.Rollout) bool {
	return rollout.Spec.Strategy.Paused
}

func isRolloutPlanChanged(rollout *rolloutv1alpha1.Rollout) bool {
	status := &rollout.Status
	return status.CanaryStatus.RolloutHash != "" && status.CanaryStatus.RolloutHash != rollout.Annotations[util.RolloutHashAnnotation]
}

func isContinuousRelease(rollout *rolloutv1alpha1.Rollout, workload *util.Workload) bool {
	status := &rollout.Status
	return status.CanaryStatus.CanaryRevision != "" && workload.CanaryRevision != status.CanaryStatus.CanaryRevision && !workload.IsInRollback
}

func isRollingBackDirectly(rollout *rolloutv1alpha1.Rollout, workload *util.Workload) bool {
	status := &rollout.Status
	inBatch := util.IsRollbackInBatchPolicy(rollout, workload.Labels)
	return workload.IsInRollback && workload.CanaryRevision != status.CanaryStatus.CanaryRevision && !inBatch
}

func isRollingBackInBatches(rollout *rolloutv1alpha1.Rollout, workload *util.Workload) bool {
	// currently, only support the case of no traffic routing
	if len(rollout.Spec.Strategy.Canary.TrafficRoutings) > 0 {
		return false
	}
	status := &rollout.Status
	inBatch := util.IsRollbackInBatchPolicy(rollout, workload.Labels)
	return workload.IsInRollback && workload.CanaryRevision != status.CanaryStatus.CanaryRevision && inBatch
}
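Because a switch without an expression evaluates its cases top to bottom, the order above encodes precedence. A small, self-contained sketch (stubbed booleans, purely illustrative) of how reordering would change the outcome:

    package main

    import "fmt"

    func main() {
    	// A workload can satisfy several predicates at once; the dispatch
    	// order decides which handler wins. Here rollback beats paused.
    	isRollingBackDirectly := true
    	isPaused := true

    	switch {
    	case isRollingBackDirectly: // checked before paused, so rollback wins
    		fmt.Println("cancel rollout and let the workload roll back")
    	case isPaused:
    		fmt.Println("park in paused state")
    	}
    }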
@@ -95,6 +95,8 @@ func (r *RolloutReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
	return ctrl.Result{}, err
}

+	klog.Infof("Begin to reconcile Rollout %v", klog.KObj(rollout))
+
	// If workload watcher does not exist, then add the watcher dynamically
	workloadRef := rollout.Spec.ObjectRef.WorkloadRef
	workloadGVK := util.GetGVKFrom(workloadRef)
@@ -0,0 +1,74 @@
/*
Copyright 2022 The Kruise Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

// For Rollout and BatchRelease
const (
	// BatchReleaseControlAnnotation is controller info about the batchRelease when rolling out
	BatchReleaseControlAnnotation = "batchrelease.rollouts.kruise.io/control-info"
	// InRolloutProgressingAnnotation marks the workload as entering the rollout progressing process
	// and does not allow paused=false during this process
	InRolloutProgressingAnnotation = "rollouts.kruise.io/in-progressing"
	// RolloutHashAnnotation records the observed rollout spec hash
	RolloutHashAnnotation = "rollouts.kruise.io/hash"
	// RollbackInBatchAnnotation allows users to disable quick rollback and roll back in batch style instead.
	RollbackInBatchAnnotation = "rollouts.kruise.io/rollback-in-batch"
)

// For Workloads
const (
	// CanaryDeploymentLabel labels the canary deployment that is created by the batchRelease controller
	CanaryDeploymentLabel = "rollouts.kruise.io/canary-deployment"
	// CanaryDeploymentFinalizer is a finalizer for resources patched by the batchRelease controller
	CanaryDeploymentFinalizer = "finalizer.rollouts.kruise.io/batch-release"
	// KruiseRolloutFinalizer is a finalizer for deployment/service/ingress/gateway/etc.
	KruiseRolloutFinalizer = "rollouts.kruise.io/rollout"
	// WorkloadTypeLabel is a label to identify the workload type
	WorkloadTypeLabel = "rollouts.kruise.io/workload-type"
)

// For Pods
const (
	// RolloutIDLabel is designed to distinguish each workload revision publication.
	// The value of RolloutIDLabel corresponds to Rollout.Spec.RolloutID.
	RolloutIDLabel = "apps.kruise.io/rollout-id"
	// RolloutBatchIDLabel is the label key of the batch id that will be patched to pods during rollout.
	// Only when RolloutIDLabel is set will RolloutBatchIDLabel be patched.
	// Users can use RolloutIDLabel and RolloutBatchIDLabel to select the pods that were upgraded in a certain batch and release.
	RolloutBatchIDLabel = "apps.kruise.io/rollout-batch-id"
	// NoNeedUpdatePodLabel will be patched to pods during rollback in batches if the pods do not need to be rolled back
	NoNeedUpdatePodLabel = "rollouts.kruise.io/no-need-update"
)

// For Others
const (
	// We omit vowels from the set of available characters to reduce the chances
	// of "bad words" being formed.
	alphanums = "bcdfghjklmnpqrstvwxz2456789"

	// CloneSetType, DeploymentType and StatefulSetType are values of WorkloadTypeLabel
	CloneSetType    WorkloadType = "cloneset"
	DeploymentType  WorkloadType = "deployment"
	StatefulSetType WorkloadType = "statefulset"

	AddFinalizerOpType    FinalizerOpType = "Add"
	RemoveFinalizerOpType FinalizerOpType = "Remove"
)

type WorkloadType string

type FinalizerOpType string
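A hedged sketch of how a client could opt into batch-style rollback by setting this annotation on the Rollout before re-applying the previous revision. The "true" value and object wiring are assumptions for illustration, not confirmed by this diff:

    package main

    import (
    	"context"

    	rolloutv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // enableRollbackInBatch marks a Rollout so that a subsequent rollback is
    // executed batch by batch instead of being delegated to the workload.
    func enableRollbackInBatch(c client.Client, rollout *rolloutv1alpha1.Rollout) error {
    	if rollout.Annotations == nil {
    		rollout.Annotations = map[string]string{}
    	}
    	// The value "true" is an assumed convention for illustration.
    	rollout.Annotations["rollouts.kruise.io/rollback-in-batch"] = "true"
    	return c.Update(context.TODO(), rollout)
    }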
@@ -28,7 +28,6 @@ import (
	apps "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
@@ -228,10 +227,12 @@ func (r *ControllerFinder) getStatefulSetLikeWorkload(namespace string, ref *rol
		return nil, nil
	}

-	unifiedObject := &unstructured.Unstructured{}
-	unifiedObjectKey := types.NamespacedName{Name: ref.Name, Namespace: namespace}
-	unifiedObject.SetGroupVersionKind(schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind))
-	err := r.Get(context.TODO(), unifiedObjectKey, unifiedObject)
+	key := types.NamespacedName{Name: ref.Name, Namespace: namespace}
+	set := GetEmptyWorkloadObject(schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind))
+	if set == nil {
+		return nil, nil
+	}
+	err := r.Get(context.TODO(), key, set)
	if err != nil {
		// when error is NotFound, it is ok here.
		if errors.IsNotFound(err) {
@@ -240,8 +241,8 @@ func (r *ControllerFinder) getStatefulSetLikeWorkload(namespace string, ref *rol
		return nil, err
	}

-	workloadInfo := ParseStatefulSetInfo(unifiedObject, unifiedObjectKey)
-	if workloadInfo.Metadata.Generation != workloadInfo.Status.ObservedGeneration {
+	workloadInfo := ParseStatefulSetInfo(set, key)
+	if workloadInfo.Generation != workloadInfo.Status.ObservedGeneration {
		return &Workload{IsStatusConsistent: false}, nil
	}
	workload := &Workload{
@@ -250,7 +251,7 @@ func (r *ControllerFinder) getStatefulSetLikeWorkload(namespace string, ref *rol
		CanaryRevision:      workloadInfo.Status.UpdateRevision,
		CanaryReplicas:      workloadInfo.Status.UpdatedReplicas,
		CanaryReadyReplicas: workloadInfo.Status.UpdatedReadyReplicas,
-		ObjectMeta:          *workloadInfo.Metadata,
+		ObjectMeta:          workloadInfo.ObjectMeta,
		Replicas:            *workloadInfo.Replicas,
		PodTemplateHash:     workloadInfo.Status.UpdateRevision,
		IsStatusConsistent:  true,
@@ -0,0 +1,362 @@
package util

import (
	"encoding/json"
	"fmt"

	appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
	appsv1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
	apps "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/klog/v2"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func ParseStatefulSetInfo(object client.Object, namespacedName types.NamespacedName) *WorkloadInfo {
	workloadGVKWithName := fmt.Sprintf("%v(%v)", object.GetObjectKind().GroupVersionKind(), namespacedName)
	selector, err := getSelector(object)
	if err != nil {
		klog.Errorf("Failed to parse selector for workload(%v)", workloadGVKWithName)
	}
	return &WorkloadInfo{
		ObjectMeta:     *getMetadata(object),
		MaxUnavailable: getStatefulSetMaxUnavailable(object),
		Replicas:       pointer.Int32(GetReplicas(object)),
		Status:         ParseWorkloadStatus(object),
		Selector:       selector,
		GVKWithName:    workloadGVKWithName,
	}
}

func IsStatefulSetRollingUpdate(object client.Object) bool {
	switch o := object.(type) {
	case *apps.StatefulSet:
		return o.Spec.UpdateStrategy.Type == "" || o.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType
	case *appsv1beta1.StatefulSet:
		return o.Spec.UpdateStrategy.Type == "" || o.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType
	case *unstructured.Unstructured:
		t, _, err := unstructured.NestedString(o.Object, "spec", "updateStrategy", "type")
		if err != nil {
			return false
		}
		return t == "" || t == string(apps.RollingUpdateStatefulSetStrategyType)
	default:
		panic("unsupported workload type to IsStatefulSetRollingUpdate function")
	}
}

func SetStatefulSetPartition(object client.Object, partition int32) {
	switch o := object.(type) {
	case *apps.StatefulSet:
		if o.Spec.UpdateStrategy.RollingUpdate == nil {
			o.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateStatefulSetStrategy{
				Partition: &partition,
			}
		} else {
			o.Spec.UpdateStrategy.RollingUpdate.Partition = &partition
		}
	case *appsv1beta1.StatefulSet:
		if o.Spec.UpdateStrategy.RollingUpdate == nil {
			o.Spec.UpdateStrategy.RollingUpdate = &appsv1beta1.RollingUpdateStatefulSetStrategy{
				Partition: &partition,
			}
		} else {
			o.Spec.UpdateStrategy.RollingUpdate.Partition = &partition
		}
	case *unstructured.Unstructured:
		spec, ok := o.Object["spec"].(map[string]interface{})
		if !ok {
			return
		}
		updateStrategy, ok := spec["updateStrategy"].(map[string]interface{})
		if !ok {
			spec["updateStrategy"] = map[string]interface{}{
				"type": apps.RollingUpdateStatefulSetStrategyType,
				"rollingUpdate": map[string]interface{}{
					"partition": int64(partition),
				},
			}
			return
		}
		rollingUpdate, ok := updateStrategy["rollingUpdate"].(map[string]interface{})
		if !ok {
			updateStrategy["rollingUpdate"] = map[string]interface{}{
				"partition": int64(partition),
			}
		} else {
			rollingUpdate["partition"] = int64(partition)
		}
	default:
		panic("unsupported workload type to SetStatefulSetPartition function")
	}
}

func GetStatefulSetPartition(object client.Object) int32 {
	partition := int32(0)
	switch o := object.(type) {
	case *apps.StatefulSet:
		if o.Spec.UpdateStrategy.RollingUpdate != nil && o.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
			partition = *o.Spec.UpdateStrategy.RollingUpdate.Partition
		}
	case *appsv1beta1.StatefulSet:
		if o.Spec.UpdateStrategy.RollingUpdate != nil && o.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
			partition = *o.Spec.UpdateStrategy.RollingUpdate.Partition
		}
	case *unstructured.Unstructured:
		field, found, err := unstructured.NestedInt64(o.Object, "spec", "updateStrategy", "rollingUpdate", "partition")
		if err == nil && found {
			partition = int32(field)
		}
	default:
		panic("unsupported workload type to GetStatefulSetPartition function")
	}
	return partition
}

func IsStatefulSetUnorderedUpdate(object client.Object) bool {
	switch o := object.(type) {
	case *apps.StatefulSet:
		return false
	case *appsv1beta1.StatefulSet:
		return o.Spec.UpdateStrategy.RollingUpdate != nil && o.Spec.UpdateStrategy.RollingUpdate.UnorderedUpdate != nil
	case *unstructured.Unstructured:
		field, found, err := unstructured.NestedFieldNoCopy(o.Object, "spec", "updateStrategy", "rollingUpdate", "unorderedUpdate")
		if err != nil || !found {
			return false
		}
		return field != nil
	default:
		panic("unsupported workload type to IsStatefulSetUnorderedUpdate function")
	}
}

func getStatefulSetMaxUnavailable(object client.Object) *intstr.IntOrString {
	switch o := object.(type) {
	case *apps.StatefulSet:
		return nil
	case *appsv1beta1.StatefulSet:
		if o.Spec.UpdateStrategy.RollingUpdate != nil {
			return o.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable
		}
		return nil
	case *unstructured.Unstructured:
		m, found, err := unstructured.NestedFieldCopy(o.Object, "spec", "updateStrategy", "rollingUpdate", "maxUnavailable")
		if err == nil && found {
			return unmarshalIntStr(m)
		}
		return nil
	default:
		panic("unsupported workload type to getStatefulSetMaxUnavailable function")
	}
}

func ParseWorkloadStatus(object client.Object) *WorkloadStatus {
	switch o := object.(type) {
	case *apps.Deployment:
		return &WorkloadStatus{
			Replicas:           o.Status.Replicas,
			ReadyReplicas:      o.Status.ReadyReplicas,
			AvailableReplicas:  o.Status.AvailableReplicas,
			UpdatedReplicas:    o.Status.UpdatedReplicas,
			ObservedGeneration: o.Status.ObservedGeneration,
		}

	case *appsv1alpha1.CloneSet:
		return &WorkloadStatus{
			Replicas:             o.Status.Replicas,
			ReadyReplicas:        o.Status.ReadyReplicas,
			AvailableReplicas:    o.Status.AvailableReplicas,
			UpdatedReplicas:      o.Status.UpdatedReplicas,
			UpdatedReadyReplicas: o.Status.UpdatedReadyReplicas,
			ObservedGeneration:   o.Status.ObservedGeneration,
			UpdateRevision:       o.Status.UpdateRevision,
			StableRevision:       o.Status.CurrentRevision,
		}

	case *apps.StatefulSet:
		return &WorkloadStatus{
			Replicas:           o.Status.Replicas,
			ReadyReplicas:      o.Status.ReadyReplicas,
			AvailableReplicas:  o.Status.AvailableReplicas,
			UpdatedReplicas:    o.Status.UpdatedReplicas,
			ObservedGeneration: o.Status.ObservedGeneration,
			UpdateRevision:     o.Status.UpdateRevision,
			StableRevision:     o.Status.CurrentRevision,
		}

	case *appsv1beta1.StatefulSet:
		return &WorkloadStatus{
			Replicas:           o.Status.Replicas,
			ReadyReplicas:      o.Status.ReadyReplicas,
			AvailableReplicas:  o.Status.AvailableReplicas,
			UpdatedReplicas:    o.Status.UpdatedReplicas,
			ObservedGeneration: o.Status.ObservedGeneration,
			UpdateRevision:     o.Status.UpdateRevision,
			StableRevision:     o.Status.CurrentRevision,
		}

	case *unstructured.Unstructured:
		return &WorkloadStatus{
			ObservedGeneration:   int64(parseStatusIntFromUnstructured(o, "observedGeneration")),
			Replicas:             int32(parseStatusIntFromUnstructured(o, "replicas")),
			ReadyReplicas:        int32(parseStatusIntFromUnstructured(o, "readyReplicas")),
			UpdatedReplicas:      int32(parseStatusIntFromUnstructured(o, "updatedReplicas")),
			AvailableReplicas:    int32(parseStatusIntFromUnstructured(o, "availableReplicas")),
			UpdatedReadyReplicas: int32(parseStatusIntFromUnstructured(o, "updatedReadyReplicas")),
			UpdateRevision:       parseStatusStringFromUnstructured(o, "updateRevision"),
			StableRevision:       parseStatusStringFromUnstructured(o, "currentRevision"),
		}

	default:
		panic("unsupported workload type to ParseWorkloadStatus function")
	}
}

// GetReplicas returns the replicas of a client workload object
func GetReplicas(object client.Object) int32 {
	switch o := object.(type) {
	case *apps.Deployment:
		return *o.Spec.Replicas
	case *appsv1alpha1.CloneSet:
		return *o.Spec.Replicas
	case *apps.StatefulSet:
		return *o.Spec.Replicas
	case *appsv1beta1.StatefulSet:
		return *o.Spec.Replicas
	case *unstructured.Unstructured:
		return parseReplicasFromUnstructured(o)
	default:
		panic("unsupported workload type to GetReplicas function")
	}
}

// GetTemplate returns the pod template spec of a client workload object
func GetTemplate(object client.Object) *corev1.PodTemplateSpec {
	switch o := object.(type) {
	case *apps.Deployment:
		return &o.Spec.Template
	case *appsv1alpha1.CloneSet:
		return &o.Spec.Template
	case *apps.StatefulSet:
		return &o.Spec.Template
	case *appsv1beta1.StatefulSet:
		return &o.Spec.Template
	case *unstructured.Unstructured:
		return parseTemplateFromUnstructured(o)
	default:
		panic("unsupported workload type to GetTemplate function")
	}
}

// getSelector finds the labelSelector of a client object and returns it parsed as labels.Selector
func getSelector(object client.Object) (labels.Selector, error) {
	switch o := object.(type) {
	case *apps.Deployment:
		return metav1.LabelSelectorAsSelector(o.Spec.Selector)
	case *appsv1alpha1.CloneSet:
		return metav1.LabelSelectorAsSelector(o.Spec.Selector)
	case *apps.StatefulSet:
		return metav1.LabelSelectorAsSelector(o.Spec.Selector)
	case *appsv1beta1.StatefulSet:
		return metav1.LabelSelectorAsSelector(o.Spec.Selector)
	case *unstructured.Unstructured:
		return parseSelectorFromUnstructured(o)
	default:
		panic("unsupported workload type to getSelector function")
	}
}

// getMetadata parses the whole metadata field from a client workload object
func getMetadata(object client.Object) *metav1.ObjectMeta {
	switch o := object.(type) {
	case *apps.Deployment:
		return &o.ObjectMeta
	case *appsv1alpha1.CloneSet:
		return &o.ObjectMeta
	case *apps.StatefulSet:
		return &o.ObjectMeta
	case *appsv1beta1.StatefulSet:
		return &o.ObjectMeta
	case *unstructured.Unstructured:
		return parseMetadataFromUnstructured(o)
	default:
		panic("unsupported workload type to getMetadata function")
	}
}

// parseReplicasFromUnstructured parses replicas from an unstructured workload object
func parseReplicasFromUnstructured(object *unstructured.Unstructured) int32 {
	replicas := int32(1)
	field, found, err := unstructured.NestedInt64(object.Object, "spec", "replicas")
	if err == nil && found {
		replicas = int32(field)
	}
	return replicas
}

// parseStatusIntFromUnstructured parses an int-typed field from an unstructured workload object status
func parseStatusIntFromUnstructured(object *unstructured.Unstructured, field string) int64 {
	value, found, err := unstructured.NestedInt64(object.Object, "status", field)
	if err == nil && found {
		return value
	}
	return 0
}

// parseStatusStringFromUnstructured parses a string-typed field from an unstructured workload object status
func parseStatusStringFromUnstructured(object *unstructured.Unstructured, field string) string {
	value, found, err := unstructured.NestedFieldNoCopy(object.Object, "status", field)
	if err == nil && found {
		return value.(string)
	}
	return ""
}

// parseSelectorFromUnstructured parses the labelSelector as a selector from an unstructured workload object
func parseSelectorFromUnstructured(object *unstructured.Unstructured) (labels.Selector, error) {
	m, found, err := unstructured.NestedFieldNoCopy(object.Object, "spec", "selector")
	if err != nil || !found {
		return nil, err
	}
	byteInfo, _ := json.Marshal(m)
	labelSelector := &metav1.LabelSelector{}
	_ = json.Unmarshal(byteInfo, labelSelector)
	return metav1.LabelSelectorAsSelector(labelSelector)
}

// parseTemplateFromUnstructured parses the pod template from an unstructured workload object
func parseTemplateFromUnstructured(object *unstructured.Unstructured) *corev1.PodTemplateSpec {
	t, found, err := unstructured.NestedFieldNoCopy(object.Object, "spec", "template")
	if err != nil || !found {
		return nil
	}
	template := &corev1.PodTemplateSpec{}
	templateByte, _ := json.Marshal(t)
	_ = json.Unmarshal(templateByte, template)
	return template
}

// parseMetadataFromUnstructured parses the whole metadata field from an unstructured workload object
func parseMetadataFromUnstructured(object *unstructured.Unstructured) *metav1.ObjectMeta {
	m, found, err := unstructured.NestedMap(object.Object, "metadata")
	if err != nil || !found {
		return nil
	}
	data, _ := json.Marshal(m)
	meta := &metav1.ObjectMeta{}
	_ = json.Unmarshal(data, meta)
	return meta
}

func unmarshalIntStr(m interface{}) *intstr.IntOrString {
	field := &intstr.IntOrString{}
	data, _ := json.Marshal(m)
	_ = json.Unmarshal(data, field)
	return field
}
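A small usage sketch of the partition helpers above on an unstructured StatefulSet, assuming they are importable from this util package as the diff suggests (it mirrors what the unit test below exercises):

    package main

    import (
    	"fmt"

    	"github.com/openkruise/rollouts/pkg/util"
    	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
    	// Minimal unstructured StatefulSet carrying only the fields the helpers read.
    	sts := &unstructured.Unstructured{Object: map[string]interface{}{
    		"spec": map[string]interface{}{
    			"updateStrategy": map[string]interface{}{
    				"rollingUpdate": map[string]interface{}{
    					"partition": int64(5),
    				},
    			},
    		},
    	}}
    	fmt.Println(util.GetStatefulSetPartition(sts)) // 5
    	util.SetStatefulSetPartition(sts, 7)
    	fmt.Println(util.GetStatefulSetPartition(sts)) // 7
    }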
@@ -0,0 +1,376 @@
package util

import (
	"reflect"
	"strings"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	appsv1pub "github.com/openkruise/kruise-api/apps/pub"
	appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
	appsv1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

var (
	template = corev1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "unit-test",
			Name:      "pod-demo",
			Labels: map[string]string{
				"app": "demo",
			},
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{
					Name:  "main",
					Image: "busybox:1.32",
				},
			},
		},
	}

	nativeStatefulSet = appsv1.StatefulSet{
		TypeMeta: metav1.TypeMeta{
			APIVersion: appsv1.SchemeGroupVersion.String(),
			Kind:       "StatefulSet",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  "unit-test",
			Name:       "native-statefulset-demo",
			Generation: 10,
			UID:        uuid.NewUUID(),
			Annotations: map[string]string{
				"rollouts.kruise.io/unit-test-anno": "true",
			},
			Labels: map[string]string{
				"rollouts.kruise.io/unit-test-label": "true",
			},
		},
		Spec: appsv1.StatefulSetSpec{
			Replicas: pointer.Int32(10),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "demo",
				},
			},
			Template: template,
			UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
				Type: appsv1.RollingUpdateStatefulSetStrategyType,
				RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
					Partition: pointer.Int32(5),
				},
			},
		},
		Status: appsv1.StatefulSetStatus{
			ObservedGeneration: int64(10),
			Replicas:           9,
			ReadyReplicas:      8,
			UpdatedReplicas:    5,
			CurrentReplicas:    4,
			AvailableReplicas:  7,
			CurrentRevision:    "sts-version1",
			UpdateRevision:     "sts-version2",
		},
	}

	advancedStatefulSet = appsv1beta1.StatefulSet{
		TypeMeta: metav1.TypeMeta{
			APIVersion: appsv1beta1.SchemeGroupVersion.String(),
			Kind:       "StatefulSet",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  "unit-test",
			Name:       "advanced-statefulset-demo",
			Generation: 10,
			UID:        uuid.NewUUID(),
			Annotations: map[string]string{
				"rollouts.kruise.io/unit-test-anno": "true",
			},
			Labels: map[string]string{
				"rollouts.kruise.io/unit-test-label": "true",
			},
		},
		Spec: appsv1beta1.StatefulSetSpec{
			Replicas: pointer.Int32(10),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "demo",
				},
			},
			Template: template,
			UpdateStrategy: appsv1beta1.StatefulSetUpdateStrategy{
				Type: appsv1.RollingUpdateStatefulSetStrategyType,
				RollingUpdate: &appsv1beta1.RollingUpdateStatefulSetStrategy{
					Partition:      pointer.Int32(5),
					MaxUnavailable: &intstr.IntOrString{Type: intstr.String, StrVal: "10%"},
					UnorderedUpdate: &appsv1beta1.UnorderedUpdateStrategy{
						PriorityStrategy: &appsv1pub.UpdatePriorityStrategy{
							OrderPriority: []appsv1pub.UpdatePriorityOrderTerm{
								{
									OrderedKey: "order-key",
								},
							},
						},
					},
				},
			},
		},
		Status: appsv1beta1.StatefulSetStatus{
			ObservedGeneration: int64(10),
			Replicas:           9,
			ReadyReplicas:      8,
			UpdatedReplicas:    5,
			AvailableReplicas:  7,
			CurrentRevision:    "sts-version1",
			UpdateRevision:     "sts-version2",
		},
	}

	cloneset = appsv1alpha1.CloneSet{
		TypeMeta: metav1.TypeMeta{
			APIVersion: appsv1alpha1.SchemeGroupVersion.String(),
			Kind:       "CloneSet",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  "unit-test",
			Name:       "cloneset-demo",
			Generation: 10,
			UID:        uuid.NewUUID(),
			Annotations: map[string]string{
				"rollouts.kruise.io/unit-test-anno": "true",
			},
			Labels: map[string]string{
				"rollouts.kruise.io/unit-test-label": "true",
			},
		},
		Spec: appsv1alpha1.CloneSetSpec{
			Replicas: pointer.Int32(10),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "demo",
				},
			},
			Template: template,
			UpdateStrategy: appsv1alpha1.CloneSetUpdateStrategy{
				Type:           appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType,
				Partition:      &intstr.IntOrString{Type: intstr.String, StrVal: "20%"},
				MaxUnavailable: &intstr.IntOrString{Type: intstr.String, StrVal: "10%"},
				PriorityStrategy: &appsv1pub.UpdatePriorityStrategy{
					OrderPriority: []appsv1pub.UpdatePriorityOrderTerm{
						{
							OrderedKey: "order-key",
						},
					},
				},
			},
		},
		Status: appsv1alpha1.CloneSetStatus{
			ObservedGeneration:   int64(10),
			Replicas:             9,
			ReadyReplicas:        8,
			UpdatedReplicas:      5,
			UpdatedReadyReplicas: 4,
			AvailableReplicas:    7,
			CurrentRevision:      "sts-version1",
			UpdateRevision:       "sts-version2",
		},
	}

	deployment = appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: appsv1.SchemeGroupVersion.String(),
			Kind:       "Deployment",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  "unit-test",
			Name:       "deployment-demo",
			Generation: 10,
			UID:        uuid.NewUUID(),
			Annotations: map[string]string{
				"rollouts.kruise.io/unit-test-anno": "true",
			},
			Labels: map[string]string{
				"rollouts.kruise.io/unit-test-label": "true",
			},
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: pointer.Int32(10),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "demo",
				},
			},
			Template: template,
			Strategy: appsv1.DeploymentStrategy{
				Type: appsv1.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &appsv1.RollingUpdateDeployment{
					MaxUnavailable: &intstr.IntOrString{Type: intstr.String, StrVal: "10%"},
				},
			},
		},
		Status: appsv1.DeploymentStatus{
			ObservedGeneration: int64(10),
			Replicas:           9,
			ReadyReplicas:      8,
			UpdatedReplicas:    5,
			AvailableReplicas:  7,
		},
	}
)

func TestStatefulSetParse(t *testing.T) {
	RegisterFailHandler(Fail)

	cases := []struct {
		name string
		Get  func() *unstructured.Unstructured
	}{
		{
			name: "native statefulset parse without unorderedUpdate",
			Get: func() *unstructured.Unstructured {
				sts := nativeStatefulSet.DeepCopy()
				object, err := runtime.DefaultUnstructuredConverter.ToUnstructured(sts)
				Expect(err).NotTo(HaveOccurred())
				return &unstructured.Unstructured{Object: object}
			},
		},
		{
			name: "advanced statefulset parse with unorderedUpdate",
			Get: func() *unstructured.Unstructured {
				sts := advancedStatefulSet.DeepCopy()
				object, err := runtime.DefaultUnstructuredConverter.ToUnstructured(sts)
				Expect(err).NotTo(HaveOccurred())
				return &unstructured.Unstructured{Object: object}
			},
		},
	}

	for _, cs := range cases {
		t.Run(cs.name, func(t *testing.T) {
			object := cs.Get()
			Expect(IsStatefulSetRollingUpdate(object)).Should(BeTrue())
			if strings.Contains(cs.name, "native") {
				Expect(IsStatefulSetUnorderedUpdate(object)).Should(BeFalse())
				Expect(getStatefulSetMaxUnavailable(object)).Should(BeNil())
			} else {
				Expect(IsStatefulSetUnorderedUpdate(object)).Should(BeTrue())
				Expect(reflect.DeepEqual(getStatefulSetMaxUnavailable(object), &intstr.IntOrString{Type: intstr.String, StrVal: "10%"})).Should(BeTrue())
			}
			Expect(GetStatefulSetPartition(object)).Should(BeNumerically("==", 5))
			SetStatefulSetPartition(object, 7)
			Expect(GetStatefulSetPartition(object)).Should(BeNumerically("==", 7))
		})
	}
}

func TestWorkloadParse(t *testing.T) {
	RegisterFailHandler(Fail)

	cases := []struct {
		name string
		Get  func() client.Object
	}{
		{
			name: "native statefulset parse",
			Get: func() client.Object {
				return nativeStatefulSet.DeepCopy()
			},
		},
		{
			name: "advanced statefulset parse",
			Get: func() client.Object {
				return advancedStatefulSet.DeepCopy()
			},
		},
		{
			name: "cloneset parse",
			Get: func() client.Object {
				return cloneset.DeepCopy()
			},
		},
		{
			name: "deployment parse",
			Get: func() client.Object {
				return deployment.DeepCopy()
			},
		},
	}

	for _, cs := range cases {
		t.Run(cs.name, func(t *testing.T) {
			object := cs.Get()
			switch o := object.(type) {
			case *appsv1.Deployment:
				Expect(GetReplicas(object)).Should(BeNumerically("==", *o.Spec.Replicas))
				selector, err := metav1.LabelSelectorAsSelector(o.Spec.Selector)
				Expect(err).NotTo(HaveOccurred())
				parsedSelector, err := getSelector(object)
				Expect(err).NotTo(HaveOccurred())
				Expect(reflect.DeepEqual(parsedSelector, selector)).Should(BeTrue())
			case *appsv1alpha1.CloneSet:
				Expect(GetReplicas(object)).Should(BeNumerically("==", *o.Spec.Replicas))
				selector, err := metav1.LabelSelectorAsSelector(o.Spec.Selector)
				Expect(err).NotTo(HaveOccurred())
				parsedSelector, err := getSelector(object)
				Expect(err).NotTo(HaveOccurred())
				Expect(reflect.DeepEqual(parsedSelector, selector)).Should(BeTrue())
			case *appsv1.StatefulSet:
				uo, err := runtime.DefaultUnstructuredConverter.ToUnstructured(o)
				Expect(err).NotTo(HaveOccurred())
				uobject := &unstructured.Unstructured{Object: uo}
				Expect(reflect.DeepEqual(GetTemplate(uobject), &o.Spec.Template)).Should(BeTrue())
				statefulsetInfo := ParseStatefulSetInfo(uobject, client.ObjectKeyFromObject(uobject))
				{
					Expect(statefulsetInfo.MaxUnavailable).Should(BeNil())
					Expect(reflect.DeepEqual(statefulsetInfo.ObjectMeta, o.ObjectMeta)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Generation, o.Generation)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Replicas, o.Spec.Replicas)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.Replicas, o.Status.Replicas)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.ReadyReplicas, o.Status.ReadyReplicas)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.AvailableReplicas, o.Status.AvailableReplicas)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.UpdatedReplicas, o.Status.UpdatedReplicas)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.ObservedGeneration, o.Status.ObservedGeneration)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.StableRevision, o.Status.CurrentRevision)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.UpdateRevision, o.Status.UpdateRevision)).Should(BeTrue())
					Expect(statefulsetInfo.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 0))
					selector, err := metav1.LabelSelectorAsSelector(o.Spec.Selector)
					Expect(err).NotTo(HaveOccurred())
					Expect(reflect.DeepEqual(statefulsetInfo.Selector, selector)).Should(BeTrue())
				}
			case *appsv1beta1.StatefulSet:
				uo, err := runtime.DefaultUnstructuredConverter.ToUnstructured(o)
				Expect(err).NotTo(HaveOccurred())
				uobject := &unstructured.Unstructured{Object: uo}
				Expect(reflect.DeepEqual(GetTemplate(uobject), &o.Spec.Template)).Should(BeTrue())
				statefulsetInfo := ParseStatefulSetInfo(uobject, client.ObjectKeyFromObject(uobject))
				{
					Expect(reflect.DeepEqual(statefulsetInfo.ObjectMeta, o.ObjectMeta)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Generation, o.Generation)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Replicas, o.Spec.Replicas)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.MaxUnavailable, o.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.Replicas, o.Status.Replicas)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.ReadyReplicas, o.Status.ReadyReplicas)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.AvailableReplicas, o.Status.AvailableReplicas)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.UpdatedReplicas, o.Status.UpdatedReplicas)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.ObservedGeneration, o.Status.ObservedGeneration)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.StableRevision, o.Status.CurrentRevision)).Should(BeTrue())
					Expect(reflect.DeepEqual(statefulsetInfo.Status.UpdateRevision, o.Status.UpdateRevision)).Should(BeTrue())
					Expect(statefulsetInfo.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 0))
					selector, err := metav1.LabelSelectorAsSelector(o.Spec.Selector)
					Expect(err).NotTo(HaveOccurred())
					Expect(reflect.DeepEqual(statefulsetInfo.Selector, selector)).Should(BeTrue())
				}
			}
		})
	}
}
@@ -1,15 +1,29 @@
/*
Copyright 2022 The Kruise Authors.
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"context"
	"fmt"
	"strings"

	utilclient "github.com/openkruise/rollouts/pkg/util/client"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -54,6 +68,7 @@ func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodC
	return -1, nil
}

// IsConsistentWithRevision returns true iff the pod matches the given revision
func IsConsistentWithRevision(pod *v1.Pod, revision string) bool {
	if pod.Labels[appsv1.DefaultDeploymentUniqueLabelKey] != "" &&
		strings.HasSuffix(revision, pod.Labels[appsv1.DefaultDeploymentUniqueLabelKey]) {
@@ -64,10 +79,10 @@ func IsConsistentWithRevision(pod *v1.Pod, revision string) bool {
		strings.HasSuffix(revision, pod.Labels[appsv1.ControllerRevisionHashLabelKey]) {
		return true
	}

	return false
}

// FilterActivePods will filter out terminating pods
func FilterActivePods(pods []*v1.Pod) []*v1.Pod {
	var activePods []*v1.Pod
	for _, pod := range pods {
@@ -78,18 +93,20 @@ func FilterActivePods(pods []*v1.Pod) []*v1.Pod {
	return activePods
}

// IsCompletedPod returns true if the pod is in the Failed or Succeeded phase
func IsCompletedPod(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded
}

func ListOwnedPods(c client.Client, object client.Object) ([]*v1.Pod, error) {
	selector, err := parseSelector(object)
// ListOwnedPods will list all pods belonging to the workload, including terminating pods
func ListOwnedPods(c client.Client, workload client.Object) ([]*v1.Pod, error) {
	selector, err := getSelector(workload)
	if err != nil {
		return nil, err
	}

	podLister := &v1.PodList{}
	err = c.List(context.TODO(), podLister, &client.ListOptions{LabelSelector: selector, Namespace: object.GetNamespace()}, utilclient.DisableDeepCopy)
	err = c.List(context.TODO(), podLister, &client.ListOptions{LabelSelector: selector, Namespace: workload.GetNamespace()}, utilclient.DisableDeepCopy)
	if err != nil {
		return nil, err
	}
@@ -101,7 +118,7 @@ func ListOwnedPods(c client.Client, object client.Object) ([]*v1.Pod, error) {
		}
		// we should find their indirect owner-relationship,
		// such as pod -> replicaset -> deployment
		owned, err := IsOwnedBy(c, pod, object)
		owned, err := IsOwnedBy(c, pod, workload)
		if err != nil {
			return nil, err
		} else if !owned {
@@ -111,42 +128,3 @@ func ListOwnedPods(c client.Client, object client.Object) ([]*v1.Pod, error) {
	}
	return pods, nil
}

func PatchPodBatchLabel(c client.Client, pods []*v1.Pod, rolloutID string, batchID int32, updateRevision string, canaryGoal int32, logKey types.NamespacedName) (bool, error) {
	// the number of active pods that have been patched successfully
	patchedUpdatedReplicas := int32(0)
	for _, pod := range pods {
		podRolloutID := pod.Labels[RolloutIDLabel]
		if pod.DeletionTimestamp.IsZero() {
			// we don't patch the label for active pods of the old revision
			if !IsConsistentWithRevision(pod, updateRevision) {
				continue
			}
			// if it has already been patched, count it and skip it
			if podRolloutID == rolloutID {
				patchedUpdatedReplicas++
				continue
			}
		}

		// for terminating pods and the rest, skip them if they have already been patched
		if podRolloutID == rolloutID {
			continue
		}

		podClone := pod.DeepCopy()
		by := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s","%s":"%d"}}}`, RolloutIDLabel, rolloutID, RolloutBatchIDLabel, batchID)
		err := c.Patch(context.TODO(), podClone, client.RawPatch(types.StrategicMergePatchType, []byte(by)))
		if err != nil {
			klog.Errorf("Failed to patch Pod(%v) batchID, err: %v", client.ObjectKeyFromObject(podClone), err)
			return false, err
		}

		if pod.DeletionTimestamp.IsZero() && IsConsistentWithRevision(pod, updateRevision) {
			patchedUpdatedReplicas++
		}
	}

	klog.V(3).Infof("Patch %v pods with batchID for batchRelease %v, goal is %d pods", patchedUpdatedReplicas, logKey, canaryGoal)
	return patchedUpdatedReplicas >= canaryGoal, nil
}
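With PatchPodBatchLabel in place, every pod upgraded in a batch carries the rollout-id and batch-id labels, so a batch can be audited after the fact. A minimal sketch of such a query, assuming an initialized controller-runtime client and the label keys defined in this patch (the function name and ID values are illustrative only):

	// listBatchPods returns the pods that were labeled for a given rollout and
	// batch. rolloutID comes from Rollout.Spec.RolloutID; batchID is the batch
	// index that PatchPodBatchLabel wrote into RolloutBatchIDLabel.
	func listBatchPods(cli client.Client, ns, rolloutID, batchID string) ([]corev1.Pod, error) {
		pods := &corev1.PodList{}
		err := cli.List(context.TODO(), pods,
			client.InNamespace(ns),
			client.MatchingLabels{
				"apps.kruise.io/rollout-id":       rolloutID,
				"apps.kruise.io/rollout-batch-id": batchID,
			})
		return pods.Items, err
	}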
@@ -17,7 +17,10 @@ limitations under the License.
package util

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"strings"
	"time"

	kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
@@ -35,24 +38,6 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/source"
)

const (
	// InRolloutProgressingAnnotation marks the workload as entering the rollout progressing process,
	// and paused=false is not allowed during this process
	InRolloutProgressingAnnotation = "rollouts.kruise.io/in-progressing"
	// KruiseRolloutFinalizer is the finalizer added by the rollout controller
	KruiseRolloutFinalizer = "rollouts.kruise.io/rollout"
	// RolloutHashAnnotation records the hash of the rollout spec
	RolloutHashAnnotation = "rollouts.kruise.io/hash"
	// RolloutIDLabel is designed to distinguish each workload revision publication.
	// The value of RolloutIDLabel corresponds to Rollout.Spec.RolloutID.
	RolloutIDLabel = "apps.kruise.io/rollout-id"
	// RolloutBatchIDLabel is the label key of the batch id that will be patched to pods during a rollout.
	// RolloutBatchIDLabel is only patched when RolloutIDLabel is set.
	// Users can use RolloutIDLabel and RolloutBatchIDLabel to select the pods that were upgraded in a certain batch of a certain release.
	RolloutBatchIDLabel = "apps.kruise.io/rollout-batch-id"
	// WorkloadTypeLabel records the type of the workload referenced by a rollout
	WorkloadTypeLabel = "rollouts.kruise.io/workload-type"
)

// RolloutState is the value of annotation[rollouts.kruise.io/in-progressing]
type RolloutState struct {
	RolloutName string `json:"rolloutName"`
@@ -68,37 +53,48 @@ func GetRolloutState(annotations map[string]string) (*RolloutState, error) {
	return obj, err
}

func AddWorkloadWatcher(c controller.Controller, handler handler.EventHandler) error {
	if DiscoverGVK(ControllerKruiseKindCS) {
		// Watch changes to CloneSet
		err := c.Watch(&source.Kind{Type: &kruiseappsv1alpha1.CloneSet{}}, handler)
		if err != nil {
			return err
func IsRollbackInBatchPolicy(rollout *rolloutv1alpha1.Rollout, labels map[string]string) bool {
	// currently, only the case of no traffic routing is supported
	if len(rollout.Spec.Strategy.Canary.TrafficRoutings) > 0 {
		return false
	}
	workloadRef := rollout.Spec.ObjectRef.WorkloadRef
	// currently, only CloneSet and StatefulSet support this policy
	if workloadRef.Kind == ControllerKindSts.Kind ||
		workloadRef.Kind == ControllerKruiseKindCS.Kind ||
		strings.EqualFold(labels[WorkloadTypeLabel], ControllerKindSts.Kind) {
		value, ok := rollout.Annotations[RollbackInBatchAnnotation]
		if ok && value == "true" {
			return true
		}
	}
	return false
}

func AddWorkloadWatcher(c controller.Controller, handler handler.EventHandler) error {
	// Watch changes to Deployment
	err := c.Watch(&source.Kind{Type: &apps.Deployment{}}, handler)
	if err != nil {
		return err
	}

	// Watch changes to Advanced StatefulSet, use unstructured informer
	if DiscoverGVK(ControllerKruiseKindSts) {
		objectType := &unstructured.Unstructured{}
		objectType.SetGroupVersionKind(kruiseappsv1beta1.SchemeGroupVersion.WithKind("StatefulSet"))
		err = c.Watch(&source.Kind{Type: objectType}, handler)
	// Watch changes to Native StatefulSet
	err = c.Watch(&source.Kind{Type: &apps.StatefulSet{}}, handler)
	if err != nil {
		return err
	}
	// Watch changes to CloneSet if it has the CRD
	if DiscoverGVK(ControllerKruiseKindCS) {
		err := c.Watch(&source.Kind{Type: &kruiseappsv1alpha1.CloneSet{}}, handler)
		if err != nil {
			return err
		}
	}

	// Watch changes to Native StatefulSet, use unstructured informer
	objectType := &unstructured.Unstructured{}
	objectType.SetGroupVersionKind(apps.SchemeGroupVersion.WithKind("StatefulSet"))
	err = c.Watch(&source.Kind{Type: objectType}, handler)
	if err != nil {
		return err
	// Watch changes to Advanced StatefulSet if it has the CRD
	if DiscoverGVK(ControllerKruiseKindSts) {
		err := c.Watch(&source.Kind{Type: &kruiseappsv1beta1.StatefulSet{}}, handler)
		if err != nil {
			return err
		}
	}
	return nil
}
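In short, the rollback-in-batch gate opens only when three things hold at once: no traffic routing is configured, the workload is a CloneSet or a StatefulSet, and the user has explicitly opted in via the RollbackInBatchAnnotation. A hedged sketch of a Rollout that satisfies the gate; the exact struct shapes (ObjectRef, CanaryStrategy) are assumed from the field paths used above, not confirmed by this diff:

	rollout := &rolloutv1alpha1.Rollout{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{RollbackInBatchAnnotation: "true"},
		},
		Spec: rolloutv1alpha1.RolloutSpec{
			ObjectRef: rolloutv1alpha1.ObjectRef{
				WorkloadRef: &rolloutv1alpha1.WorkloadRef{Kind: "CloneSet"},
			},
			Strategy: rolloutv1alpha1.RolloutStrategy{
				Canary: &rolloutv1alpha1.CanaryStrategy{}, // no TrafficRoutings configured
			},
		},
	}
	ok := IsRollbackInBatchPolicy(rollout, nil) // true: opted in, CloneSet, no traffic routing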
@@ -155,6 +151,12 @@ func AddWatcherDynamically(c controller.Controller, h handler.EventHandler, gvk
	return true, c.Watch(&source.Kind{Type: object}, h)
}

func HashReleasePlanBatches(releasePlan *rolloutv1alpha1.ReleasePlan) string {
	by, _ := json.Marshal(releasePlan)
	md5Hash := sha256.Sum256(by)
	return hex.EncodeToString(md5Hash[:])
}

func DumpJSON(o interface{}) string {
	by, _ := json.Marshal(o)
	return string(by)
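Hashing the marshaled plan gives a cheap change detector: a controller can store the hash and recompute it on the next reconcile to notice user edits to the batches. A minimal sketch under that assumption (the helper name is hypothetical; storedHash would come from a status field or annotation):

	// planChanged reports whether the release plan differs from the one whose
	// hash was recorded earlier.
	func planChanged(storedHash string, plan *rolloutv1alpha1.ReleasePlan) bool {
		return storedHash != HashReleasePlanBatches(plan)
	}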
@@ -18,10 +18,7 @@ package util

import (
	"context"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"hash"
	"hash/fnv"
@@ -29,7 +26,7 @@ import (

	"github.com/davecgh/go-spew/spew"
	appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
	"github.com/openkruise/rollouts/api/v1alpha1"
	appsv1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
	"github.com/openkruise/rollouts/pkg/feature"
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
@@ -43,33 +40,10 @@ import (
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/util/retry"
	"k8s.io/klog/v2"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const (
	// BatchReleaseControlAnnotation records the controller info of the batchRelease that controls the workload during a rollout
	BatchReleaseControlAnnotation = "batchrelease.rollouts.kruise.io/control-info"
	// CanaryDeploymentLabel labels the canary deployment that is created by the batchRelease controller
	CanaryDeploymentLabel = "rollouts.kruise.io/canary-deployment"
	// CanaryDeploymentFinalizer is a finalizer attached to resources patched by the batchRelease controller
	CanaryDeploymentFinalizer = "finalizer.rollouts.kruise.io/batch-release"

	// We omit vowels from the set of available characters to reduce the chances
	// of "bad words" being formed.
	alphanums = "bcdfghjklmnpqrstvwxz2456789"
)

type WorkloadType string

const (
	StatefulSetType WorkloadType = "statefulset"
	DeploymentType  WorkloadType = "deployment"
	CloneSetType    WorkloadType = "cloneset"
)

var (
	knownWorkloadGVKs = []*schema.GroupVersionKind{
		&ControllerKindRS,
@@ -77,6 +51,7 @@ var (
		&ControllerKindSts,
		&ControllerKruiseKindCS,
		&ControllerKruiseKindSts,
		&ControllerKruiseOldKindSts,
	}
)
@@ -92,12 +67,12 @@ type WorkloadStatus struct {
}

type WorkloadInfo struct {
	metav1.ObjectMeta
	Paused         bool
	Replicas       *int32
	GVKWithName    string
	Selector       labels.Selector
	MaxUnavailable *intstr.IntOrString
	Metadata       *metav1.ObjectMeta
	Status         *WorkloadStatus
}
@@ -148,21 +123,7 @@ func SafeEncodeString(s string) string {
	return string(r)
}

func CalculateNewBatchTarget(rolloutSpec *v1alpha1.ReleasePlan, workloadReplicas, currentBatch int) int {
	batchSize, _ := intstr.GetValueFromIntOrPercent(&rolloutSpec.Batches[currentBatch].CanaryReplicas, workloadReplicas, true)
	if batchSize > workloadReplicas {
		klog.Warningf("releasePlan has wrong batch replicas, batches[%d].replicas %v is more than workload.replicas %v", currentBatch, batchSize, workloadReplicas)
		batchSize = workloadReplicas
	} else if batchSize < 0 {
		klog.Warningf("releasePlan has wrong batch replicas, batches[%d].replicas %v is less than 0", currentBatch, batchSize)
		batchSize = 0
	}

	klog.V(3).InfoS("calculated the number of new pod size", "current batch", currentBatch,
		"new pod target", batchSize)
	return batchSize
}

// EqualIgnoreHash compares pod templates while ignoring the pod-template-hash label
func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
	t1Copy := template1.DeepCopy()
	t2Copy := template2.DeepCopy()
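CalculateNewBatchTarget resolves each batch's CanaryReplicas, whether an integer or a percentage, against the workload's replica count and clamps the result to [0, workloadReplicas]. A worked example with illustrative values: with 10 replicas and a 20% / 50% / 100% plan, the per-batch targets resolve to 2, 5, and 10 pods.

	plan := &v1alpha1.ReleasePlan{
		Batches: []v1alpha1.ReleaseBatch{
			{CanaryReplicas: intstr.FromString("20%")},
			{CanaryReplicas: intstr.FromString("50%")},
			{CanaryReplicas: intstr.FromString("100%")},
		},
	}
	for i := range plan.Batches {
		_ = CalculateNewBatchTarget(plan, 10, i) // yields 2, 5, 10
	}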
@@ -172,19 +133,7 @@ func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
	return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
}

func HashReleasePlanBatches(releasePlan *v1alpha1.ReleasePlan) string {
	by, _ := json.Marshal(releasePlan)
	md5Hash := sha256.Sum256(by)
	return hex.EncodeToString(md5Hash[:])
}

type FinalizerOpType string

const (
	AddFinalizerOpType    FinalizerOpType = "Add"
	RemoveFinalizerOpType FinalizerOpType = "Remove"
)

// UpdateFinalizer adds or removes a finalizer on an object
func UpdateFinalizer(c client.Client, object client.Object, op FinalizerOpType, finalizer string) error {
	switch op {
	case AddFinalizerOpType, RemoveFinalizerOpType:
@@ -218,15 +167,7 @@ func UpdateFinalizer(c client.Client, object client.Object, op FinalizerOpType,
	})
}

func PatchSpec(c client.Client, object client.Object, spec map[string]interface{}) error {
	patchByte, err := json.Marshal(map[string]interface{}{"spec": spec})
	if err != nil {
		return err
	}
	clone := object.DeepCopyObject().(client.Object)
	return c.Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, patchByte))
}

// GetEmptyWorkloadObject returns a specific empty object based on the given gvk
func GetEmptyWorkloadObject(gvk schema.GroupVersionKind) client.Object {
	if !IsSupportedWorkload(gvk) {
		return nil
@@ -239,6 +180,10 @@ func GetEmptyWorkloadObject(gvk schema.GroupVersionKind) client.Object {
		return &apps.Deployment{}
	case ControllerKruiseKindCS:
		return &appsv1alpha1.CloneSet{}
	case ControllerKindSts:
		return &apps.StatefulSet{}
	case ControllerKruiseKindSts, ControllerKruiseOldKindSts:
		return &appsv1beta1.StatefulSet{}
	default:
		unstructuredObject := &unstructured.Unstructured{}
		unstructuredObject.SetGroupVersionKind(gvk)
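GetEmptyWorkloadObject pairs naturally with a generic Get: resolve the GVK (typically from an ownerReference), ask for an empty object of that shape, and fetch into it. A sketch under those assumptions (the wrapper name is hypothetical); for the default branch, the returned unstructured object already carries the GVK, so the same call works for CRDs without a typed client:

	func fetchWorkload(r client.Reader, gvk schema.GroupVersionKind, key types.NamespacedName) (client.Object, error) {
		obj := GetEmptyWorkloadObject(gvk)
		if obj == nil {
			return nil, nil // kind filtered out by IsSupportedWorkload
		}
		if err := r.Get(context.TODO(), key, obj); err != nil {
			return nil, err
		}
		return obj, nil
	}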
@@ -246,235 +191,7 @@ func GetEmptyWorkloadObject(gvk schema.GroupVersionKind) client.Object {
	}
}

func ReleaseWorkload(c client.Client, object client.Object) error {
	_, found := object.GetAnnotations()[BatchReleaseControlAnnotation]
	if !found {
		klog.V(3).Infof("Workload(%v) is already released", client.ObjectKeyFromObject(object))
		return nil
	}

	clone := object.DeepCopyObject().(client.Object)
	patchByte := []byte(fmt.Sprintf(`{"metadata":{"annotations":{"%s":null}}}`, BatchReleaseControlAnnotation))
	return c.Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, patchByte))
}

func ClaimWorkload(c client.Client, planController *v1alpha1.BatchRelease, object client.Object, patchUpdateStrategy map[string]interface{}) error {
	if controlInfo, ok := object.GetAnnotations()[BatchReleaseControlAnnotation]; ok && controlInfo != "" {
		ref := &metav1.OwnerReference{}
		err := json.Unmarshal([]byte(controlInfo), ref)
		if err == nil && ref.UID == planController.UID {
			klog.V(3).Infof("Workload(%v) has been controlled by this BatchRelease(%v), no need to claim again",
				client.ObjectKeyFromObject(object), client.ObjectKeyFromObject(planController))
			return nil
		} else {
			klog.Errorf("Failed to parse controller info from Workload(%v) annotation, error: %v, controller info: %+v",
				client.ObjectKeyFromObject(object), err, *ref)
		}
	}

	controlInfo, _ := json.Marshal(metav1.NewControllerRef(planController, planController.GetObjectKind().GroupVersionKind()))
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]string{
				BatchReleaseControlAnnotation: string(controlInfo),
			},
		},
		"spec": map[string]interface{}{
			"updateStrategy": patchUpdateStrategy,
		},
	}

	patchByte, _ := json.Marshal(patch)
	clone := object.DeepCopyObject().(client.Object)
	return c.Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, patchByte))
}
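ClaimWorkload stamps the control-info annotation and merges an updateStrategy fragment into spec in a single merge patch. A hedged sketch of how a BatchRelease might claim a StatefulSet-like workload; the partition value shown is illustrative and would normally start high enough to block every pod from updating:

	// release is the owning *v1alpha1.BatchRelease; workload is the target object.
	err := ClaimWorkload(cli, release, workload, map[string]interface{}{
		"rollingUpdate": map[string]interface{}{
			"partition": 100, // fully partitioned at claim time, lowered batch by batch
		},
	})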

func IsStatefulSetRollingUpdate(object *unstructured.Unstructured) bool {
	t, _, err := unstructured.NestedString(object.Object, "spec", "updateStrategy", "type")
	if err != nil {
		return false
	}
	return t == "" || t == string(apps.RollingUpdateStatefulSetStrategyType)
}

func SetStatefulSetPartition(object *unstructured.Unstructured, partition int32) {
	o := object.Object
	spec, ok := o["spec"].(map[string]interface{})
	if !ok {
		return
	}
	updateStrategy, ok := spec["updateStrategy"].(map[string]interface{})
	if !ok {
		spec["updateStrategy"] = map[string]interface{}{
			"type": apps.RollingUpdateStatefulSetStrategyType,
			"rollingUpdate": map[string]interface{}{
				"partition": pointer.Int32(partition),
			},
		}
		return
	}
	rollingUpdate, ok := updateStrategy["rollingUpdate"].(map[string]interface{})
	if !ok {
		updateStrategy["rollingUpdate"] = map[string]interface{}{
			"partition": pointer.Int32(partition),
		}
	} else {
		rollingUpdate["partition"] = pointer.Int32(partition)
	}
}

func GetStatefulSetPartition(object *unstructured.Unstructured) int32 {
	partition := int32(0)
	field, found, err := unstructured.NestedInt64(object.Object, "spec", "updateStrategy", "rollingUpdate", "partition")
	if err == nil && found {
		partition = int32(field)
	}
	return partition
}

func GetStatefulSetMaxUnavailable(object *unstructured.Unstructured) *intstr.IntOrString {
	m, found, err := unstructured.NestedFieldCopy(object.Object, "spec", "updateStrategy", "rollingUpdate", "maxUnavailable")
	if err == nil && found {
		return unmarshalIntStr(m)
	}
	return nil
}
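For StatefulSets the batch engine is the rollingUpdate partition: pods with an ordinal greater than or equal to the partition stay on the old revision, so updating N pods means setting partition = replicas - N. A sketch of one batch step over an unstructured StatefulSet, using the helpers above (the function name and batchTarget are illustrative):

	func advanceBatch(sts *unstructured.Unstructured, batchTarget int32) {
		if !IsStatefulSetRollingUpdate(sts) {
			return // only the RollingUpdate strategy is driven by partition
		}
		replicas := ParseReplicasFrom(sts)
		// pods with ordinal >= partition keep the old revision
		SetStatefulSetPartition(sts, replicas-batchTarget)
	}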
func ParseStatefulSetInfo(object *unstructured.Unstructured, namespacedName types.NamespacedName) *WorkloadInfo {
	workloadGVKWithName := fmt.Sprintf("%v(%v)", object.GroupVersionKind().String(), namespacedName)
	selector, err := parseSelector(object)
	if err != nil {
		klog.Errorf("Failed to parse selector for workload(%v)", workloadGVKWithName)
	}
	return &WorkloadInfo{
		Metadata:       parseMetadataFrom(object),
		MaxUnavailable: GetStatefulSetMaxUnavailable(object),
		Replicas:       pointer.Int32(ParseReplicasFrom(object)),
		GVKWithName:    workloadGVKWithName,
		Selector:       selector,
		Status:         ParseWorkloadStatus(object),
	}
}

func ParseWorkloadStatus(object client.Object) *WorkloadStatus {
	switch o := object.(type) {
	case *apps.Deployment:
		return &WorkloadStatus{
			Replicas:           o.Status.Replicas,
			ReadyReplicas:      o.Status.ReadyReplicas,
			AvailableReplicas:  o.Status.AvailableReplicas,
			UpdatedReplicas:    o.Status.UpdatedReplicas,
			ObservedGeneration: o.Status.ObservedGeneration,
		}

	case *appsv1alpha1.CloneSet:
		return &WorkloadStatus{
			Replicas:             o.Status.Replicas,
			ReadyReplicas:        o.Status.ReadyReplicas,
			AvailableReplicas:    o.Status.AvailableReplicas,
			UpdatedReplicas:      o.Status.UpdatedReplicas,
			UpdatedReadyReplicas: o.Status.UpdatedReadyReplicas,
			ObservedGeneration:   o.Status.ObservedGeneration,
		}

	case *unstructured.Unstructured:
		return &WorkloadStatus{
			ObservedGeneration:   int64(ParseStatusIntFrom(o, "observedGeneration")),
			Replicas:             int32(ParseStatusIntFrom(o, "replicas")),
			ReadyReplicas:        int32(ParseStatusIntFrom(o, "readyReplicas")),
			UpdatedReplicas:      int32(ParseStatusIntFrom(o, "updatedReplicas")),
			AvailableReplicas:    int32(ParseStatusIntFrom(o, "availableReplicas")),
			UpdatedReadyReplicas: int32(ParseStatusIntFrom(o, "updatedReadyReplicas")),
			UpdateRevision:       ParseStatusStringFrom(o, "updateRevision"),
			StableRevision:       ParseStatusStringFrom(o, "currentRevision"),
		}

	default:
		panic("unsupported workload type to ParseWorkloadStatus function")
	}
}

// ParseReplicasFrom parses replicas from an unstructured workload object
func ParseReplicasFrom(object *unstructured.Unstructured) int32 {
	replicas := int32(1)
	field, found, err := unstructured.NestedInt64(object.Object, "spec", "replicas")
	if err == nil && found {
		replicas = int32(field)
	}
	return replicas
}

// ParseTemplateFrom parses the pod template from an unstructured workload object
func ParseTemplateFrom(object *unstructured.Unstructured) *v1.PodTemplateSpec {
	t, found, err := unstructured.NestedFieldNoCopy(object.Object, "spec", "template")
	if err != nil || !found {
		return nil
	}
	template := &v1.PodTemplateSpec{}
	templateByte, _ := json.Marshal(t)
	_ = json.Unmarshal(templateByte, template)
	return template
}

// ParseStatusIntFrom parses an int-typed field from the status of an unstructured workload object
func ParseStatusIntFrom(object *unstructured.Unstructured, field string) int64 {
	value, found, err := unstructured.NestedInt64(object.Object, "status", field)
	if err == nil && found {
		return value
	}
	return 0
}

// ParseStatusStringFrom parses a string-typed field from the status of an unstructured workload object
func ParseStatusStringFrom(object *unstructured.Unstructured, field string) string {
	value, found, err := unstructured.NestedFieldNoCopy(object.Object, "status", field)
	if err == nil && found {
		return value.(string)
	}
	return ""
}

// parseMetadataFrom parses the whole metadata field from an unstructured workload object
func parseMetadataFrom(object *unstructured.Unstructured) *metav1.ObjectMeta {
	m, found, err := unstructured.NestedMap(object.Object, "metadata")
	if err != nil || !found {
		return nil
	}
	data, _ := json.Marshal(m)
	meta := &metav1.ObjectMeta{}
	_ = json.Unmarshal(data, meta)
	return meta
}

// parseSelector finds the labelSelector of a client object and parses it as a labels.Selector
func parseSelector(object client.Object) (labels.Selector, error) {
	switch o := object.(type) {
	case *apps.Deployment:
		return metav1.LabelSelectorAsSelector(o.Spec.Selector)
	case *appsv1alpha1.CloneSet:
		return metav1.LabelSelectorAsSelector(o.Spec.Selector)
	case *unstructured.Unstructured:
		m, found, err := unstructured.NestedFieldNoCopy(o.Object, "spec", "selector")
		if err != nil || !found {
			return nil, err
		}
		byteInfo, _ := json.Marshal(m)
		labelSelector := &metav1.LabelSelector{}
		_ = json.Unmarshal(byteInfo, labelSelector)
		return metav1.LabelSelectorAsSelector(labelSelector)
	default:
		panic("unsupported workload type to ParseSelector function")
	}
}

func unmarshalIntStr(m interface{}) *intstr.IntOrString {
	field := &intstr.IntOrString{}
	data, _ := json.Marshal(m)
	_ = json.Unmarshal(data, field)
	return field
}

// FilterActiveDeployment will filter out terminating deployments
func FilterActiveDeployment(ds []*apps.Deployment) []*apps.Deployment {
	var activeDs []*apps.Deployment
	for i := range ds {
@@ -485,23 +202,6 @@ func FilterActiveDeployment(ds []*apps.Deployment) []*apps.Deployment {
	return activeDs
}

func GenRandomStr(length int) string {
	randStr := rand.String(length)
	return rand.SafeEncodeString(randStr)
}

func IsSupportedWorkload(gvk schema.GroupVersionKind) bool {
	if !feature.NeedFilterWorkloadType() {
		return true
	}
	for _, known := range knownWorkloadGVKs {
		if gvk.Group == known.Group && gvk.Kind == known.Kind {
			return true
		}
	}
	return false
}

// GetOwnerWorkload returns the top-level workload that is controlled by the rollout;
// if the object has no owner, it just returns nil
func GetOwnerWorkload(r client.Reader, object client.Object) (client.Object, error) {
@@ -511,7 +211,7 @@ func GetOwnerWorkload(r client.Reader, object client.Object) (client.Object, err
	owner := metav1.GetControllerOf(object)
	// We just care about the top-level workload that is referred to by the rollout
	if owner == nil || len(object.GetAnnotations()[InRolloutProgressingAnnotation]) > 0 {
		return nil, nil
		return object, nil
	}

	ownerGvk := schema.FromAPIVersionAndKind(owner.APIVersion, owner.Kind)
@@ -551,6 +251,26 @@ func IsOwnedBy(r client.Reader, child, parent client.Object) (bool, error) {
	return IsOwnedBy(r, ownerObj, parent)
}

// IsSupportedWorkload returns true if the kind of the workload can be processed by Rollout
func IsSupportedWorkload(gvk schema.GroupVersionKind) bool {
	if !feature.NeedFilterWorkloadType() {
		return true
	}
	for _, known := range knownWorkloadGVKs {
		if gvk.Group == known.Group && gvk.Kind == known.Kind {
			return true
		}
	}
	return false
}

// IsWorkloadType returns true if the object matches the workload type
func IsWorkloadType(object client.Object, t WorkloadType) bool {
	return WorkloadType(strings.ToLower(object.GetLabels()[WorkloadTypeLabel])) == t
}

// GenRandomStr returns a safe encoded string with a specific length
func GenRandomStr(length int) string {
	randStr := rand.String(length)
	return rand.SafeEncodeString(randStr)
}
@@ -0,0 +1,207 @@
package util

import (
	"reflect"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
	appsv1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
	appsv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

var scheme *runtime.Scheme

func init() {
	scheme = runtime.NewScheme()
	_ = appsv1.AddToScheme(scheme)
	_ = appsv1beta1.AddToScheme(scheme)
	_ = appsv1alpha1.AddToScheme(scheme)
	_ = kruiseappsv1alpha1.AddToScheme(scheme)
}

func TestIsOwnedBy(t *testing.T) {
	RegisterFailHandler(Fail)

	cases := []struct {
		name          string
		TopologyBuild func() []client.Object
		Expect        bool
	}{
		{
			name: "direct",
			TopologyBuild: func() []client.Object {
				father := cloneset.DeepCopy()
				son := deployment.DeepCopy()
				son.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(father, father.GetObjectKind().GroupVersionKind()),
				})
				return []client.Object{father, son}
			},
			Expect: true,
		},
		{
			name: "indirect-2",
			TopologyBuild: func() []client.Object {
				father := cloneset.DeepCopy()
				son1 := deployment.DeepCopy()
				son1.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(father, father.GetObjectKind().GroupVersionKind()),
				})
				son2 := nativeStatefulSet.DeepCopy()
				son2.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(son1, son1.GetObjectKind().GroupVersionKind()),
				})
				return []client.Object{father, son1, son2}
			},
			Expect: true,
		},
		{
			name: "indirect-3",
			TopologyBuild: func() []client.Object {
				father := cloneset.DeepCopy()
				son1 := deployment.DeepCopy()
				son1.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(father, father.GetObjectKind().GroupVersionKind()),
				})
				son2 := nativeStatefulSet.DeepCopy()
				son2.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(son1, son1.GetObjectKind().GroupVersionKind()),
				})
				son3 := advancedStatefulSet.DeepCopy()
				son3.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(son2, son2.GetObjectKind().GroupVersionKind()),
				})
				return []client.Object{father, son1, son2, son3}
			},
			Expect: true,
		},
		{
			name: "indirect-3-false",
			TopologyBuild: func() []client.Object {
				father := cloneset.DeepCopy()
				son1 := deployment.DeepCopy()
				son2 := nativeStatefulSet.DeepCopy()
				son2.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(son1, son1.GetObjectKind().GroupVersionKind()),
				})
				son3 := advancedStatefulSet.DeepCopy()
				son3.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(son2, son2.GetObjectKind().GroupVersionKind()),
				})
				return []client.Object{father, son1, son2, son3}
			},
			Expect: false,
		},
	}
	for _, cs := range cases {
		t.Run(cs.name, func(t *testing.T) {
			objects := cs.TopologyBuild()
			father := objects[0]
			son := objects[len(objects)-1]
			cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objects...).Build()
			owned, err := IsOwnedBy(cli, son, father)
			Expect(err).NotTo(HaveOccurred())
			Expect(owned == cs.Expect).Should(BeTrue())
		})
	}
}

func TestGetOwnerWorkload(t *testing.T) {
	RegisterFailHandler(Fail)

	topWorkload := cloneset.DeepCopy()
	topWorkload.Annotations[BatchReleaseControlAnnotation] = "something"
	cases := []struct {
		name          string
		TopologyBuild func() []client.Object
		Expect        bool
	}{
		{
			name: "direct",
			TopologyBuild: func() []client.Object {
				father := topWorkload.DeepCopy()
				son := deployment.DeepCopy()
				son.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(father, father.GetObjectKind().GroupVersionKind()),
				})
				return []client.Object{father, son}
			},
			Expect: true,
		},
		{
			name: "indirect-2",
			TopologyBuild: func() []client.Object {
				father := topWorkload.DeepCopy()
				son1 := deployment.DeepCopy()
				son1.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(father, father.GetObjectKind().GroupVersionKind()),
				})
				son2 := nativeStatefulSet.DeepCopy()
				son2.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(son1, son1.GetObjectKind().GroupVersionKind()),
				})
				return []client.Object{father, son1, son2}
			},
			Expect: true,
		},
		{
			name: "indirect-3",
			TopologyBuild: func() []client.Object {
				father := topWorkload.DeepCopy()
				son1 := deployment.DeepCopy()
				son1.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(father, father.GetObjectKind().GroupVersionKind()),
				})
				son2 := nativeStatefulSet.DeepCopy()
				son2.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(son1, son1.GetObjectKind().GroupVersionKind()),
				})
				son3 := advancedStatefulSet.DeepCopy()
				son3.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(son2, son2.GetObjectKind().GroupVersionKind()),
				})
				return []client.Object{father, son1, son2, son3}
			},
			Expect: true,
		},
		{
			name: "indirect-3-false",
			TopologyBuild: func() []client.Object {
				father := topWorkload.DeepCopy()
				son1 := deployment.DeepCopy()
				son2 := nativeStatefulSet.DeepCopy()
				son2.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(son1, son1.GetObjectKind().GroupVersionKind()),
				})
				son3 := advancedStatefulSet.DeepCopy()
				son3.SetOwnerReferences([]metav1.OwnerReference{
					*metav1.NewControllerRef(son2, son2.GetObjectKind().GroupVersionKind()),
				})
				return []client.Object{father, son1, son2, son3}
			},
			Expect: false,
		},
	}
	for _, cs := range cases {
		t.Run(cs.name, func(t *testing.T) {
			objects := cs.TopologyBuild()
			father := objects[0]
			son := objects[len(objects)-1]
			cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objects...).Build()
			got, err := GetOwnerWorkload(cli, son)
			Expect(err).NotTo(HaveOccurred())
			if cs.Expect {
				Expect(reflect.DeepEqual(father, got)).Should(BeTrue())
			} else {
				Expect(reflect.DeepEqual(father, got)).Should(BeFalse())
			}
		})
	}
}
@@ -29,6 +29,7 @@ import (
	apps "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
@@ -157,15 +158,15 @@ func (h *WorkloadHandler) Handle(ctx context.Context, req admission.Request) adm
func (h *WorkloadHandler) handleStatefulSetLikeWorkload(newObj, oldObj *unstructured.Unstructured) (changed bool, err error) {
	// indicate whether the workload can enter the rollout process
	// 1. replicas > 0
	replicas := util.ParseReplicasFrom(newObj)
	replicas := util.GetReplicas(newObj)
	if replicas == 0 {
		return false, nil
	}
	oldTemplate := util.ParseTemplateFrom(oldObj)
	oldTemplate := util.GetTemplate(oldObj)
	if oldTemplate == nil {
		return false, nil
	}
	newTemplate := util.ParseTemplateFrom(newObj)
	newTemplate := util.GetTemplate(newObj)
	if newTemplate == nil {
		return false, nil
	}
@@ -277,6 +278,7 @@ func (h *WorkloadHandler) handleCloneSet(newObj, oldObj *kruiseappsv1alpha1.Clon
	changed = true
	// need to set workload paused = true
	newObj.Spec.UpdateStrategy.Paused = true
	newObj.Spec.UpdateStrategy.Partition = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"}
	state := &util.RolloutState{RolloutName: rollout.Name}
	by, _ := json.Marshal(state)
	if newObj.Annotations == nil {
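The net effect on a CloneSet entering a rollout: paused plus a "100%" string partition means no pod updates at all until the batchRelease controller takes ownership and progressively lowers the partition. Shown below as a standalone value for clarity (a sketch, not the webhook's literal code path):

	gated := kruiseappsv1alpha1.CloneSetUpdateStrategy{
		Paused:    true,
		Partition: &intstr.IntOrString{Type: intstr.String, StrVal: "100%"},
	}
	_ = gated // with this strategy in spec, the workload is fully gated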
@@ -429,6 +429,7 @@ func TestHandlerCloneSet(t *testing.T) {
				obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2"
				obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}`
				obj.Spec.UpdateStrategy.Paused = true
				obj.Spec.UpdateStrategy.Partition = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"}
				return obj
			},
			getRollout: func() *appsv1alpha1.Rollout {
@@ -246,7 +246,6 @@ var _ = SIGDescribe("BatchRelease", func() {
				Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred())
				return clone.Status.UpdatedReplicas
			}, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas)))
			time.Sleep(time.Duration(batch.PauseSeconds) * time.Second)
		}

		By("Checking BatchRelease status...")

@@ -292,7 +291,6 @@ var _ = SIGDescribe("BatchRelease", func() {
				Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred())
				return clone.Status.UpdatedReplicas
			}, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas)))
			time.Sleep(time.Duration(batch.PauseSeconds) * time.Second)
		}

		By("Checking BatchRelease status...")

@@ -351,7 +349,6 @@ var _ = SIGDescribe("BatchRelease", func() {
				Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred())
				return clone.Status.UpdatedReplicas
			}, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas)))
			time.Sleep(time.Duration(batch.PauseSeconds) * time.Second)
		}

		By("V1->V2: Checking BatchRelease status...")

@@ -667,7 +664,6 @@ var _ = SIGDescribe("BatchRelease", func() {
				Expect(clone).ShouldNot(BeNil())
				return clone.Status.Replicas
			}, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas)))
			time.Sleep(time.Duration(batch.PauseSeconds) * time.Second)
		}

		By("Checking BatchRelease status...")

@@ -716,7 +712,6 @@ var _ = SIGDescribe("BatchRelease", func() {
				Expect(clone).ShouldNot(BeNil())
				return clone.Status.UpdatedReplicas
			}, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas)))
			time.Sleep(time.Duration(batch.PauseSeconds) * time.Second)
		}

		By("Checking BatchRelease status...")

@@ -778,7 +773,6 @@ var _ = SIGDescribe("BatchRelease", func() {
				Expect(clone).ShouldNot(BeNil())
				return clone.Status.UpdatedReplicas
			}, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas)))
			time.Sleep(time.Duration(batch.PauseSeconds) * time.Second)
		}

		By("V1->V2: Checking BatchRelease status...")

@@ -1206,7 +1200,6 @@ var _ = SIGDescribe("BatchRelease", func() {
		By("Creating BatchRelease...")
		release := &rolloutsv1alpha1.BatchRelease{}
		Expect(ReadYamlToObject("./test_data/batchrelease/deployment_percentage_100.yaml", release)).ToNot(HaveOccurred())
		release.Spec.ReleasePlan.Batches[1].PauseSeconds = 10000
		CreateObject(release)

		By("Creating workload and waiting for all pods ready...")

@@ -1247,7 +1240,6 @@ var _ = SIGDescribe("BatchRelease", func() {
		fetchedRelease.Spec.ReleasePlan.Batches = []rolloutsv1alpha1.ReleaseBatch{
			{
				CanaryReplicas: intstr.FromInt(4),
				PauseSeconds:   10,
			},
			{
				CanaryReplicas: intstr.FromString("100%"),
File diff suppressed because it is too large