mirror of https://github.com/openkruise/kruise.git
Compare commits
176 Commits
Author | SHA1 | Date |
---|---|---|
|
8ff20ad150 | |
|
6bc00ac6ba | |
|
efb4978f2f | |
|
e258de4548 | |
|
5408631390 | |
|
b74b675c5f | |
|
84eb758b03 | |
|
f6e5215fe0 | |
|
33cd2c5105 | |
|
65478006c9 | |
|
abdda530d8 | |
|
a7cb4bdf20 | |
|
fb775bd071 | |
|
925982d429 | |
|
d4d417d261 | |
|
8e300c52fa | |
|
f69813ce9c | |
|
f07f7088bf | |
|
4778e7cfea | |
|
fc611f0505 | |
|
345c20a095 | |
|
e132c8c0f5 | |
|
51bb0dc2b0 | |
|
9ede0b9339 | |
|
d37d9a15a8 | |
|
509b7b9715 | |
|
fab63bda03 | |
|
d058a44291 | |
|
686d9b2268 | |
|
648f9337c5 | |
|
92aead119d | |
|
1320571308 | |
|
4764af0537 | |
|
715f2099e4 | |
|
9c3a79bf7e | |
|
0fbbe891a3 | |
|
edf0305884 | |
|
ff8dceca69 | |
|
4025f616aa | |
|
145a67f4b0 | |
|
ec72c8502f | |
|
dcc9fff249 | |
|
6db558945f | |
|
24e93533d2 | |
|
d65527ea66 | |
|
d8bf9c9b53 | |
|
a6fafc5fb4 | |
|
f97a0f3cb1 | |
|
7d35bcef5c | |
|
8a7085a4b6 | |
|
a5968c805e | |
|
c5bc8a0809 | |
|
1e70d1d459 | |
|
0890e5f9e4 | |
|
683ce2a993 | |
|
318165b7ea | |
|
a79a4fb21e | |
|
e149b48327 | |
|
e1ab6b4a4d | |
|
2bf44b19bf | |
|
964335a753 | |
|
64bcfa6366 | |
|
483dc2fdd5 | |
|
076d160b1e | |
|
18ef834d14 | |
|
b7bdfffae0 | |
|
f4bcfb2d95 | |
|
c286742d69 | |
|
7c53444d79 | |
|
14d9ebdbfc | |
|
7bca8af823 | |
|
6e20fa884a | |
|
39dde1a2d4 | |
|
79943f5b6a | |
|
5e46d3a6a3 | |
|
29258d3f04 | |
|
6d2f3f5fcc | |
|
bf4d1d8860 | |
|
222fe89f6d | |
|
9f249f954d | |
|
22daf9a981 | |
|
517b2537c5 | |
|
8f727a41a5 | |
|
4183fbc48a | |
|
2a292857f7 | |
|
71ad0968f8 | |
|
7124cb8034 | |
|
2beb9d0357 | |
|
58fd993374 | |
|
35c94ed3ce | |
|
4940a61720 | |
|
598955d825 | |
|
f2189e1eeb | |
|
3f5dd59dfd | |
|
3fccad945f | |
|
c393385ef1 | |
|
08a7565fca | |
|
531d6501d2 | |
|
cd23dc1038 | |
|
58c1ecb5c6 | |
|
79b64c14e2 | |
|
42f5266bfa | |
|
e3e6d471a7 | |
|
0f3b58ae7d | |
|
2cdb7600f1 | |
|
aeb7f19f82 | |
|
b9da21ab56 | |
|
d25416f63e | |
|
6968bd8972 | |
|
5ac38335e5 | |
|
a74b22efed | |
|
b800c5dee8 | |
|
158325671c | |
|
4f93af8f06 | |
|
0ee354453c | |
|
22c81a8f1b | |
|
1b40f5bde8 | |
|
4661b6e02c | |
|
54a769f654 | |
|
3e225bbca0 | |
|
26a07e26f6 | |
|
fa139cb034 | |
|
eb78da4354 | |
|
b157f4182c | |
|
924c5ee0af | |
|
5ce62c948a | |
|
9924a6238b | |
|
1880364f4b | |
|
c426ed9b1e | |
|
558765e18f | |
|
cba1c8a3ac | |
|
77bacae8e6 | |
|
2386e8115a | |
|
5a5768204c | |
|
29f2323d59 | |
|
0964df6da6 | |
|
c5f751af5e | |
|
de5c362b51 | |
|
f6a8ad7a03 | |
|
7217ba0c3e | |
|
91f7a75ab3 | |
|
bd746c882d | |
|
9e7188fbf0 | |
|
123b3b0071 | |
|
ae744be345 | |
|
4cec4598ac | |
|
fcc9c1b967 | |
|
d79f404e1f | |
|
450dc5e0d7 | |
|
4f04e93f48 | |
|
198461e056 | |
|
993afa3549 | |
|
81eb820ad9 | |
|
6d57029cd4 | |
|
7dcdf8d951 | |
|
f32166c08a | |
|
2d992bfd99 | |
|
be1a79e260 | |
|
179d759cf7 | |
|
4918768828 | |
|
68a3793185 | |
|
a6355b8279 | |
|
d25f72f9f4 | |
|
8f4095a73c | |
|
9e58975d9c | |
|
c66ed5cf2d | |
|
0ff70fb678 | |
|
5affbed5d1 | |
|
3cb1e59b1c | |
|
f5508c5f90 | |
|
11fdf8fa11 | |
|
ab4c6d0715 | |
|
dc3d8db14a | |
|
2cd20da96b | |
|
c7ffa18d75 | |
|
5a52530c7f |
|
@ -0,0 +1,4 @@
|
|||
ignore:
|
||||
- "pkg/client/.*"
|
||||
- "test/fuzz/.*"
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
paths-ignore:
|
||||
- vendor
|
|
@ -5,16 +5,16 @@ on:
|
|||
branches:
|
||||
- master
|
||||
- release-*
|
||||
pull_request: {}
|
||||
workflow_dispatch: {}
|
||||
pull_request: { }
|
||||
workflow_dispatch: { }
|
||||
|
||||
# Declare default permissions as read only.
|
||||
permissions: read-all
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.20'
|
||||
GOLANGCI_VERSION: 'v1.55.2'
|
||||
GO_VERSION: '1.23'
|
||||
GOLANGCI_VERSION: 'v2.1'
|
||||
DOCKER_BUILDX_VERSION: 'v0.4.2'
|
||||
|
||||
# Common users. We can't run a step 'if secrets.AWS_USR != ""' but we can run
|
||||
|
@ -26,30 +26,30 @@ env:
|
|||
jobs:
|
||||
typos-check:
|
||||
name: Spell Check with Typos
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Checkout Actions Repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: Check spelling with custom config file
|
||||
uses: crate-ci/typos@v1.23.5
|
||||
uses: crate-ci/typos@0f0ccba9ed1df83948f0c15026e4f5ccfce46109 # v1.32.0
|
||||
with:
|
||||
config: ./typos.toml
|
||||
|
||||
golangci-lint:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
permissions:
|
||||
security-events: write
|
||||
steps:
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Cache Go Dependencies
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
|
@ -58,14 +58,14 @@ jobs:
|
|||
run: |
|
||||
make generate
|
||||
- name: Lint golang code
|
||||
uses: golangci/golangci-lint-action@v6.0.1
|
||||
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
|
||||
with:
|
||||
version: ${{ env.GOLANGCI_VERSION }}
|
||||
args: --verbose
|
||||
skip-pkg-cache: true
|
||||
mod: readonly
|
||||
- name: Run Trivy vulnerability scanner in repo mode
|
||||
uses: aquasecurity/trivy-action@master
|
||||
uses: aquasecurity/trivy-action@77137e9dc3ab1b329b7c8a38c2eb7475850a14e8 # master
|
||||
with:
|
||||
scan-type: 'fs'
|
||||
ignore-unfixed: true
|
||||
|
@ -73,12 +73,12 @@ jobs:
|
|||
output: 'trivy-results.sarif'
|
||||
severity: 'CRITICAL'
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@v3.25.15
|
||||
uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0
|
||||
with:
|
||||
sarif_file: 'trivy-results.sarif'
|
||||
|
||||
# markdownlint-misspell-shellcheck:
|
||||
# runs-on: ubuntu-20.04
|
||||
# runs-on: ubuntu-24.04
|
||||
# # this image is build from Dockerfile
|
||||
# # https://github.com/pouchcontainer/pouchlinter/blob/master/Dockerfile
|
||||
# container: pouchcontainer/pouchlinter:v0.1.2
|
||||
|
@ -91,7 +91,6 @@ jobs:
|
|||
# run: find ./ -name "*.sh" | grep -v vendor | xargs shellcheck
|
||||
# - name: Lint markdown files
|
||||
# run: find ./ -name "*.md" | grep -v vendor | grep -v commandline | grep -v .github | grep -v swagger | grep -v api | xargs mdl -r ~MD010,~MD013,~MD014,~MD022,~MD024,~MD029,~MD031,~MD032,~MD033,~MD036
|
||||
|
||||
# - name: Check markdown links
|
||||
# run: |
|
||||
# set +e
|
||||
|
@ -106,19 +105,19 @@ jobs:
|
|||
# bash -c "exit $code";
|
||||
|
||||
unit-tests:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Fetch History
|
||||
run: git fetch --prune --unshallow
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Cache Go Dependencies
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
|
@ -128,9 +127,42 @@ jobs:
|
|||
make test
|
||||
git status
|
||||
- name: Publish Unit Test Coverage
|
||||
uses: codecov/codecov-action@v4
|
||||
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: unittests
|
||||
file: cover.out
|
||||
- name: Check diff
|
||||
run: '[[ -z $(git status -s) ]] || (printf "Existing modified/untracked files.\nPlease run \"make generate manifests\" and push again.\n"; exit 1)'
|
||||
# See: https://google.github.io/oss-fuzz/getting-started/continuous-integration/
|
||||
Fuzzing:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
security-events: write
|
||||
steps:
|
||||
- name: Build Fuzzers
|
||||
id: build
|
||||
uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@abe2c06d0e162320403dd10e8268adbb0b8923f8 # master
|
||||
with:
|
||||
oss-fuzz-project-name: 'openkruise'
|
||||
language: go
|
||||
- name: Run Fuzzers
|
||||
uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@abe2c06d0e162320403dd10e8268adbb0b8923f8 # master
|
||||
with:
|
||||
oss-fuzz-project-name: 'openkruise'
|
||||
language: go
|
||||
fuzz-seconds: 1200
|
||||
output-sarif: true
|
||||
- name: Upload Crash
|
||||
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
|
||||
if: failure() && steps.build.outcome == 'success'
|
||||
with:
|
||||
name: artifacts
|
||||
path: ./out/artifacts
|
||||
- name: Upload Sarif
|
||||
if: always() && steps.build.outcome == 'success'
|
||||
uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0
|
||||
with:
|
||||
# Path to SARIF file relative to the root of the repository
|
||||
sarif_file: cifuzz-sarif/results.sarif
|
||||
checkout_path: cifuzz-sarif
|
|
@ -19,6 +19,9 @@ on:
|
|||
branches: [ "master" ]
|
||||
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze
|
||||
|
@ -45,14 +48,13 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v3.25.15
|
||||
uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
config-file: ./.github/codeql/codeql-config.yml
|
||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||
# By default, queries listed here will override any specified in a config file.
|
||||
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||
|
@ -64,7 +66,7 @@ jobs:
|
|||
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
|
||||
# If this step fails, then you should remove it and run the build manually (see below)
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v3.25.15
|
||||
uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0
|
||||
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
|
||||
|
@ -77,6 +79,6 @@ jobs:
|
|||
# ./location_of_script_within_repo/buildscript.sh
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v3.25.15
|
||||
uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
|
|
|
@ -13,6 +13,16 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ vars.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.HUB_KRIUSE }}
|
||||
- name: Build the Docker image
|
||||
run: IMG=openkruise/kruise-manager:${{ github.ref_name }} & make docker-multiarch
|
||||
run: |
|
||||
docker buildx create --use --platform=linux/amd64,linux/arm64,linux/ppc64le --name multi-platform-builder
|
||||
docker buildx ls
|
||||
IMG=openkruise/kruise-manager:${{ github.ref_name }} make docker-multiarch
|
||||
|
|
|
@ -1,542 +0,0 @@
|
|||
name: E2E-1.18
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release-*
|
||||
pull_request: {}
|
||||
workflow_dispatch: {}
|
||||
|
||||
# Declare default permissions as read only.
|
||||
permissions: read-all
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.20'
|
||||
KIND_VERSION: 'v0.14.0'
|
||||
KIND_IMAGE: 'kindest/node:v1.18.20'
|
||||
KIND_CLUSTER_NAME: 'ci-testing'
|
||||
|
||||
jobs:
|
||||
|
||||
astatefulset:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 60m -v --focus='\[apps\] StatefulSet' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "$out"
|
||||
echo "Kruise-manager has not restarted"
|
||||
else
|
||||
echo "$out"
|
||||
echo "Kruise-manager has restarted, abort!!!"
|
||||
kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system
|
||||
exit 1
|
||||
fi
|
||||
kubectl get pods -n kruise-system -l control-plane=daemon -o=jsonpath="{range .items[*]}{.metadata.namespace}{\"\t\"}{.metadata.name}{\"\n\"}{end}" | while read ns name;
|
||||
do
|
||||
restartCount=$(kubectl get pod -n ${ns} ${name} --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-daemon has not restarted"
|
||||
else
|
||||
kubectl get pods -n ${ns} -l control-plane=daemon --no-headers
|
||||
echo "Kruise-daemon has restarted, abort!!!"
|
||||
kubectl logs -p -n ${ns} ${name}
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
echo "test fail, dump kruise-daemon logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
||||
|
||||
pullimages-containerrecreate:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 60m -v --focus='\[apps\] (PullImage|ContainerRecreateRequest|PullImages)' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-manager has not restarted"
|
||||
else
|
||||
kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers
|
||||
echo "Kruise-manager has restarted, abort!!!"
|
||||
kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system
|
||||
exit 1
|
||||
fi
|
||||
kubectl get pods -n kruise-system -l control-plane=daemon -o=jsonpath="{range .items[*]}{.metadata.namespace}{\"\t\"}{.metadata.name}{\"\n\"}{end}" | while read ns name;
|
||||
do
|
||||
restartCount=$(kubectl get pod -n ${ns} ${name} --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-daemon has not restarted"
|
||||
else
|
||||
kubectl get pods -n ${ns} -l control-plane=daemon --no-headers
|
||||
echo "Kruise-daemon has restarted, abort!!!"
|
||||
kubectl logs -p -n ${ns} ${name}
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
echo "test fail, dump kruise-daemon logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
||||
|
||||
advanced-daemonset:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 60m -v --focus='\[apps\] DaemonSet' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-manager has not restarted"
|
||||
else
|
||||
kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers
|
||||
echo "Kruise-manager has restarted, abort!!!"
|
||||
kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system
|
||||
exit 1
|
||||
fi
|
||||
kubectl get pods -n kruise-system -l control-plane=daemon -o=jsonpath="{range .items[*]}{.metadata.namespace}{\"\t\"}{.metadata.name}{\"\n\"}{end}" | while read ns name;
|
||||
do
|
||||
restartCount=$(kubectl get pod -n ${ns} ${name} --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-daemon has not restarted"
|
||||
else
|
||||
kubectl get pods -n ${ns} -l control-plane=daemon --no-headers
|
||||
echo "Kruise-daemon has restarted, abort!!!"
|
||||
kubectl logs -p -n ${ns} ${name}
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
echo "test fail, dump kruise-daemon logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
||||
|
||||
sidecarset:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 60m -v --focus='\[apps\] SidecarSet' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-manager has not restarted"
|
||||
else
|
||||
kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers
|
||||
echo "Kruise-manager has restarted, abort!!!"
|
||||
kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system
|
||||
exit 1
|
||||
fi
|
||||
kubectl get pods -n kruise-system -l control-plane=daemon -o=jsonpath="{range .items[*]}{.metadata.namespace}{\"\t\"}{.metadata.name}{\"\n\"}{end}" | while read ns name;
|
||||
do
|
||||
restartCount=$(kubectl get pod -n ${ns} ${name} --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-daemon has not restarted"
|
||||
else
|
||||
kubectl get pods -n ${ns} -l control-plane=daemon --no-headers
|
||||
echo "Kruise-daemon has restarted, abort!!!"
|
||||
kubectl logs -p -n ${ns} ${name}
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
echo "test fail, dump kruise-daemon logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
||||
|
||||
podUnavailableBudget:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 60m -v --focus='\[policy\] PodUnavailableBudget' test/e2e
|
||||
retVal=$?
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
||||
other:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 90m -v --skip='\[apps\] (StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip='\[policy\] PodUnavailableBudget' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-manager has not restarted"
|
||||
else
|
||||
kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers
|
||||
echo "Kruise-manager has restarted, abort!!!"
|
||||
kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system
|
||||
exit 1
|
||||
fi
|
||||
kubectl get pods -n kruise-system -l control-plane=daemon -o=jsonpath="{range .items[*]}{.metadata.namespace}{\"\t\"}{.metadata.name}{\"\n\"}{end}" | while read ns name;
|
||||
do
|
||||
restartCount=$(kubectl get pod -n ${ns} ${name} --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-daemon has not restarted"
|
||||
else
|
||||
kubectl get pods -n ${ns} -l control-plane=daemon --no-headers
|
||||
echo "Kruise-daemon has restarted, abort!!!"
|
||||
kubectl logs -p -n ${ns} ${name}
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
echo "test fail, dump kruise-daemon logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
|
@ -1,113 +0,0 @@
|
|||
name: E2E-1.20-EphemeralJob
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release-*
|
||||
pull_request: {}
|
||||
workflow_dispatch: {}
|
||||
|
||||
# Declare default permissions as read only.
|
||||
permissions: read-all
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.20'
|
||||
KIND_VERSION: 'v0.14.0'
|
||||
KIND_IMAGE: 'kindest/node:v1.20.15'
|
||||
KIND_CLUSTER_NAME: 'ci-testing'
|
||||
|
||||
jobs:
|
||||
ephemeraljob:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 60m -v --focus='\[apps\] EphemeralJob' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "$out"
|
||||
echo "Kruise-manager has not restarted"
|
||||
else
|
||||
echo "$out"
|
||||
echo "Kruise-manager has restarted, abort!!!"
|
||||
kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system
|
||||
exit 1
|
||||
fi
|
||||
kubectl get pods -n kruise-system -l control-plane=daemon -o=jsonpath="{range .items[*]}{.metadata.namespace}{\"\t\"}{.metadata.name}{\"\n\"}{end}" | while read ns name;
|
||||
do
|
||||
restartCount=$(kubectl get pod -n ${ns} ${name} --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-daemon has not restarted"
|
||||
else
|
||||
kubectl get pods -n ${ns} -l control-plane=daemon --no-headers
|
||||
echo "Kruise-daemon has restarted, abort!!!"
|
||||
kubectl logs -p -n ${ns} ${name}
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
echo "test fail, dump kruise-daemon logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
|
@ -13,26 +13,108 @@ permissions: read-all
|
|||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.20'
|
||||
GO_VERSION: '1.23'
|
||||
KIND_ACTION_VERSION: 'v1.3.0'
|
||||
KIND_VERSION: 'v0.14.0'
|
||||
KIND_IMAGE: 'kindest/node:v1.24.6'
|
||||
KIND_CLUSTER_NAME: 'ci-testing'
|
||||
|
||||
jobs:
|
||||
|
||||
astatefulset:
|
||||
runs-on: ubuntu-20.04
|
||||
astatefulset-storage:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Install-CSI
|
||||
run: |
|
||||
make install-csi
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 60m -p -v --focus='\[apps\] AppStatefulSetStorage' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-manager has not restarted"
|
||||
else
|
||||
kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers
|
||||
echo "Kruise-manager has restarted, abort!!!"
|
||||
kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system
|
||||
exit 1
|
||||
fi
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
echo "test fail, dump kruise-daemon logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
||||
|
||||
astatefulset:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -101,17 +183,17 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
pullimages-containerrecreate:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -193,17 +275,17 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
advanced-daemonset:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -285,17 +367,17 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
sidecarset:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -377,17 +459,17 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
ephemeraljob:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -447,17 +529,17 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
podUnavailableBudget:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -512,17 +594,17 @@ jobs:
|
|||
fi
|
||||
exit $retVal
|
||||
other:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -536,6 +618,8 @@ jobs:
|
|||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl create ns kruise-system
|
||||
kubectl apply -f test/kruise-e2e-config.yaml
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
|
@ -567,7 +651,8 @@ jobs:
|
|||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 90m -v --skip='\[apps\] (StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip='\[policy\] PodUnavailableBudget' test/e2e
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubeflow/training-operator/refs/heads/v1.8-branch/manifests/base/crds/kubeflow.org_tfjobs.yaml
|
||||
./bin/ginkgo -timeout 90m -v --skip='\[apps\] (AppStatefulSetStorage|StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip='\[policy\] PodUnavailableBudget' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
|
|
|
@ -13,25 +13,107 @@ permissions: read-all
|
|||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.20'
|
||||
GO_VERSION: '1.23'
|
||||
KIND_VERSION: 'v0.18.0'
|
||||
KIND_IMAGE: 'kindest/node:v1.26.3'
|
||||
KIND_CLUSTER_NAME: 'ci-testing'
|
||||
|
||||
jobs:
|
||||
|
||||
astatefulset:
|
||||
runs-on: ubuntu-20.04
|
||||
astatefulset-storage:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Install-CSI
|
||||
run: |
|
||||
make install-csi
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 60m -p -v --focus='\[apps\] AppStatefulSetStorage' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-manager has not restarted"
|
||||
else
|
||||
kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers
|
||||
echo "Kruise-manager has restarted, abort!!!"
|
||||
kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system
|
||||
exit 1
|
||||
fi
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
echo "test fail, dump kruise-daemon logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
||||
|
||||
astatefulset:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -100,17 +182,17 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
pullimages-containerrecreate:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -192,17 +274,17 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
advanced-daemonset:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -284,17 +366,17 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
sidecarset:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -376,17 +458,17 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
ephemeraljob:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -446,17 +528,17 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
podUnavailableBudget:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -511,17 +593,17 @@ jobs:
|
|||
fi
|
||||
exit $retVal
|
||||
other:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
|
@ -535,6 +617,8 @@ jobs:
|
|||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl create ns kruise-system
|
||||
kubectl apply -f test/kruise-e2e-config.yaml
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
|
@ -566,7 +650,8 @@ jobs:
|
|||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 90m -v --skip='\[apps\] (StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip='\[policy\] PodUnavailableBudget' test/e2e
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubeflow/training-operator/refs/heads/v1.8-branch/manifests/base/crds/kubeflow.org_tfjobs.yaml
|
||||
./bin/ginkgo -timeout 90m -v --skip='\[apps\] (AppStatefulSetStorage|StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip='\[policy\] PodUnavailableBudget' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
|
|
|
@ -13,29 +13,85 @@ permissions: read-all
|
|||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.20'
|
||||
GO_VERSION: '1.23'
|
||||
KIND_VERSION: 'v0.22.0'
|
||||
KIND_IMAGE: 'kindest/node:v1.28.7'
|
||||
KIND_CLUSTER_NAME: 'ci-testing'
|
||||
|
||||
jobs:
|
||||
|
||||
astatefulset:
|
||||
runs-on: ubuntu-20.04
|
||||
astatefulset-storage:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-none-fg.yaml
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Install-CSI
|
||||
run: |
|
||||
make install-csi
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 60m -p -v --focus='\[apps\] AppStatefulSetStorage' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-manager has not restarted"
|
||||
else
|
||||
kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers
|
||||
echo "Kruise-manager has restarted, abort!!!"
|
||||
kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system
|
||||
exit 1
|
||||
fi
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
echo "test fail, dump kruise-daemon logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
||||
|
||||
astatefulset:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
|
@ -44,33 +100,7 @@ jobs:
|
|||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
|
@ -100,21 +130,21 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
pullimages-containerrecreate:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-none-fg.yaml
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
|
@ -123,33 +153,7 @@ jobs:
|
|||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
|
@ -192,21 +196,21 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
advanced-daemonset:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-none-fg.yaml
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
|
@ -215,33 +219,7 @@ jobs:
|
|||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
|
@ -284,21 +262,21 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
sidecarset:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-none-fg.yaml
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
|
@ -307,33 +285,7 @@ jobs:
|
|||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
DISABLE_E2E_CONFIG=true IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
|
@ -376,21 +328,21 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
ephemeraljob:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-none-fg.yaml
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
|
@ -399,33 +351,7 @@ jobs:
|
|||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
|
@ -446,21 +372,21 @@ jobs:
|
|||
exit $retVal
|
||||
|
||||
podUnavailableBudget:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-none-fg.yaml
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
|
@ -469,33 +395,7 @@ jobs:
|
|||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
|
@ -510,22 +410,79 @@ jobs:
|
|||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
||||
other:
|
||||
runs-on: ubuntu-20.04
|
||||
clonesetAndInplace:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-none-fg.yaml
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Install-CSI
|
||||
run: |
|
||||
make install-csi
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -p -timeout 120m -v --focus='\[apps\] (InplaceVPA)' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
echo "Kruise-manager has not restarted"
|
||||
else
|
||||
kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers
|
||||
echo "Kruise-manager has restarted, abort!!!"
|
||||
kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system
|
||||
exit 1
|
||||
fi
|
||||
if [ "$retVal" -ne 0 ];then
|
||||
echo "test fail, dump kruise-manager logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}')
|
||||
echo "test fail, dump kruise-daemon logs"
|
||||
while read pod; do
|
||||
kubectl logs -n kruise-system $pod
|
||||
done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}')
|
||||
fi
|
||||
exit $retVal
|
||||
|
||||
other:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
|
@ -534,39 +491,14 @@ jobs:
|
|||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
set -ex
|
||||
kubectl cluster-info
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
|
||||
NODES=$(kubectl get node | wc -l)
|
||||
for ((i=1;i<10;i++));
|
||||
do
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
set +e
|
||||
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l)
|
||||
kubectl get node -o yaml
|
||||
kubectl get all -n kruise-system -o yaml
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system
|
||||
kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true
|
||||
set -e
|
||||
if [ "$PODS" -eq "$NODES" ]; then
|
||||
echo "Wait for kruise-manager and kruise-daemon ready successfully"
|
||||
else
|
||||
echo "Timeout to wait for kruise-manager and kruise-daemon ready"
|
||||
exit 1
|
||||
fi
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
make ginkgo
|
||||
set +e
|
||||
./bin/ginkgo -timeout 90m -v --skip='\[apps\] (StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip='\[policy\] PodUnavailableBudget' test/e2e
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubeflow/training-operator/refs/heads/v1.8-branch/manifests/base/crds/kubeflow.org_tfjobs.yaml
|
||||
./bin/ginkgo -timeout 90m -v --skip='\[apps\] (InplaceVPA|AppStatefulSetStorage|StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip='\[policy\] PodUnavailableBudget' test/e2e
|
||||
retVal=$?
|
||||
restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
|
||||
if [ "${restartCount}" -eq "0" ];then
|
||||
|
|
|
@ -0,0 +1,298 @@
|
|||
name: E2E-1.30
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release-*
|
||||
pull_request: {}
|
||||
workflow_dispatch: {}
|
||||
|
||||
# Declare default permissions as read only.
|
||||
permissions: read-all
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.23'
|
||||
KIND_VERSION: 'v0.22.0'
|
||||
KIND_IMAGE: 'kindest/node:v1.30.8'
|
||||
KIND_CLUSTER_NAME: 'ci-testing'
|
||||
|
||||
jobs:
|
||||
astatefulset-storage:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Install-CSI
|
||||
run: |
|
||||
make install-csi
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] AppStatefulSetStorage' --print-info
|
||||
|
||||
astatefulset:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] StatefulSet' --print-info
|
||||
|
||||
pullimages-containerrecreate:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] (PullImage|ContainerRecreateRequest|PullImages)' --print-info --disable-parallel
|
||||
|
||||
advanced-daemonset:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] DaemonSet' --print-info
|
||||
|
||||
sidecarset:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
DISABLE_E2E_CONFIG=true IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] SidecarSet' --print-info
|
||||
|
||||
ephemeraljob:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] EphemeralJob' --print-info
|
||||
|
||||
podUnavailableBudget:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[policy\] PodUnavailableBudget' --print-info
|
||||
|
||||
clonesetAndInplace:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Install-CSI
|
||||
run: |
|
||||
make install-csi
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] (CloneSet|InplaceVPA)' --print-info
|
||||
|
||||
other:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus "" --skip '\[apps\] (CloneSet|InplaceVPA|AppStatefulSetStorage|StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip '\[policy\] PodUnavailableBudget' --timeout 90m --print-info
|
|
@ -0,0 +1,298 @@
|
|||
name: E2E-1.32
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release-*
|
||||
pull_request: {}
|
||||
workflow_dispatch: {}
|
||||
|
||||
# Declare default permissions as read only.
|
||||
permissions: read-all
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.23'
|
||||
KIND_VERSION: 'v0.22.0'
|
||||
KIND_IMAGE: 'kindest/node:v1.32.0'
|
||||
KIND_CLUSTER_NAME: 'ci-testing'
|
||||
|
||||
jobs:
|
||||
astatefulset-storage:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Install-CSI
|
||||
run: |
|
||||
make install-csi
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] AppStatefulSetStorage' --print-info
|
||||
|
||||
astatefulset:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] StatefulSet' --print-info
|
||||
|
||||
pullimages-containerrecreate:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] (PullImage|ContainerRecreateRequest|PullImages)' --print-info --disable-parallel
|
||||
|
||||
advanced-daemonset:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] DaemonSet' --print-info
|
||||
|
||||
sidecarset:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
DISABLE_E2E_CONFIG=true IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] SidecarSet' --print-info
|
||||
|
||||
ephemeraljob:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] EphemeralJob' --print-info
|
||||
|
||||
podUnavailableBudget:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[policy\] PodUnavailableBudget' --print-info
|
||||
|
||||
clonesetAndInplace:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Install-CSI
|
||||
run: |
|
||||
make install-csi
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus '\[apps\] (CloneSet|InplaceVPA)' --print-info
|
||||
|
||||
other:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ env.KIND_IMAGE }}
|
||||
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
|
||||
config: ./test/kind-conf-with-vpa.yaml
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
- name: Build image
|
||||
run: |
|
||||
export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}"
|
||||
docker build --pull --no-cache . -t $IMAGE
|
||||
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
|
||||
- name: Install Kruise
|
||||
run: |
|
||||
IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} make install-kruise
|
||||
- name: Run E2E Tests
|
||||
run: |
|
||||
export KUBECONFIG=/home/runner/.kube/config
|
||||
tools/hack/run-kruise-e2e-test.sh --focus "" --skip '\[apps\] (CloneSet|InplaceVPA|AppStatefulSetStorage|StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip '\[policy\] PodUnavailableBudget' --timeout 90m --print-info
|
|
@ -15,12 +15,12 @@ permissions: read-all
|
|||
|
||||
jobs:
|
||||
license_check:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
name: Check for unapproved licenses
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: Set up Ruby
|
||||
uses: ruby/setup-ruby@v1
|
||||
uses: ruby/setup-ruby@a4effe49ee8ee5b8b5091268c473a4628afb5651 # v1.245.0
|
||||
with:
|
||||
ruby-version: 2.6
|
||||
- name: Install dependencies
|
||||
|
|
|
@ -32,12 +32,12 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run analysis"
|
||||
uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0
|
||||
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
|
@ -59,7 +59,7 @@ jobs:
|
|||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||
# format to the repository Actions tab.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
|
||||
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
|
@ -67,6 +67,6 @@ jobs:
|
|||
|
||||
# Upload the results to GitHub's code scanning dashboard.
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@9c646c24a4c8410122b0d6a1311088e9377eea95 # v2.25.0
|
||||
uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v2.25.0
|
||||
with:
|
||||
sarif_file: results.sarif
|
||||
|
|
136
.golangci.yml
136
.golangci.yml
|
@ -1,93 +1,73 @@
|
|||
# options for analysis running
|
||||
version: "2"
|
||||
run:
|
||||
# default concurrency is a available CPU number
|
||||
concurrency: 4
|
||||
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
deadline: 5m
|
||||
|
||||
# exit code when at least one issue was found, default is 1
|
||||
issues-exit-code: 1
|
||||
|
||||
# include test files or not, default is true
|
||||
tests: true
|
||||
|
||||
# list of build tags, all linters use it. Default is empty list.
|
||||
#build-tags:
|
||||
# - mytag
|
||||
|
||||
# which dirs to skip: they won't be analyzed;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but next dirs are always skipped independently
|
||||
# from this option's value:
|
||||
# third_party$, testdata$, examples$, Godeps$, builtin$
|
||||
skip-dirs:
|
||||
- apis
|
||||
- pkg/client
|
||||
- vendor
|
||||
- test
|
||||
|
||||
# which files to skip: they will be analyzed, but issues from them
|
||||
# won't be reported. Default value is empty list, but there is
|
||||
# no need to include all autogenerated files, we confidently recognize
|
||||
# autogenerated files. If it's not please let us know.
|
||||
skip-files:
|
||||
# - ".*\\.my\\.go$"
|
||||
# - lib/bad.go
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
# colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number"
|
||||
format: colored-line-number
|
||||
|
||||
# print lines of code with issue, default is true
|
||||
print-issued-lines: true
|
||||
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
||||
|
||||
|
||||
# all available settings of specific linters
|
||||
linters-settings:
|
||||
golint:
|
||||
# minimal confidence for issues, default is 0.8
|
||||
min-confidence: 0.8
|
||||
gofmt:
|
||||
# simplify code: gofmt with `-s` option, true by default
|
||||
simplify: true
|
||||
goimports:
|
||||
# put imports beginning with prefix after 3rd-party packages;
|
||||
# it's a comma-separated list of prefixes
|
||||
#local-prefixes: github.com/openkruise/kruise
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||
locale: default
|
||||
#ignore-words:
|
||||
# - someword
|
||||
depguard:
|
||||
rules:
|
||||
forbid-pkg-errors:
|
||||
deny:
|
||||
- pkg: "github.com/pkg/errors"
|
||||
dsc: Should be replaced with standard lib errors or fmt.Errorf
|
||||
|
||||
formats:
|
||||
text:
|
||||
path: stdout
|
||||
colors: true
|
||||
linters:
|
||||
fast: false
|
||||
disable-all: true
|
||||
default: none
|
||||
enable:
|
||||
# TODO Enforce the below linters later
|
||||
- gofmt
|
||||
- depguard
|
||||
- govet
|
||||
- goimports
|
||||
- ineffassign
|
||||
- misspell
|
||||
- vet
|
||||
- unconvert
|
||||
- unused
|
||||
- depguard
|
||||
issues:
|
||||
exclude:
|
||||
# staticcheck
|
||||
- 'SA1019: package github.com/golang/protobuf/proto is deprecated: Use the "google.golang.org/protobuf/proto" package instead'
|
||||
settings:
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||
locale: US
|
||||
depguard:
|
||||
rules:
|
||||
forbid-pkg-errors:
|
||||
deny:
|
||||
- pkg: "github.com/pkg/errors"
|
||||
desc: Should be replaced with standard lib errors or fmt.Errorf
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
rules:
|
||||
- path: (.+)\.go$
|
||||
text: 'SA1019: package github.com/golang/protobuf/proto is deprecated: Use the "google.golang.org/protobuf/proto" package instead'
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
- apis
|
||||
- pkg/client
|
||||
- vendor
|
||||
- test
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
settings:
|
||||
gofmt:
|
||||
simplify: true
|
||||
goimports:
|
||||
# put imports beginning with prefix after 3rd-party packages;
|
||||
local-prefixes:
|
||||
- github.com/openkruise/kruise
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
- apis
|
||||
- pkg/client
|
||||
- vendor
|
||||
- test
|
||||
|
|
138
CHANGELOG.md
138
CHANGELOG.md
|
@ -1,5 +1,125 @@
|
|||
# Change Log
|
||||
|
||||
## v1.8.2
|
||||
> Change log since v1.8.1
|
||||
|
||||
### Bug fixes
|
||||
- Fix kruise-daemon panic exception due to PodProbeMarker when container is nil. ([#1974](https://github.com/openkruise/kruise/pull/1974), [@zmberg](https://github.com/zmberg))
|
||||
|
||||
## v1.8.1
|
||||
> Change log since v1.8.0
|
||||
|
||||
### Bug fixes
|
||||
- JobSidecarTerminator support ignore exit code capability via env. ([#1949](https://github.com/openkruise/kruise/pull/1949), [@zmberg](https://github.com/zmberg))
|
||||
|
||||
### Performance Improvements
|
||||
- Performance optimized PodProbeMarker to reduce many invalid patch operations ([#2007](https://github.com/openkruise/kruise/pull/2007), [@zmberg](https://github.com/zmberg))
|
||||
|
||||
## v1.7.4
|
||||
> Change log since v1.7.3
|
||||
|
||||
### Bug fixes
|
||||
- JobSidecarTerminator support ignore exit code capability via env. ([#1949](https://github.com/openkruise/kruise/pull/1949), [@zmberg](https://github.com/zmberg))
|
||||
|
||||
## v1.8.0
|
||||
|
||||
> Change log since v1.7.3
|
||||
### Upgrade Notice
|
||||
> No, really, you must read this before you upgrade
|
||||
- **Disable** the following feature gates by default: ResourcesDeletionProtection ([#1919](https://github.com/openkruise/kruise/pull/1919), [@ABNER-1](https://github.com/ABNER-1))
|
||||
- Promote these feature gates to beta:
|
||||
`ResourcesDeletionProtection`, `WorkloadSpread`, `PodUnavailableBudgetDeleteGate`, `InPlaceUpdateEnvFromMetadata`,
|
||||
`StatefulSetAutoDeletePVC`,
|
||||
`PodProbeMarkerGate` ([#1919](https://github.com/openkruise/kruise/pull/1919), [@ABNER-1](https://github.com/ABNER-1))
|
||||
- Update Kubernetes dependency to v1.30.10 and Golang to v1.22 ([#1896](https://github.com/openkruise/kruise/pull/1896), [@ABNER-1](https://github.com/ABNER-1), [#1924](https://github.com/openkruise/kruise/pull/1924), [@furykerry](https://github.com/furykerry))
|
||||
- Prior to Kruise 1.7.3, `helm uninstall` is a **high-risk** operation that deletes Kruise, its CRDs, and associated CRs. Starting from Kruise 1.7.3, it uses a pre-delete hook to check for existing Kruise CRs before uninstallation and blocks the process to prevent accidental deletion.
|
||||
|
||||
### Key Features
|
||||
- Support in-place expansion of StatefulSet volumes ([#1674](https://github.com/openkruise/kruise/pull/1674), [#1714](https://github.com/openkruise/kruise/pull/1714), [@ABNER-1](https://github.com/ABNER-1))
|
||||
- Enable in-place resource resizing for CloneSet, Advanced StatefulSet, and Advanced DaemonSet ([#1353](https://github.com/openkruise/kruise/pull/1353), [#1866](https://github.com/openkruise/kruise/pull/1866), [@LavenderQAQ](https://github.com/LavenderQAQ), [@ABNER-1](https://github.com/ABNER-1))
|
||||
- Support adaptive scheduling strategy for UnitedDeployment ([#1720](https://github.com/openkruise/kruise/pull/1720), [@AiRanthem](https://github.com/AiRanthem))
|
||||
- Add WorkloadSpread support for AI workload like TFJob in KubeFlow ([#1838](https://github.com/openkruise/kruise/pull/1838), [@AiRanthem](https://github.com/AiRanthem))
|
||||
|
||||
### Performance Improvements
|
||||
- Optimize CA bundle updates to reduce unnecessary changes ([#1717](https://github.com/openkruise/kruise/pull/1717), [@zmberg](https://github.com/zmberg))
|
||||
- Add disableDeepCopy for BroadcastJob ([#1696](https://github.com/openkruise/kruise/pull/1696), [@Prepmachine4](https://github.com/Prepmachine4))
|
||||
|
||||
### Resilience Enhancement
|
||||
- Add Helm pre-delete hook to preserve Kruise CRs during uninstallation ([#1843](https://github.com/openkruise/kruise/pull/1843), [@AiRanthem](https://github.com/AiRanthem))
|
||||
|
||||
### Other Notable Changes
|
||||
#### Advanced Workload
|
||||
- Add lifecycle hooks and tests for Advanced StatefulSet ([#1858](https://github.com/openkruise/kruise/pull/1858), [@mingzhou.swx](https://github.com/mingzhou.swx), [@ABNER-1](https://github.com/ABNER-1))
|
||||
- Add range-based reserveOrdinals support for Advanced StatefulSet ([#1873](https://github.com/openkruise/kruise/pull/1873), [@AiRanthem](https://github.com/AiRanthem))
|
||||
- Redefined partition semantics to represent non-updated pod count ([#1819](https://github.com/openkruise/kruise/pull/1819), [@ABNER-1](https://github.com/ABNER-1); [#1751](https://github.com/openkruise/kruise/pull/1751), [@zybtakeit](https://github.com/zybtakeit), [@ABNER-1](https://github.com/ABNER-1))
|
||||
|
||||
#### Sidecar Management
|
||||
- Support inject both stable and updated version sidecar according to updateStrategy ([#1689](https://github.com/openkruise/kruise/pull/1689), [#1856](https://github.com/openkruise/kruise/pull/1856), [@AiRanthem](https://github.com/AiRanthem))
|
||||
- Refine SidecarSet initContainer handling ([#1719](https://github.com/openkruise/kruise/pull/1719), [@zmberg](https://github.com/zmberg))
|
||||
|
||||
#### Multi-domain management
|
||||
- Introduce `pub.kruise.io/disable-fetch-replicas-from-workload=true` annotation for CRD compatibility ([#1758](https://github.com/openkruise/kruise/pull/1758), [@zmberg](https://github.com/zmberg))
|
||||
- Extend PodProbeMarker to serverless pods ([#1875](https://github.com/openkruise/kruise/pull/1875), [@zmberg](https://github.com/zmberg))
|
||||
- Enable priorityClassName patching in WorkloadSpread ([#1877](https://github.com/openkruise/kruise/pull/1877), [@AiRanthem](https://github.com/AiRanthem))
|
||||
- Sync all fields in UnitedDeployment spec to subset workload spec ([#1798](https://github.com/openkruise/kruise/pull/1798), [@AiRanthem](https://github.com/AiRanthem))
|
||||
|
||||
### Bug Fixes
|
||||
- Resolve token permission and dependency pinning issues ([#1707](https://github.com/openkruise/kruise/pull/1707), [@harshitasao](https://github.com/harshitasao))
|
||||
- Fix PyTorchJob pod creation failures ([#1864](https://github.com/openkruise/kruise/pull/1864), [@zmberg](https://github.com/zmberg))
|
||||
- Correct ImagePullJob timeout handling (>1800s) ([#1874](https://github.com/openkruise/kruise/pull/1874), [@zmberg](https://github.com/zmberg))
|
||||
- Resolve cri-dockerd runtime detection issues ([#1899](https://github.com/openkruise/kruise/pull/1899), [@FlikweertvisionVadym](https://github.com/FlikweertvisionVadym))
|
||||
- Remove pod ownerRef requirement in pub webhook ([#1869](https://github.com/openkruise/kruise/pull/1869), [@zmberg](https://github.com/zmberg))
|
||||
- Address maxUnavailable blocking in SidecarSet updates ([#1834](https://github.com/openkruise/kruise/pull/1834), [@zmberg](https://github.com/zmberg))
|
||||
- Fix CloneSet controller block from scale expectation leaks ([#1829](https://github.com/openkruise/kruise/pull/1829), [@zmberg](https://github.com/zmberg))
|
||||
- Enforce imagePullPolicy=Always for ImagePullJob ([#1830](https://github.com/openkruise/kruise/pull/1830), [@zmberg](https://github.com/zmberg))
|
||||
- Fix WorkloadSpread webhook panics ([#1807](https://github.com/openkruise/kruise/pull/1807), [@AiRanthem](https://github.com/AiRanthem))
|
||||
|
||||
### Misc (Chores and tests)
|
||||
- Standardize on CRI for image pulls ([#1867](https://github.com/openkruise/kruise/pull/1867), [@furykerry](https://github.com/furykerry))
|
||||
- Introduce JSON log formatting ([#1703](https://github.com/openkruise/kruise/pull/1703), [@zmberg](https://github.com/zmberg))
|
||||
- Remove Docker runtime dependency ([#1870](https://github.com/openkruise/kruise/pull/1870),[@furykerry](https://github.com/furykerry))
|
||||
- Improve test parallelism and reliability ([#1743](https://github.com/openkruise/kruise/pull/1743), [@MichaelRren](https://github.com/MichaelRren))
|
||||
- Enhance WorkloadSpread validation logic ([#1740](https://github.com/openkruise/kruise/pull/1740), [@AiRanthem](https://github.com/AiRanthem))
|
||||
- Launch Kruise Guru on Gurubase.io ([#1800](https://github.com/openkruise/kruise/pull/1800), [@kursataktas](https://github.com/kursataktas))
|
||||
- Improve documentation accuracy ([#1824](https://github.com/openkruise/kruise/pull/1824), [@furykerry](https://github.com/furykerry))
|
||||
- Fix KIND installation issues ([#1688](https://github.com/openkruise/kruise/pull/1688),[@ABNER-1](https://github.com/ABNER-1))
|
||||
- Avoid overriding namespace config after deploying ([#1772](https://github.com/openkruise/kruise/pull/1772),[@hantmac](https://github.com/hantmac))
|
||||
- Fix WorkloadSpread test flakiness by removing dependencies ([#1895](https://github.com/openkruise/kruise/pull/1895), [@AiRanthem](https://github.com/AiRanthem))
|
||||
- Address SidecarSet e2e test failures ([#1724](https://github.com/openkruise/kruise/pull/1724), [@zmberg](https://github.com/zmberg))
|
||||
- Enhance unit test stability ([#1784](https://github.com/openkruise/kruise/pull/1784), [@AiRanthem](https://github.com/AiRanthem))
|
||||
|
||||
## v1.7.3
|
||||
> Change log since v1.7.2
|
||||
|
||||
### Bug fixes
|
||||
- Fix kubeflow PyTorchJob create pod failure due to pod webhook. ([#1864](https://github.com/openkruise/kruise/pull/1864), [@zmberg](https://github.com/zmberg))
|
||||
|
||||
## v1.7.2
|
||||
> Change log since v1.7.1
|
||||
|
||||
### Advanced Workload
|
||||
- Support specified-delete in AdvancedStatefulSet and handle specified deleted pod under maxUnavailable constraint. ([#1734](https://github.com/openkruise/kruise/pull/1734), [@ABNER-1](https://github.com/ABNER-1))
|
||||
|
||||
## v1.6.4
|
||||
> Change log since v1.6.3
|
||||
|
||||
### Advanced Workload
|
||||
- Support specified-delete in AdvancedStatefulSet and handle specified deleted pod under maxUnavailable constraint. ([#1734](https://github.com/openkruise/kruise/pull/1734), [@ABNER-1](https://github.com/ABNER-1))
|
||||
|
||||
## v1.5.5
|
||||
> Change log since v1.5.4
|
||||
|
||||
### Advanced Workload
|
||||
- Support specified-delete in AdvancedStatefulSet and handle specified deleted pod under maxUnavailable constraint. ([#1734](https://github.com/openkruise/kruise/pull/1734), [@ABNER-1](https://github.com/ABNER-1))
|
||||
- Advanced StatefulSet maxUnavailable now counts unavailable pods with smaller ordinal in the update order during rolling upgrade. ([#1480](https://github.com/openkruise/kruise/pull/1480), [@Yesphet](https://github.com/Yesphet))
|
||||
|
||||
## v1.7.1
|
||||
> Change log since v1.7.0
|
||||
|
||||
### Bug fixes
|
||||
- When update crd webhook caBundle, if caBundle does not change, do not update crd again. ([#1717](https://github.com/openkruise/kruise/pull/1717), [@zmberg](https://github.com/zmberg))
|
||||
- Remove normal init container in pod's sidecarSet in-place update annotation. ([#1719](https://github.com/openkruise/kruise/pull/1719), [@zmberg](https://github.com/zmberg))
|
||||
|
||||
## v1.7.0
|
||||
> Change log since v1.6.3
|
||||
|
||||
|
@ -7,7 +127,7 @@
|
|||
- When CloneSet volumeClaimTemplates changed, always recreate pods and related volumes. ([#1561](https://github.com/openkruise/kruise/pull/1561), [@ABNER-1](https://github.com/ABNER-1))
|
||||
- Bump K8s dependency to 1.28, and OpenKruise still works with Kubernetes Version >= 1.18. ([#1598](https://github.com/openkruise/kruise/pull/1598), [@ABNER-1](https://github.com/ABNER-1))
|
||||
- SidecarSet support k8s 1.28 Sidecar Containers(initContainers[x].restartPolicy=Always), and significantly improves the lifecycle management of Sidecar containers,
|
||||
refer to the [community documentation](https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/) for details. ([#1613](https://github.com/openkruise/kruise/pull/1613), [@zmberg](https://github.com/zmberg))
|
||||
refer to the [community documentation](https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/) for details. ([#1613](https://github.com/openkruise/kruise/pull/1613), [@zmberg](https://github.com/zmberg))
|
||||
- ImagePullJob support for credential provider plugin, e.g. aws. ([#1383](https://github.com/openkruise/kruise/pull/1383), [@Kuromesi](https://github.com/Kuromesi))
|
||||
- Advanced StatefulSet support [start ordinal](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#start-ordinal). ([#1643](https://github.com/openkruise/kruise/pull/1643), [@ABNER-1](https://github.com/ABNER-1))
|
||||
- Support webhook CA injection using external certification management tool, e.g. [cert-manager](https://cert-manager.io/). ([#1665](https://github.com/openkruise/kruise/pull/1665), [@Kuromesi](https://github.com/Kuromesi))
|
||||
|
@ -92,14 +212,14 @@ refer to the [community documentation](https://kubernetes.io/docs/concepts/workl
|
|||
|
||||
> No, really, you must read this before you upgrade
|
||||
- OpenKruise no longer supports Kubernetes versions 1.16, 1.17.
|
||||
However it's still possible to use OpenKruise with Kubernetes versions 1.16 and 1.17 as long as KruiseDaemon is not enabled(install/upgrade kruise charts with featureGates="KruiseDaemon=false")
|
||||
However it's still possible to use OpenKruise with Kubernetes versions 1.16 and 1.17 as long as KruiseDaemon is not enabled(install/upgrade kruise charts with featureGates="KruiseDaemon=false")
|
||||
- Kruise-Daemon will no longer support v1alpha2 CRI runtimes.
|
||||
However it's still possible to use OpenKruise on Kubernetes with nodes that only support v1alpha2 CRI as long as KruiseDaemon is not enabled(install/upgrade kruise charts with featureGates="KruiseDaemon=false")
|
||||
However it's still possible to use OpenKruise on Kubernetes with nodes that only support v1alpha2 CRI as long as KruiseDaemon is not enabled(install/upgrade kruise charts with featureGates="KruiseDaemon=false")
|
||||
- OpenKruise leader election default to use leases mode. ([#1407](https://github.com/openkruise/kruise/pull/1407), [dsxing](https://github.com/dsxing))
|
||||
For users with OpenKruise version 1.3.0 or lower, please first upgrade your OpenKruise to version 1.4 or 1.5 before upgrading to 1.6.0, so as to avoid unexpected multiple leader problem during the installation.
|
||||
For users with OpenKruise version 1.3.0 or lower, please first upgrade your OpenKruise to version 1.4 or 1.5 before upgrading to 1.6.0, so as to avoid unexpected multiple leader problem during the installation.
|
||||
- Bump Kubernetes dependency to 1.26.10. ([#1511](https://github.com/openkruise/kruise/pull/1511), [KaiShi](https://github.com/BH4AWS))
|
||||
- To avoid potential circular dependency problem, features rely on webhook will no longer work for resources under kube-system,
|
||||
e.g. SidecarSet, WorkloadSpread, PodUnavailableBudget, ContainerLaunchPriority and PersistentPodState. ([#92](https://github.com/openkruise/charts/pull/92), [@hantmac](https://github.com/hantmac))
|
||||
e.g. SidecarSet, WorkloadSpread, PodUnavailableBudget, ContainerLaunchPriority and PersistentPodState. ([#92](https://github.com/openkruise/charts/pull/92), [@hantmac](https://github.com/hantmac))
|
||||
|
||||
### Key Features
|
||||
- Fix WorkloadSpread incorrect subset allocation after workload rolling updating. ([#1197](https://github.com/openkruise/kruise/pull/1197), [veophi](https://github.com/veophi))
|
||||
|
@ -234,7 +354,7 @@ thanks!
|
|||
> No, really, you must read this before you upgrade
|
||||
|
||||
- Enable following feature-gates by default: ResourcesDeletionProtection, WorkloadSpread, PodUnavailableBudgetDeleteGate, InPlaceUpdateEnvFromMetadata,
|
||||
StatefulSetAutoDeletePVC, PodProbeMarkerGate. ([#1214](https://github.com/openkruise/kruise/pull/1214), [@zmberg](https://github.com/zmberg))
|
||||
StatefulSetAutoDeletePVC, PodProbeMarkerGate. ([#1214](https://github.com/openkruise/kruise/pull/1214), [@zmberg](https://github.com/zmberg))
|
||||
- Change Kruise leader election from configmap to configmapsleases, this is a smooth upgrade with no disruption to OpenKruise service. ([#1184](https://github.com/openkruise/kruise/pull/1184), [@YTGhost](https://github.com/YTGhost))
|
||||
|
||||
### New Feature: JobSidecarTerminator
|
||||
|
@ -296,7 +416,7 @@ So the Probe capabilities provided in Kubernetes have defined specific semantics
|
|||
**In addition, there is actually a need to customize Probe semantics and related behaviors**, such as:
|
||||
- **GameServer defines Idle Probe to determine whether the Pod currently has a game match**, if not, from the perspective of cost optimization, the Pod can be scaled down.
|
||||
- **K8S Operator defines the main-secondary probe to determine the role of the current Pod (main or secondary)**. When upgrading, the secondary can be upgraded first,
|
||||
so as to achieve the behavior of selecting the main only once during the upgrade process, reducing the service interruption time during the upgrade process.
|
||||
so as to achieve the behavior of selecting the main only once during the upgrade process, reducing the service interruption time during the upgrade process.
|
||||
|
||||
So we provides the ability to customize the Probe and return the result to the Pod yaml.
|
||||
|
||||
|
@ -843,7 +963,7 @@ spec:
|
|||
Since v0.7.0:
|
||||
|
||||
1. OpenKruise requires Kubernetes 1.13+ because of CRD conversion.
|
||||
Note that for Kubernetes 1.13 and 1.14, users must enable `CustomResourceWebhookConversion` feature-gate in kube-apiserver before install or upgrade Kruise.
|
||||
Note that for Kubernetes 1.13 and 1.14, users must enable `CustomResourceWebhookConversion` feature-gate in kube-apiserver before install or upgrade Kruise.
|
||||
2. OpenKruise official image supports multi-arch, by default including linux/amd64, linux/arm64, and linux/arm platforms.
|
||||
|
||||
### A NEW workload controller - AdvancedCronJob
|
||||
|
@ -1191,4 +1311,4 @@ It provides full features for more efficient, deterministic and controlled deplo
|
|||
#### Features
|
||||
|
||||
- Add SidecarSet that automatically injects sidecar container into selected pods
|
||||
- Support sidecar update functionality for SidecarSet
|
||||
- Support sidecar update functionality for SidecarSet
|
|
@ -1,6 +1,6 @@
|
|||
# Contributing to Openkruise
|
||||
|
||||
Welcome to Openkruise! Openkruise consists of several repositories under the organization.
|
||||
Welcome to Openkruise! Openkruise consists of several repositories under the organization.
|
||||
We encourage you to help out by reporting issues, improving documentation, fixing bugs, or adding new features.
|
||||
Please also take a look at our code of conduct, which details how contributors are expected to conduct themselves as part of the Openkruise community.
|
||||
|
||||
|
@ -10,7 +10,7 @@ To be honest, we regard every user of Openkruise as a very kind contributor.
|
|||
After experiencing Openkruise, you may have some feedback for the project.
|
||||
Then feel free to open an issue.
|
||||
|
||||
There are a lot of cases when you could open an issue:
|
||||
There are a lot of cases when you could open an issue:
|
||||
|
||||
- bug report
|
||||
- feature request
|
||||
|
@ -20,11 +20,11 @@ There are lot of cases when you could open an issue:
|
|||
- help wanted
|
||||
- doc incomplete
|
||||
- test improvement
|
||||
- any questions on project
|
||||
- any questions on the project
|
||||
- and so on
|
||||
|
||||
Also, we must remind you that when filing a new issue, please remember to remove the sensitive data from your post.
|
||||
Sensitive data could be password, secret key, network locations, private business data and so on.
|
||||
Also, we must remind you that when filing a new issue, please remember to remove the sensitive data from your post.
|
||||
Sensitive data could be passwords, secret keys, network locations, private business data, and so on.
|
||||
|
||||
## Code and doc contribution
|
||||
|
||||
|
@ -45,13 +45,14 @@ On GitHub, every improvement for Openkruise could be via a PR (short for pull re
|
|||
### Workspace Preparation
|
||||
|
||||
To put forward a PR, we assume you have registered a GitHub ID.
|
||||
Then you could finish the preparation in the following steps:
|
||||
Then you can finish the preparation in the following steps:
|
||||
|
||||
1. **Fork** Fork the repository you wish to work on. You just need to click the Fork button in the top-right of the project repository main page. Then you will end up with your repository under your GitHub username.
|
||||
2. **Clone** your own repository to develop locally. Use `git clone https://github.com/<your-username>/<project>.git` to clone repository to your local machine. Then you can create new branches to finish the change you wish to make.
|
||||
1. **Fork** Fork the repository you wish to work on. You just need to click the Fork button in the top-right of the project repository main page. Then you will end up with your repository under your GitHub username.
|
||||
2. **Clone** your own repository to develop locally. Use `git clone https://github.com/<your-username>/<project>.git` to clone the repository to your local machine. Then you can create new branches to finish the change you wish to make.
|
||||
3. **Set remote** upstream to be `https://github.com/openkruise/<project>.git` using the following two commands:
|
||||
|
||||
```bash
|
||||
cd <project>
|
||||
git remote add upstream https://github.com/openkruise/<project>.git
|
||||
git remote set-url --push upstream no-pushing
|
||||
```
|
||||
|
@ -60,7 +61,7 @@ Adding this, we can easily synchronize local branches with upstream branches.
|
|||
|
||||
4. **Create a branch** to add a new feature or fix issues
|
||||
|
||||
Update local working directory:
|
||||
Update the local working directory:
|
||||
|
||||
```bash
|
||||
cd <project>
|
||||
|
@ -79,16 +80,16 @@ Make any change on the new-branch then build and test your codes.
|
|||
|
||||
### PR Description
|
||||
|
||||
PR is the only way to make changes to Kruise project files.
|
||||
To help reviewers better understand your purpose, your PR description cannot be too detailed — the more detail, the better.
|
||||
PR is the only way to make changes to Kruise project files.
|
||||
To help reviewers better understand your purpose, your PR description cannot be too detailed — the more detail, the better.
|
||||
We encourage contributors to follow the [PR template](./.github/PULL_REQUEST_TEMPLATE.md) to finish the pull request.
|
||||
|
||||
### Developing Environment
|
||||
|
||||
As a contributor, if you want to make any contribution to Kruise project, we should reach an agreement on the version of tools used in the development environment.
|
||||
Here are some dependencies with specific versions:
|
||||
As a contributor, if you want to make any contribution to the Kruise project, we should reach an agreement on the version of tools used in the development environment.
|
||||
Here are some dependencies with specific versions:
|
||||
|
||||
- Golang : v1.18+
|
||||
- Golang : v1.22+
|
||||
- Kubernetes: v1.16+
|
||||
|
||||
### Developing guide
|
||||
|
@ -106,14 +107,14 @@ make build
|
|||
make test
|
||||
```
|
||||
|
||||
**There are some guide documents for contributors in [./docs/contributing/](./docs/contributing), such as debug guide to help you test your own branch in a Kubernetes cluster.**
|
||||
**There are some guide documents for contributors in [./docs/contributing/](./docs/contributing), such as a debug guide to help you test your own branch in a Kubernetes cluster.**
|
||||
|
||||
### Proposals
|
||||
|
||||
If you are going to contribute a feature with new API or needs significant effort, please submit a proposal in [./docs/proposals/](./docs/proposals) first.
|
||||
If you are going to contribute a feature with a new API, or one that needs significant effort, please submit a proposal in [./docs/proposals/](./docs/proposals) first.
|
||||
|
||||
### Kruise Helm Charts
|
||||
[kruise charts](https://github.com/openkruise/charts) is openKruise charts repo, include kruise, kruise rollout, kruise game.
|
||||
[kruise charts](https://github.com/openkruise/charts) is the openKruise charts repo, including kruise, kruise rollout, and kruise game.
|
||||
You can add the corresponding charts package in the versions directory as follows:
|
||||
```
|
||||
versions
|
||||
|
@ -133,13 +134,13 @@ You can add the corresponding charts package in the versions directory as follow
|
|||
|
||||
We choose GitHub as the primary place for Openkruise to collaborate.
|
||||
So the latest updates of Openkruise are always here.
|
||||
Although contributions via PR are an explicit way to help, we still call for any other ways.
|
||||
Although contributions via PR are an explicit way to help, we still call for any other ways.
|
||||
|
||||
- reply to others' issues if you can;
|
||||
- help solve other users' problems;
|
||||
- help review others' PR designs;
|
||||
- help review others' code in PRs;
|
||||
- discuss Openkruise to make things clearer;
|
||||
- discuss Openkruise to make things clearer;
|
||||
- advocate Openkruise technology beyond GitHub;
|
||||
- write blogs on Openkruise and so on.
|
||||
|
||||
|
@ -147,5 +148,5 @@ In a word, **ANY HELP IS CONTRIBUTION**.
|
|||
|
||||
## Join Openkruise as a member
|
||||
|
||||
You are also welcome to join the Openkruise team if you are willing to participate in the Openkruise community continuously and stay active.
|
||||
You are also welcome to join the Openkruise team if you are willing to participate in the Openkruise community continuously and stay active.
|
||||
Please read and follow the [Community Membership](https://github.com/openkruise/community/blob/master/community-membership.md).
|
||||
|
|
|
@ -1,8 +1,7 @@
|
|||
# Build the manager and daemon binaries
|
||||
ARG BASE_IMAGE=alpine
|
||||
ARG BASE_IMAGE_VERSION=3.19
|
||||
FROM golang:1.20.14-alpine3.19 as builder
|
||||
|
||||
ARG BASE_IMAGE_VERSION=3.21@sha256:56fa17d2a7e7f168a043a2712e63aed1f8543aeafdcee47c58dcffe38ed51099
|
||||
FROM golang:1.23.9-alpine3.21@sha256:fb7ea5cd19bc4eea3eb0d1972919ec0f6229b138985ce4b35ce5846c6bc02973 AS builder
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
COPY go.mod go.mod
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
ARG BASE_IMAGE=alpine
|
||||
ARG BASE_IMAGE_VERSION=3.19
|
||||
FROM golang:1.20.14-alpine3.19 AS builder
|
||||
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
COPY go.mod go.mod
|
||||
COPY go.sum go.sum
|
||||
|
||||
# Copy the go source
|
||||
COPY apis/ apis/
|
||||
COPY cmd/ cmd/
|
||||
COPY pkg/ pkg/
|
||||
# Build
|
||||
RUN --mount=type=cache,target=/go CGO_ENABLED=0 GO111MODULE=on go build -a -o helm_hook ./cmd/helm_hook/main.go
|
||||
|
||||
FROM ${BASE_IMAGE}:${BASE_IMAGE_VERSION}
|
||||
WORKDIR /
|
||||
RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
|
||||
RUN set -eux; \
|
||||
mkdir -p /log /tmp && \
|
||||
chown -R nobody:nobody /log && \
|
||||
chown -R nobody:nobody /tmp && \
|
||||
apk --no-cache --update upgrade && \
|
||||
apk --no-cache add ca-certificates && \
|
||||
apk --no-cache add tzdata && \
|
||||
rm -rf /var/cache/apk/* && \
|
||||
update-ca-certificates && \
|
||||
echo "only include root and nobody user" && \
|
||||
echo -e "root:x:0:0:root:/root:/bin/ash\nnobody:x:65534:65534:nobody:/:/sbin/nologin" | tee /etc/passwd && \
|
||||
echo -e "root:x:0:root\nnobody:x:65534:" | tee /etc/group
|
||||
COPY --from=builder /workspace/helm_hook .
|
||||
RUN chown -R nobody:nobody /helm_hook && \
|
||||
rm -rf /usr/local/sbin/* && \
|
||||
rm -rf /usr/local/bin/* && \
|
||||
rm -rf /usr/sbin/* && \
|
||||
rm -rf /usr/bin/* && \
|
||||
rm -rf /sbin/* && \
|
||||
rm -rf /bin/*
|
||||
ENTRYPOINT ["/helm_hook"]
|
|
@ -1,7 +1,8 @@
|
|||
# Build the manager and daemon binaries
|
||||
ARG BASE_IMAGE=alpine
|
||||
ARG BASE_IMAGE_VERSION=3.19
|
||||
FROM --platform=$BUILDPLATFORM golang:1.20.14-alpine3.19 as builder
|
||||
ARG BASE_IMAGE_VERSION=3.21@sha256:56fa17d2a7e7f168a043a2712e63aed1f8543aeafdcee47c58dcffe38ed51099
|
||||
ARG BUILD_BASE_IMAGE=golang:1.22.11-alpine3.21@sha256:161858498a61ce093c8e2bd704299bfb23e5bff79aef99b6c40bb9c6a43acf0f
|
||||
FROM --platform=$BUILDPLATFORM ${BUILD_BASE_IMAGE} AS builder
|
||||
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
|
@ -14,6 +15,9 @@ COPY apis/ apis/
|
|||
COPY cmd/ cmd/
|
||||
COPY pkg/ pkg/
|
||||
|
||||
#ENV GOPROXY=https://goproxy.cn,direct
|
||||
RUN go mod tidy
|
||||
|
||||
# Build
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
# Build Windows image for kruise-daemon
|
||||
|
||||
# Using Windows HostProcess container base image: https://github.com/microsoft/windows-host-process-containers-base-image
|
||||
ARG BASE_IMAGE=mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image
|
||||
ARG BASE_IMAGE_VERSION=v1.0.0
|
||||
FROM ${BASE_IMAGE}:${BASE_IMAGE_VERSION}
|
||||
|
||||
WORKDIR /
|
||||
COPY ./bin/kruise-daemon.exe .
|
||||
|
||||
ENTRYPOINT ["kruise-daemon.exe"]
|
62
Makefile
62
Makefile
|
@ -1,7 +1,10 @@
|
|||
# Image URL to use all building/pushing image targets
|
||||
IMG ?= openkruise/kruise-manager:test
|
||||
HOOK_IMG ?= openkruise/kruise-helm-hook:test
|
||||
WIN_DAEMON_IMG ?= openkruise/kruise-daemon-win:test
|
||||
# Platforms to build the image for
|
||||
PLATFORMS ?= linux/amd64,linux/arm64,linux/ppc64le
|
||||
WIN_PLATFORMS ?= windows/amd64
|
||||
CRD_OPTIONS ?= "crd:crdVersions=v1"
|
||||
|
||||
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
|
||||
|
@ -14,7 +17,7 @@ GOOS ?= $(shell go env GOOS)
|
|||
|
||||
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
|
||||
# Run `setup-envtest list` to list available versions.
|
||||
ENVTEST_K8S_VERSION ?= 1.28.0
|
||||
ENVTEST_K8S_VERSION ?= 1.32.0
|
||||
|
||||
# Setting SHELL to bash allows bash commands to be executed by recipes.
|
||||
# This is a requirement for 'setup-envtest.sh' in the test target.
|
||||
|
@ -27,7 +30,7 @@ all: build
|
|||
##@ Development
|
||||
|
||||
go_check:
|
||||
@scripts/check_go_version "1.20"
|
||||
@scripts/check_go_version "1.23"
|
||||
|
||||
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
|
||||
@scripts/generate_client.sh
|
||||
|
@ -35,7 +38,7 @@ generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and
|
|||
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./apis/..."
|
||||
|
||||
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
|
||||
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
|
||||
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./apis/..." output:crd:artifacts:config=config/crd/bases
|
||||
|
||||
fmt: go_check ## Run go fmt against code.
|
||||
go fmt $(shell go list ./... | grep -v /vendor/)
|
||||
|
@ -49,8 +52,16 @@ lint: golangci-lint ## Run golangci-lint against code.
|
|||
test: generate fmt vet manifests envtest ## Run tests
|
||||
echo $(ENVTEST)
|
||||
go build -o pkg/daemon/criruntime/imageruntime/fake_plugin/fake-credential-plugin pkg/daemon/criruntime/imageruntime/fake_plugin/main.go && chmod +x pkg/daemon/criruntime/imageruntime/fake_plugin/fake-credential-plugin
|
||||
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./pkg/... -coverprofile cover.out
|
||||
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test -race ./pkg/... -coverprofile raw-cover.out
|
||||
rm pkg/daemon/criruntime/imageruntime/fake_plugin/fake-credential-plugin
|
||||
grep -v "pkg/client" raw-cover.out > cover.out
|
||||
|
||||
atest:
|
||||
echo $(ENVTEST)
|
||||
go build -o pkg/daemon/criruntime/imageruntime/fake_plugin/fake-credential-plugin pkg/daemon/criruntime/imageruntime/fake_plugin/main.go && chmod +x pkg/daemon/criruntime/imageruntime/fake_plugin/fake-credential-plugin
|
||||
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test -race ./pkg/... -coverprofile raw-cover.out
|
||||
rm pkg/daemon/criruntime/imageruntime/fake_plugin/fake-credential-plugin
|
||||
grep -v "pkg/client" raw-cover.out > cover.out
|
||||
|
||||
coverage-report: ## Generate cover.html from cover.out
|
||||
go tool cover -html=cover.out -o cover.html
|
||||
|
@ -65,6 +76,9 @@ endif
|
|||
build: generate fmt vet manifests ## Build manager binary.
|
||||
go build -o bin/manager main.go
|
||||
|
||||
build-win-daemon: ## Build Windows daemon binary.
|
||||
GOOS=windows go build -o bin/kruise-daemon.exe ./cmd/daemon/main.go
|
||||
|
||||
run: manifests generate fmt vet ## Run a controller from your host.
|
||||
go run ./main.go
|
||||
|
||||
|
@ -74,6 +88,9 @@ docker-build: ## Build docker image with the manager.
|
|||
docker-push: ## Push docker image with the manager.
|
||||
docker push ${IMG}
|
||||
|
||||
docker-win-daemon: # Build Windows docker image with the daemon
|
||||
docker buildx build -f ./Dockerfile_windows --pull --no-cache --platform=$(WIN_PLATFORMS) . -t $(WIN_DAEMON_IMG)
|
||||
|
||||
# Build and push the multiarchitecture docker images and manifest.
|
||||
docker-multiarch:
|
||||
docker buildx build -f ./Dockerfile_multiarch --pull --no-cache --platform=$(PLATFORMS) --push . -t $(IMG)
|
||||
|
@ -89,7 +106,6 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified
|
|||
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
|
||||
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
|
||||
$(KUSTOMIZE) build config/default | kubectl apply -f -
|
||||
echo -e "resources:\n- manager.yaml" > config/manager/kustomization.yaml
|
||||
$(KUSTOMIZE) build config/daemonconfig | kubectl apply -f -
|
||||
|
||||
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
|
||||
|
@ -99,11 +115,11 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi
|
|||
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
|
||||
controller-gen: ## Download controller-gen locally if necessary.
|
||||
|
||||
# controller-gen@v0.14.0 comply with k8s.io/api v0.28.x
|
||||
ifeq ("$(shell $(CONTROLLER_GEN) --version 2> /dev/null)", "Version: v0.14.0")
|
||||
# controller-gen@v0.16.5 comply with k8s.io/api v0.30.x
|
||||
ifeq ("$(shell $(CONTROLLER_GEN) --version 2> /dev/null)", "Version: v0.16.5")
|
||||
else
|
||||
rm -rf $(CONTROLLER_GEN)
|
||||
$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0)
|
||||
$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.17.3)
|
||||
endif
|
||||
KUSTOMIZE = $(shell pwd)/bin/kustomize
|
||||
kustomize: ## Download kustomize locally if necessary.
|
||||
|
@ -141,17 +157,24 @@ $(TESTBIN):
|
|||
ENVTEST ?= $(TESTBIN)/setup-envtest
|
||||
|
||||
.PHONY: envtest
|
||||
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
|
||||
$(ENVTEST): $(TESTBIN)
|
||||
ifeq (, $(shell ls $(TESTBIN)/setup-envtest 2>/dev/null))
|
||||
GOBIN=$(TESTBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@c7e1dc9b5302d649d5531e19168dd7ea0013736d
|
||||
endif
|
||||
envtest: $(TESTBIN) ## Download/update envtest-setup to latest version.
|
||||
GOBIN=$(TESTBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
|
||||
|
||||
# create-cluster creates a kube cluster with kind.
|
||||
.PHONY: create-cluster
|
||||
create-cluster: $(tools/kind)
|
||||
tools/hack/create-cluster.sh
|
||||
|
||||
DISABLE_CSI ?= false
|
||||
|
||||
.PHONY: install-csi
|
||||
install-csi:
|
||||
ifeq ($(DISABLE_CSI), true)
|
||||
@echo "CSI is disabled, skip"
|
||||
else
|
||||
cd tools/hack/csi-driver-host-path; ./install-snapshot.sh
|
||||
endif
|
||||
|
||||
# delete-cluster deletes a kube cluster.
|
||||
.PHONY: delete-cluster
|
||||
delete-cluster: $(tools/kind) ## Delete kind cluster.
|
||||
|
@ -165,6 +188,13 @@ kube-load-image: $(tools/kind)
|
|||
# install-kruise install kruise with local build image to kube cluster.
|
||||
.PHONY: install-kruise
|
||||
install-kruise:
|
||||
kubectl create namespace kruise-system;
|
||||
ifeq ($(DISABLE_E2E_CONFIG), true)
|
||||
@echo "Skipping e2e config application...";
|
||||
else
|
||||
@echo "Applying e2e config...";
|
||||
kubectl apply -f test/kruise-e2e-config.yaml;
|
||||
endif
|
||||
tools/hack/install-kruise.sh $(IMG)
|
||||
|
||||
# run-kruise-e2e-test starts to run kruise e2e tests.
|
||||
|
@ -178,4 +208,8 @@ generate_helm_crds:
|
|||
|
||||
# kruise-e2e-test runs kruise e2e tests.
|
||||
.PHONY: kruise-e2e-test
|
||||
kruise-e2e-test: $(tools/kind) delete-cluster create-cluster docker-build kube-load-image install-kruise run-kruise-e2e-test delete-cluster
|
||||
kruise-e2e-test: $(tools/kind) delete-cluster create-cluster install-csi docker-build kube-load-image install-kruise run-kruise-e2e-test delete-cluster
|
||||
|
||||
.PHONY: docker-build-hook
|
||||
docker-build-hook:
|
||||
docker buildx build -f ./Dockerfile_helm_hook --pull --no-cache --platform=$(PLATFORMS) --push . -t $(HOOK_IMG)
|
|
@ -66,6 +66,14 @@ OpenKruise (官网: [https://openkruise.io](https://openkruise.io)) 是CNCF([Clo
|
|||
- 安装/升级 Kruise [稳定版本](https://openkruise.io/docs/installation)
|
||||
- 安装/升级 Kruise [最新版本(包括 alpha/beta/rc)](https://openkruise.io/docs/next/installation)
|
||||
|
||||
### 在阿里云上快速体验
|
||||
|
||||
- 3分钟内在阿里云上创建 Kruise 体验环境:
|
||||
|
||||
<a href="https://acs.console.aliyun.com/quick-deploy?repo=openkruise/charts&branch=master&paths=%5B%22versions/kruise/1.7.3%22%5D" target="_blank">
|
||||
<img src="https://img.alicdn.com/imgextra/i1/O1CN01aiPSuA1Wiz7wkgF5u_!!6000000002823-55-tps-399-70.svg" width="200" alt="Deploy on Alibaba Cloud">
|
||||
</a>
|
||||
|
||||
## 用户
|
||||
|
||||
登记: [如果贵司正在使用 Kruise 请留言](https://github.com/openkruise/kruise/issues/289)
|
||||
|
|
11
README.md
11
README.md
|
@ -7,6 +7,7 @@
|
|||
[](https://circleci.com/gh/openkruise/kruise)
|
||||
[](https://codecov.io/gh/openkruise/kruise)
|
||||
[](./CODE_OF_CONDUCT.md)
|
||||
[](https://gurubase.io/g/kruise)
|
||||
|
||||
English | [简体中文](./README-zh_CN.md)
|
||||
|
||||
|
@ -31,7 +32,7 @@ It consists of several controllers which extend and complement the [Kubernetes c
|
|||
|
||||
- **Sidecar container Management**
|
||||
|
||||
Kruise simplify sidecar injection and enable sidecar in-place update. Kruise also enhance the sidecar startup and termination control.
|
||||
Kruise simplifies sidecar injection and enables sidecar in-place update. Kruise also enhances the sidecar startup and termination control.
|
||||
|
||||
- [**SidecarSet** for defining and upgrading your own sidecars](https://openkruise.io/docs/user-manuals/sidecarset)
|
||||
- [**Container Launch Priority** to control the container startup orders](https://openkruise.io/docs/user-manuals/containerlaunchpriority)
|
||||
|
@ -67,6 +68,14 @@ You can view the full documentation from the [OpenKruise website](https://openkr
|
|||
- Install or upgrade Kruise with [the stable version](https://openkruise.io/docs/installation).
|
||||
- Install or upgrade Kruise with [the latest version including alpha/beta/rc](https://openkruise.io/docs/next/installation).
|
||||
|
||||
### Get Your Own Demo with Alibaba Cloud
|
||||
|
||||
- install Kruise on a Serverless K8S cluster in 3 minutes, try:
|
||||
|
||||
<a href="https://acs.console.aliyun.com/quick-deploy?repo=openkruise/charts&branch=master&paths=%5B%22versions/kruise/1.8.0%22%5D" target="_blank">
|
||||
<img src="https://img.alicdn.com/imgextra/i1/O1CN01aiPSuA1Wiz7wkgF5u_!!6000000002823-55-tps-399-70.svg" width="200" alt="Deploy on Alibaba Cloud">
|
||||
</a>
|
||||
|
||||
## Users
|
||||
|
||||
Registration: [Who is using Kruise](https://github.com/openkruise/kruise/issues/289)
|
||||
|
|
|
@ -15,7 +15,7 @@ Look at [the last release](https://github.com/openkruise/kruise/releases/latest)
|
|||
|
||||
Add a new section in [CHANGELOG.md](./CHANGELOG.md) for the new version that is being released along with the new features, patches and deprecations it introduces.
|
||||
|
||||
It should not include every single change but solely what matters to our customers, for example issue template that has changed is not important.
|
||||
It should not include every single change but solely what matters to our customers, for example, an issue template that has changed is not important.
|
||||
|
||||
## 2. Publish documentation for new version
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
Defined below are the security persons of contact for this project. If you have questions regarding the triaging and handling of incoming problems, they may be contacted.
|
||||
|
||||
The following security contacts have agreed to abide by the Embargo Policy $LINK and will be removed and replaced if found to be in violation of that agreement.
|
||||
The following security contacts have agreed to abide by the [Embargo Policy](embargo-policy.md) and will be removed and replaced if found to be in violation of that agreement.
|
||||
|
||||
DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, USE THE INSTRUCTIONS AT [SECURITY.md](SECURITY.md)
|
||||
|
||||
|
|
|
@ -192,12 +192,6 @@ func SetDefaultPodVolumes(volumes []corev1.Volume) {
|
|||
if a.VolumeSource.Secret != nil {
|
||||
v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
|
||||
}
|
||||
if a.VolumeSource.ISCSI != nil {
|
||||
v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
|
||||
}
|
||||
if a.VolumeSource.RBD != nil {
|
||||
v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
|
||||
}
|
||||
if a.VolumeSource.DownwardAPI != nil {
|
||||
v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
|
||||
for j := range a.VolumeSource.DownwardAPI.Items {
|
||||
|
@ -210,9 +204,6 @@ func SetDefaultPodVolumes(volumes []corev1.Volume) {
|
|||
if a.VolumeSource.ConfigMap != nil {
|
||||
v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
|
||||
}
|
||||
if a.VolumeSource.AzureDisk != nil {
|
||||
v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
|
||||
}
|
||||
if a.VolumeSource.Projected != nil {
|
||||
v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
|
||||
for j := range a.VolumeSource.Projected.Sources {
|
||||
|
@ -230,8 +221,5 @@ func SetDefaultPodVolumes(volumes []corev1.Volume) {
|
|||
}
|
||||
}
|
||||
}
|
||||
if a.VolumeSource.ScaleIO != nil {
|
||||
v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
v1 "k8s.io/kubernetes/pkg/apis/core/v1"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
|
@ -70,7 +70,7 @@ func SetDefaultInjectRevision(strategy *v1alpha1.SidecarSetInjectionStrategy) {
|
|||
|
||||
func SetDefaultRevisionHistoryLimit(revisionHistoryLimit **int32) {
|
||||
if *revisionHistoryLimit == nil {
|
||||
*revisionHistoryLimit = utilpointer.Int32Ptr(10)
|
||||
*revisionHistoryLimit = ptr.To(int32(10))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -208,7 +208,7 @@ func SetDefaultsBroadcastJob(obj *v1alpha1.BroadcastJob, injectTemplateDefaults
|
|||
// SetDefaults_UnitedDeployment set default values for UnitedDeployment.
|
||||
func SetDefaultsUnitedDeployment(obj *v1alpha1.UnitedDeployment, injectTemplateDefaults bool) {
|
||||
if obj.Spec.RevisionHistoryLimit == nil {
|
||||
obj.Spec.RevisionHistoryLimit = utilpointer.Int32Ptr(10)
|
||||
obj.Spec.RevisionHistoryLimit = ptr.To(int32(10))
|
||||
}
|
||||
|
||||
if len(obj.Spec.UpdateStrategy.Type) == 0 {
|
||||
|
@ -255,10 +255,10 @@ func SetDefaultsUnitedDeployment(obj *v1alpha1.UnitedDeployment, injectTemplateD
|
|||
// SetDefaults_CloneSet set default values for CloneSet.
|
||||
func SetDefaultsCloneSet(obj *v1alpha1.CloneSet, injectTemplateDefaults bool) {
|
||||
if obj.Spec.Replicas == nil {
|
||||
obj.Spec.Replicas = utilpointer.Int32Ptr(1)
|
||||
obj.Spec.Replicas = ptr.To(int32(1))
|
||||
}
|
||||
if obj.Spec.RevisionHistoryLimit == nil {
|
||||
obj.Spec.RevisionHistoryLimit = utilpointer.Int32Ptr(10)
|
||||
obj.Spec.RevisionHistoryLimit = ptr.To(int32(10))
|
||||
}
|
||||
|
||||
if injectTemplateDefaults {
|
||||
|
@ -370,10 +370,10 @@ func SetDefaultsNodeImage(obj *v1alpha1.NodeImage) {
|
|||
|
||||
func SetDefaultsImageTagPullPolicy(obj *v1alpha1.ImageTagPullPolicy) {
|
||||
if obj.TimeoutSeconds == nil {
|
||||
obj.TimeoutSeconds = utilpointer.Int32Ptr(600)
|
||||
obj.TimeoutSeconds = ptr.To(int32(600))
|
||||
}
|
||||
if obj.BackoffLimit == nil {
|
||||
obj.BackoffLimit = utilpointer.Int32Ptr(3)
|
||||
obj.BackoffLimit = ptr.To(int32(3))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -386,10 +386,10 @@ func SetDefaultsImagePullJob(obj *v1alpha1.ImagePullJob, addProtection bool) {
|
|||
obj.Spec.PullPolicy = &v1alpha1.PullPolicy{}
|
||||
}
|
||||
if obj.Spec.PullPolicy.TimeoutSeconds == nil {
|
||||
obj.Spec.PullPolicy.TimeoutSeconds = utilpointer.Int32Ptr(600)
|
||||
obj.Spec.PullPolicy.TimeoutSeconds = ptr.To(int32(600))
|
||||
}
|
||||
if obj.Spec.PullPolicy.BackoffLimit == nil {
|
||||
obj.Spec.PullPolicy.BackoffLimit = utilpointer.Int32Ptr(3)
|
||||
obj.Spec.PullPolicy.BackoffLimit = ptr.To(int32(3))
|
||||
}
|
||||
if obj.Spec.ImagePullPolicy == "" {
|
||||
obj.Spec.ImagePullPolicy = v1alpha1.PullIfNotPresent
|
||||
|
@ -408,9 +408,9 @@ func SetDefaultsImageListPullJob(obj *v1alpha1.ImageListPullJob) {
|
|||
obj.Spec.PullPolicy = &v1alpha1.PullPolicy{}
|
||||
}
|
||||
if obj.Spec.PullPolicy.TimeoutSeconds == nil {
|
||||
obj.Spec.PullPolicy.TimeoutSeconds = utilpointer.Int32Ptr(600)
|
||||
obj.Spec.PullPolicy.TimeoutSeconds = ptr.To(int32(600))
|
||||
}
|
||||
if obj.Spec.PullPolicy.BackoffLimit == nil {
|
||||
obj.Spec.PullPolicy.BackoffLimit = utilpointer.Int32Ptr(3)
|
||||
obj.Spec.PullPolicy.BackoffLimit = ptr.To(int32(3))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,13 +17,14 @@ limitations under the License.
|
|||
package defaults
|
||||
|
||||
import (
|
||||
"github.com/openkruise/kruise/apis/apps/v1beta1"
|
||||
"github.com/openkruise/kruise/pkg/features"
|
||||
utilfeature "github.com/openkruise/kruise/pkg/util/feature"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
v1 "k8s.io/kubernetes/pkg/apis/core/v1"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/openkruise/kruise/apis/apps/v1beta1"
|
||||
"github.com/openkruise/kruise/pkg/features"
|
||||
utilfeature "github.com/openkruise/kruise/pkg/util/feature"
|
||||
)
|
||||
|
||||
// SetDefaultsStatefulSet set default values for StatefulSet.
|
||||
|
@ -42,7 +43,7 @@ func SetDefaultsStatefulSet(obj *v1beta1.StatefulSet, injectTemplateDefaults boo
|
|||
obj.Spec.UpdateStrategy.RollingUpdate = &v1beta1.RollingUpdateStatefulSetStrategy{}
|
||||
}
|
||||
if obj.Spec.UpdateStrategy.RollingUpdate.Partition == nil {
|
||||
obj.Spec.UpdateStrategy.RollingUpdate.Partition = utilpointer.Int32Ptr(0)
|
||||
obj.Spec.UpdateStrategy.RollingUpdate.Partition = ptr.To(int32(0))
|
||||
}
|
||||
if obj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable == nil {
|
||||
maxUnavailable := intstr.FromInt(1)
|
||||
|
@ -52,7 +53,7 @@ func SetDefaultsStatefulSet(obj *v1beta1.StatefulSet, injectTemplateDefaults boo
|
|||
obj.Spec.UpdateStrategy.RollingUpdate.PodUpdatePolicy = v1beta1.RecreatePodUpdateStrategyType
|
||||
}
|
||||
if obj.Spec.UpdateStrategy.RollingUpdate.MinReadySeconds == nil {
|
||||
obj.Spec.UpdateStrategy.RollingUpdate.MinReadySeconds = utilpointer.Int32Ptr(0)
|
||||
obj.Spec.UpdateStrategy.RollingUpdate.MinReadySeconds = ptr.To(int32(0))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -68,11 +69,17 @@ func SetDefaultsStatefulSet(obj *v1beta1.StatefulSet, injectTemplateDefaults boo
|
|||
}
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoResizePVCGate) {
|
||||
if obj.Spec.VolumeClaimUpdateStrategy.Type == "" {
|
||||
obj.Spec.VolumeClaimUpdateStrategy.Type = v1beta1.OnPVCDeleteVolumeClaimUpdateStrategyType
|
||||
}
|
||||
}
|
||||
|
||||
if obj.Spec.Replicas == nil {
|
||||
obj.Spec.Replicas = utilpointer.Int32Ptr(1)
|
||||
obj.Spec.Replicas = ptr.To(int32(1))
|
||||
}
|
||||
if obj.Spec.RevisionHistoryLimit == nil {
|
||||
obj.Spec.RevisionHistoryLimit = utilpointer.Int32Ptr(10)
|
||||
obj.Spec.RevisionHistoryLimit = ptr.To(int32(10))
|
||||
}
|
||||
|
||||
if injectTemplateDefaults {
|
||||
|
|
|
@ -62,12 +62,21 @@ type InPlaceUpdateState struct {
|
|||
// UpdateEnvFromMetadata indicates there are envs from annotations/labels that should be in-place update.
|
||||
UpdateEnvFromMetadata bool `json:"updateEnvFromMetadata,omitempty"`
|
||||
|
||||
// UpdateResources indicates there are resources that should be in-place update.
|
||||
UpdateResources bool `json:"updateResources,omitempty"`
|
||||
|
||||
// UpdateImages indicates there are images that should be in-place update.
|
||||
UpdateImages bool `json:"updateImages,omitempty"`
|
||||
|
||||
// NextContainerImages is the containers with lower priority that waiting for in-place update images in next batch.
|
||||
NextContainerImages map[string]string `json:"nextContainerImages,omitempty"`
|
||||
|
||||
// NextContainerRefMetadata is the containers with lower priority that waiting for in-place update labels/annotations in next batch.
|
||||
NextContainerRefMetadata map[string]metav1.ObjectMeta `json:"nextContainerRefMetadata,omitempty"`
|
||||
|
||||
// NextContainerResources is the containers with lower priority that waiting for in-place update resources in next batch.
|
||||
NextContainerResources map[string]v1.ResourceRequirements `json:"nextContainerResources,omitempty"`
|
||||
|
||||
// PreCheckBeforeNext is the pre-check that must pass before the next containers can be in-place update.
|
||||
PreCheckBeforeNext *InPlaceUpdatePreCheckBeforeNext `json:"preCheckBeforeNext,omitempty"`
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ limitations under the License.
|
|||
package pub
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
|
@ -105,6 +106,13 @@ func (in *InPlaceUpdateState) DeepCopyInto(out *InPlaceUpdateState) {
|
|||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.NextContainerResources != nil {
|
||||
in, out := &in.NextContainerResources, &out.NextContainerResources
|
||||
*out = make(map[string]corev1.ResourceRequirements, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.PreCheckBeforeNext != nil {
|
||||
in, out := &in.PreCheckBeforeNext, &out.PreCheckBeforeNext
|
||||
*out = new(InPlaceUpdatePreCheckBeforeNext)
|
||||
|
|
|
@ -17,10 +17,11 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
appspub "github.com/openkruise/kruise/apis/apps/pub"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
appspub "github.com/openkruise/kruise/apis/apps/pub"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -178,6 +179,8 @@ type CloneSetStatus struct {
|
|||
|
||||
// UpdatedAvailableReplicas is the number of Pods created by the CloneSet controller from the CloneSet version
|
||||
// indicated by updateRevision and have a Ready Condition for at least minReadySeconds.
|
||||
// Notice: when enable InPlaceWorkloadVerticalScaling, pod during resource resizing will also be unavailable.
|
||||
// This means these pod will be counted in maxUnavailable.
|
||||
UpdatedAvailableReplicas int32 `json:"updatedAvailableReplicas,omitempty"`
|
||||
|
||||
// ExpectedUpdatedReplicas is the number of Pods that should be updated by CloneSet controller.
|
||||
|
@ -237,6 +240,7 @@ type CloneSetCondition struct {
|
|||
// +kubebuilder:printcolumn:name="DESIRED",type="integer",JSONPath=".spec.replicas",description="The desired number of pods."
|
||||
// +kubebuilder:printcolumn:name="UPDATED",type="integer",JSONPath=".status.updatedReplicas",description="The number of pods updated."
|
||||
// +kubebuilder:printcolumn:name="UPDATED_READY",type="integer",JSONPath=".status.updatedReadyReplicas",description="The number of pods updated and ready."
|
||||
// +kubebuilder:printcolumn:name="UPDATED_AVAILABLE",type="integer",JSONPath=".status.updatedAvailableReplicas",description="The number of pods updated and available."
|
||||
// +kubebuilder:printcolumn:name="READY",type="integer",JSONPath=".status.readyReplicas",description="The number of pods ready."
|
||||
// +kubebuilder:printcolumn:name="TOTAL",type="integer",JSONPath=".status.replicas",description="The number of currently all pods."
|
||||
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC."
|
||||
|
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
|
@ -87,6 +88,17 @@ const (
|
|||
ProbeUnknown ProbeState = "Unknown"
|
||||
)
|
||||
|
||||
func (p ProbeState) IsEqualPodConditionStatus(status corev1.ConditionStatus) bool {
|
||||
switch status {
|
||||
case corev1.ConditionTrue:
|
||||
return p == ProbeSucceeded
|
||||
case corev1.ConditionFalse:
|
||||
return p == ProbeFailed
|
||||
default:
|
||||
return p == ProbeUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:openapi-gen=true
|
||||
|
|
|
@ -130,6 +130,10 @@ type NodeImageStatus struct {
|
|||
// +optional
|
||||
Pulling int32 `json:"pulling"`
|
||||
|
||||
// The number of pulling tasks which are waiting.
|
||||
// +optional
|
||||
Waiting int32 `json:"waiting"`
|
||||
|
||||
// all statuses of active image pulling tasks
|
||||
ImageStatuses map[string]ImageStatus `json:"imageStatuses,omitempty"`
|
||||
|
||||
|
|
|
@ -21,6 +21,31 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
// PodProbeMarkerAnnotationKey records the Probe Spec, mainly used for serverless Pod scenarios, as follows:
|
||||
// annotations:
|
||||
// kruise.io/podprobe: |
|
||||
// [
|
||||
// {
|
||||
// "containerName": "minecraft",
|
||||
// "name": "healthy",
|
||||
// "podConditionType": "game.kruise.io/healthy",
|
||||
// "probe": {
|
||||
// "exec": {
|
||||
// "command": [
|
||||
// "bash",
|
||||
// "/data/probe.sh"
|
||||
// ]
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// ]
|
||||
PodProbeMarkerAnnotationKey = "kruise.io/podprobe"
|
||||
// PodProbeMarkerListAnnotationKey records the injected PodProbeMarker Name List
|
||||
// example: kruise.io/podprobemarker-list="probe-marker-1,probe-marker-2"
|
||||
PodProbeMarkerListAnnotationKey = "kruise.io/podprobemarker-list"
|
||||
)
|
||||
|
||||
// PodProbeMarkerSpec defines the desired state of PodProbeMarker
|
||||
type PodProbeMarkerSpec struct {
|
||||
// Selector is a label query over pods that should exec custom probe
|
||||
|
|
|
@ -132,9 +132,14 @@ type SidecarContainer struct {
|
|||
UpgradeStrategy SidecarContainerUpgradeStrategy `json:"upgradeStrategy,omitempty"`
|
||||
|
||||
// If ShareVolumePolicy is enabled, the sidecar container will share the other container's VolumeMounts
|
||||
// in the pod(don't contains the injected sidecar container).
|
||||
// in the pod(not including the injected sidecar container).
|
||||
ShareVolumePolicy ShareVolumePolicy `json:"shareVolumePolicy,omitempty"`
|
||||
|
||||
// If ShareVolumeDevicePolicy is enabled, the sidecar container will share the other container's VolumeDevices
|
||||
// in the pod(don't contain the injected sidecar container).
|
||||
// This is a pointer to ensure that the sidecarset-hash does not change if the user does not configure this field, mainly for compatibility with older versions.
|
||||
ShareVolumeDevicePolicy *ShareVolumePolicy `json:"shareVolumeDevicePolicy,omitempty"`
|
||||
|
||||
// TransferEnv will transfer env info from other container
|
||||
// SourceContainerName is pod.spec.container[x].name; EnvName is pod.spec.container[x].Env.name
|
||||
TransferEnv []TransferEnvVar `json:"transferEnv,omitempty"`
|
||||
|
@ -216,7 +221,8 @@ type SidecarSetInjectRevision struct {
|
|||
// + optional
|
||||
RevisionName *string `json:"revisionName,omitempty"`
|
||||
// Policy describes the behavior of revision injection.
|
||||
// Defaults to Always.
|
||||
// +kubebuilder:validation:Enum=Always;Partial;
|
||||
// +kubebuilder:default=Always
|
||||
Policy SidecarSetInjectRevisionPolicy `json:"policy,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -226,9 +232,15 @@ const (
|
|||
// AlwaysSidecarSetInjectRevisionPolicy means the SidecarSet will always inject
|
||||
// the specific revision to Pods when pod creating, except matching UpdateStrategy.Selector.
|
||||
AlwaysSidecarSetInjectRevisionPolicy SidecarSetInjectRevisionPolicy = "Always"
|
||||
// PartitionBasedSidecarSetInjectRevisionPolicy means the SidecarSet will inject the
|
||||
// specific or the latest revision according to Partition.
|
||||
//PartitionBasedSidecarSetInjectRevisionPolicy SidecarSetInjectRevisionPolicy = "PartitionBased"
|
||||
|
||||
// PartialSidecarSetInjectRevisionPolicy means the SidecarSet will inject the specific or the latest revision according to UpdateStrategy.
|
||||
//
|
||||
// If UpdateStrategy.Pause is not true, only when a newly created Pod is **not** selected by the Selector explicitly
|
||||
// configured in `UpdateStrategy` will it be injected with the specified version of the Sidecar.
|
||||
// Under all other conditions, newly created Pods have a probability of being injected with the latest Sidecar,
|
||||
// where the probability is `1 - UpdateStrategy.Partition`.
|
||||
// If `Partition` is not a percentage or is not configured, its value is considered to be 0%.
|
||||
PartialSidecarSetInjectRevisionPolicy SidecarSetInjectRevisionPolicy = "Partial"
|
||||
)
|
||||
|
||||
// SidecarSetUpdateStrategy indicates the strategy that the SidecarSet
|
||||
|
@ -242,11 +254,15 @@ type SidecarSetUpdateStrategy struct {
|
|||
Type SidecarSetUpdateStrategyType `json:"type,omitempty"`
|
||||
|
||||
// Paused indicates that the SidecarSet is paused to update the injected pods,
|
||||
// but it don't affect the webhook inject sidecar container into the newly created pods.
|
||||
// default is false
|
||||
// For the impact on the injection behavior for newly created Pods, please refer to the comments of Selector.
|
||||
Paused bool `json:"paused,omitempty"`
|
||||
|
||||
// If selector is not nil, this upgrade will only update the selected pods.
|
||||
//
|
||||
// Starting from Kruise 1.8.0, the updateStrategy.Selector affects the version of the Sidecar container
|
||||
// injected into newly created Pods by a SidecarSet configured with an injectionStrategy.
|
||||
// In most cases, all newly created Pods are injected with the specified Sidecar version as configured in injectionStrategy.revision,
|
||||
// which is consistent with previous versions.
|
||||
Selector *metav1.LabelSelector `json:"selector,omitempty"`
|
||||
|
||||
// Partition is the desired number of pods in old revisions. It means when partition
|
||||
|
|
|
@ -17,13 +17,14 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/openkruise/kruise/apis/apps/v1beta1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
"github.com/openkruise/kruise/apis/apps/v1beta1"
|
||||
)
|
||||
|
||||
// UpdateStrategyType is a string enumeration type that enumerates
|
||||
|
@ -47,6 +48,8 @@ const (
|
|||
SubsetUpdated UnitedDeploymentConditionType = "SubsetUpdated"
|
||||
// SubsetFailure is added to a UnitedDeployment when one of its subsets has failure during its own reconciling.
|
||||
SubsetFailure UnitedDeploymentConditionType = "SubsetFailure"
|
||||
// UnitedDeploymentUpdated means currentRevision is equal to updatedRevision.
|
||||
UnitedDeploymentUpdated UnitedDeploymentConditionType = "UnitedDeploymentUpdated"
|
||||
)
|
||||
|
||||
// UnitedDeploymentSpec defines the desired state of UnitedDeployment.
|
||||
|
@ -165,6 +168,10 @@ type Topology struct {
|
|||
// +patchStrategy=merge
|
||||
// +optional
|
||||
Subsets []Subset `json:"subsets,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
|
||||
|
||||
// ScheduleStrategy indicates the strategy the UnitedDeployment used to preform the schedule between each of subsets.
|
||||
// +optional
|
||||
ScheduleStrategy UnitedDeploymentScheduleStrategy `json:"scheduleStrategy,omitempty"`
|
||||
}
|
||||
|
||||
// Subset defines the detail of a subset.
|
||||
|
@ -218,6 +225,80 @@ type Subset struct {
|
|||
Patch runtime.RawExtension `json:"patch,omitempty"`
|
||||
}
|
||||
|
||||
// UnitedDeploymentScheduleStrategyType is a string enumeration type that enumerates
|
||||
// all possible schedule strategies for the UnitedDeployment controller.
|
||||
// +kubebuilder:validation:Enum=Adaptive;Fixed;""
|
||||
type UnitedDeploymentScheduleStrategyType string
|
||||
|
||||
const (
|
||||
// AdaptiveUnitedDeploymentScheduleStrategyType represents that when a pod is stuck in the pending status and cannot
|
||||
// be scheduled, allow it to be rescheduled to another subset.
|
||||
AdaptiveUnitedDeploymentScheduleStrategyType UnitedDeploymentScheduleStrategyType = "Adaptive"
|
||||
// FixedUnitedDeploymentScheduleStrategyType represents that pods are strictly scheduled to the selected subset
|
||||
// even if scheduling fail.
|
||||
FixedUnitedDeploymentScheduleStrategyType UnitedDeploymentScheduleStrategyType = "Fixed"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultRescheduleCriticalDuration = 30 * time.Second
|
||||
DefaultUnschedulableStatusLastDuration = 300 * time.Second
|
||||
)
|
||||
|
||||
// AdaptiveUnitedDeploymentStrategy is used to communicate parameters when Type is AdaptiveUnitedDeploymentScheduleStrategyType.
|
||||
type AdaptiveUnitedDeploymentStrategy struct {
|
||||
// RescheduleCriticalSeconds indicates how long controller will reschedule a schedule failed Pod to the subset that has
|
||||
// redundant capacity after the subset where the Pod lives. If a Pod was scheduled failed and still in an unschedulabe status
|
||||
// over RescheduleCriticalSeconds duration, the controller will reschedule it to a suitable subset. Default is 30 seconds.
|
||||
// +optional
|
||||
RescheduleCriticalSeconds *int32 `json:"rescheduleCriticalSeconds,omitempty"`
|
||||
|
||||
// UnschedulableDuration is used to set the number of seconds for a Subset to recover from an unschedulable state,
|
||||
// with a default value of 300 seconds.
|
||||
// +optional
|
||||
UnschedulableDuration *int32 `json:"unschedulableDuration,omitempty"`
|
||||
|
||||
// ReserveUnschedulablePods indicates whether to enable reservation rescheduling mode, which is disabled by default.
|
||||
// If this feature is enabled, those pending pods that would otherwise be permanently transferred to other subsets
|
||||
// due to scheduling failure will be retained, and a temporary substitute Pod will be created in another subset to take over its work.
|
||||
// When the retained pod is successfully scheduled and ready, its temporary substitute will be deleted.
|
||||
// +optional
|
||||
ReserveUnschedulablePods bool `json:"reserveUnschedulablePods,omitempty"`
|
||||
}
|
||||
|
||||
// UnitedDeploymentScheduleStrategy defines the schedule performance of UnitedDeployment.
|
||||
type UnitedDeploymentScheduleStrategy struct {
|
||||
// Type indicates the type of the UnitedDeploymentScheduleStrategy.
|
||||
// Default is Fixed
|
||||
// +optional
|
||||
Type UnitedDeploymentScheduleStrategyType `json:"type,omitempty"`
|
||||
|
||||
// Adaptive is used to communicate parameters when Type is AdaptiveUnitedDeploymentScheduleStrategyType.
|
||||
// +optional
|
||||
Adaptive *AdaptiveUnitedDeploymentStrategy `json:"adaptive,omitempty"`
|
||||
}
|
||||
|
||||
func (s *UnitedDeploymentScheduleStrategy) IsAdaptive() bool {
|
||||
return s.Type == AdaptiveUnitedDeploymentScheduleStrategyType
|
||||
}
|
||||
|
||||
func (s *UnitedDeploymentScheduleStrategy) ShouldReserveUnschedulablePods() bool {
|
||||
return s.IsAdaptive() && s.Adaptive != nil && s.Adaptive.ReserveUnschedulablePods
|
||||
}
|
||||
|
||||
func (s *UnitedDeploymentScheduleStrategy) GetRescheduleCriticalDuration() time.Duration {
|
||||
if s.Adaptive == nil || s.Adaptive.RescheduleCriticalSeconds == nil {
|
||||
return DefaultRescheduleCriticalDuration
|
||||
}
|
||||
return time.Duration(*s.Adaptive.RescheduleCriticalSeconds) * time.Second
|
||||
}
|
||||
|
||||
func (s *UnitedDeploymentScheduleStrategy) GetUnschedulableDuration() time.Duration {
|
||||
if s.Adaptive == nil || s.Adaptive.UnschedulableDuration == nil {
|
||||
return DefaultUnschedulableStatusLastDuration
|
||||
}
|
||||
return time.Duration(*s.Adaptive.UnschedulableDuration) * time.Second
|
||||
}
|
||||
|
||||
// UnitedDeploymentStatus defines the observed state of UnitedDeployment.
|
||||
type UnitedDeploymentStatus struct {
|
||||
// ObservedGeneration is the most recent generation observed for this UnitedDeployment. It corresponds to the
|
||||
|
@ -235,6 +316,9 @@ type UnitedDeploymentStatus struct {
|
|||
// The number of pods in current version.
|
||||
UpdatedReplicas int32 `json:"updatedReplicas"`
|
||||
|
||||
// The number of reserved pods in temporary adaptive strategy.
|
||||
ReservedPods int32 `json:"reservedPods,omitempty"`
|
||||
|
||||
// The number of ready current revision replicas for this UnitedDeployment.
|
||||
// +optional
|
||||
UpdatedReadyReplicas int32 `json:"updatedReadyReplicas,omitempty"`
|
||||
|
@ -252,6 +336,8 @@ type UnitedDeploymentStatus struct {
|
|||
// +optional
|
||||
SubsetReplicas map[string]int32 `json:"subsetReplicas,omitempty"`
|
||||
|
||||
// Record the conditions of each subset.
|
||||
SubsetStatuses []UnitedDeploymentSubsetStatus `json:"subsetStatuses,omitempty"`
|
||||
// Represents the latest available observations of a UnitedDeployment's current state.
|
||||
// +optional
|
||||
Conditions []UnitedDeploymentCondition `json:"conditions,omitempty"`
|
||||
|
@ -264,6 +350,15 @@ type UnitedDeploymentStatus struct {
|
|||
LabelSelector string `json:"labelSelector,omitempty"`
|
||||
}
|
||||
|
||||
func (s *UnitedDeploymentStatus) GetSubsetStatus(subset string) *UnitedDeploymentSubsetStatus {
|
||||
for i, subsetStatus := range s.SubsetStatuses {
|
||||
if subsetStatus.Name == subset {
|
||||
return &s.SubsetStatuses[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnitedDeploymentCondition describes current state of a UnitedDeployment.
|
||||
type UnitedDeploymentCondition struct {
|
||||
// Type of in place set condition.
|
||||
|
@ -278,7 +373,7 @@ type UnitedDeploymentCondition struct {
|
|||
// The reason for the condition's last transition.
|
||||
Reason string `json:"reason,omitempty"`
|
||||
|
||||
// A human readable message indicating details about the transition.
|
||||
// A human-readable message indicating details about the transition.
|
||||
Message string `json:"message,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -293,6 +388,66 @@ type UpdateStatus struct {
|
|||
CurrentPartitions map[string]int32 `json:"currentPartitions,omitempty"`
|
||||
}
|
||||
|
||||
type UnitedDeploymentSubsetStatus struct {
|
||||
// Subset name specified in Topology.Subsets
|
||||
Name string `json:"name,omitempty"`
|
||||
// Records the current replicas. Currently unused.
|
||||
Replicas int32 `json:"replicas,omitempty"`
|
||||
// Records the current ready replicas. Currently unused.
|
||||
ReadyReplicas int32 `json:"readyReplicas,omitempty"`
|
||||
// Records the current partition. Currently unused.
|
||||
Partition int32 `json:"partition,omitempty"`
|
||||
// Records the reserved pods in the subset.
|
||||
ReservedPods int32 `json:"reservedPods,omitempty"`
|
||||
// Conditions is an array of current observed subset conditions.
|
||||
Conditions []UnitedDeploymentSubsetCondition `json:"conditions,omitempty"`
|
||||
}
|
||||
|
||||
func (s *UnitedDeploymentSubsetStatus) GetCondition(condType UnitedDeploymentSubsetConditionType) *UnitedDeploymentSubsetCondition {
|
||||
for _, condition := range s.Conditions {
|
||||
if condition.Type == condType {
|
||||
return &condition
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *UnitedDeploymentSubsetStatus) SetCondition(condType UnitedDeploymentSubsetConditionType, status corev1.ConditionStatus, reason, message string) {
|
||||
var currentCond *UnitedDeploymentSubsetCondition
|
||||
for i, c := range s.Conditions {
|
||||
if c.Type == condType {
|
||||
currentCond = &s.Conditions[i]
|
||||
break
|
||||
}
|
||||
}
|
||||
if currentCond != nil && currentCond.Status == status && currentCond.Reason == reason {
|
||||
return
|
||||
}
|
||||
if currentCond == nil {
|
||||
s.Conditions = append(s.Conditions, UnitedDeploymentSubsetCondition{Type: condType})
|
||||
currentCond = &s.Conditions[len(s.Conditions)-1]
|
||||
}
|
||||
currentCond.LastTransitionTime = metav1.Now()
|
||||
currentCond.Status = status
|
||||
currentCond.Reason = reason
|
||||
currentCond.Message = message
|
||||
}
|
||||
|
||||
type UnitedDeploymentSubsetConditionType string
|
||||
|
||||
const (
|
||||
// UnitedDeploymentSubsetSchedulable means new pods allocated into the subset will keep pending.
|
||||
UnitedDeploymentSubsetSchedulable UnitedDeploymentSubsetConditionType = "Schedulable"
|
||||
)
|
||||
|
||||
type UnitedDeploymentSubsetCondition struct {
|
||||
Type UnitedDeploymentSubsetConditionType `json:"type"`
|
||||
Status corev1.ConditionStatus `json:"status"`
|
||||
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||
|
|
|
@ -4,6 +4,9 @@ const (
|
|||
// ControllerRevisionHashLabelKey is used to record the controller revision of current resource.
|
||||
ControllerRevisionHashLabelKey = "apps.kruise.io/controller-revision-hash"
|
||||
|
||||
// ReservedPodLabelKey is used to mark the reserved pods.
|
||||
ReservedPodLabelKey = "apps.kruise.io/united-deployment-reserved-pod"
|
||||
|
||||
// SubSetNameLabelKey is used to record the name of current subset.
|
||||
SubSetNameLabelKey = "apps.kruise.io/subset-name"
|
||||
|
||||
|
@ -29,4 +32,7 @@ const (
|
|||
// using in-place update strategy to kill sidecar. This image must be given if you want to use in-place update
|
||||
// strategy to terminate sidecar containers.
|
||||
KruiseTerminateSidecarWithImageEnv = "KRUISE_TERMINATE_SIDECAR_WHEN_JOB_EXIT_WITH_IMAGE"
|
||||
|
||||
// KruiseIgnoreContainerExitCodeEnv is an env name, which represents a switch to ignore the exit code of sidecar container.
|
||||
KruiseIgnoreContainerExitCodeEnv = "KRUISE_TERMINATE_SIDECAR_IGNORE_EXIT_CODE"
|
||||
)
|
||||
|
|
|
@ -28,6 +28,11 @@ type WorkloadSpreadSpec struct {
|
|||
// TargetReference is the target workload that WorkloadSpread want to control.
|
||||
TargetReference *TargetReference `json:"targetRef"`
|
||||
|
||||
// TargetFilter allows WorkloadSpread to manage only a portion of the Pods in the TargetReference:
|
||||
// by specifying the criteria for the Pods to be managed through a label selector,
|
||||
// and by specifying how to obtain the total number of these selected Pods from the workload using replicasPaths.
|
||||
TargetFilter *TargetFilter `json:"targetFilter,omitempty"`
|
||||
|
||||
// Subsets describes the pods distribution details between each of subsets.
|
||||
// +patchMergeKey=name
|
||||
// +patchStrategy=merge
|
||||
|
@ -48,6 +53,58 @@ type TargetReference struct {
|
|||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
/*
|
||||
TargetFilter is an optional parameter that allows WorkloadSpread to manage only a subset of the Pods generated by the target workload.
|
||||
|
||||
For example, suppose a WorkloadSpread points to the following Kubeflow TFJob resource:
|
||||
|
||||
```yaml
|
||||
apiVersion: kubeflow.org/v1
|
||||
kind: TFJob
|
||||
spec:
|
||||
tfReplicaSpecs:
|
||||
PS:
|
||||
replicas: 1
|
||||
...
|
||||
MASTER:
|
||||
replicas: 1
|
||||
...
|
||||
Worker:
|
||||
replicas: 2
|
||||
...
|
||||
```
|
||||
|
||||
If you want to manage only the 2 Worker Pods that are generated, you need to configure the TargetFilter as follows:
|
||||
|
||||
```yaml
|
||||
targetFilter:
|
||||
selector:
|
||||
matchLabels:
|
||||
role: worker
|
||||
replicasPathList:
|
||||
- spec.tfReplicaSpecs.Worker.replicas
|
||||
```
|
||||
|
||||
With this configuration, the PS Pods and Master Pods generated by the TFJob will not be managed by WorkloadSpread and will not be
|
||||
counted toward the total number of replicas.
|
||||
*/
|
||||
type TargetFilter struct {
|
||||
// Selector is used to filter the Pods to be managed.
|
||||
//
|
||||
//+optional
|
||||
Selector *metav1.LabelSelector `json:"selector,omitempty"`
|
||||
|
||||
// ReplicasPathList is a list of resource paths used to specify how to determine the total number of replicas of
|
||||
// the target workload after filtering. If this list is not empty, WorkloadSpread will look for the corresponding
|
||||
// values in the target resource according to each path, and treat the sum of these values as the total number of replicas after filtering.
|
||||
//
|
||||
// The replicas path is a dot-separated path, similar to "spec.replicas". If there are arrays, you can use numbers to denote indexes, like "subsets.1.replicas".
|
||||
// The real values of these paths must be integers.
|
||||
//
|
||||
// +optional
|
||||
ReplicasPathList []string `json:"replicasPathList,omitempty"`
|
||||
}
|
||||
|
||||
// WorkloadSpreadScheduleStrategyType is a string enumeration type that enumerates
|
||||
// all possible schedule strategies for the WorkloadSpread controller.
|
||||
// +kubebuilder:validation:Enum=Adaptive;Fixed;""
|
||||
|
|
|
@ -30,6 +30,31 @@ import (
|
|||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AdaptiveUnitedDeploymentStrategy) DeepCopyInto(out *AdaptiveUnitedDeploymentStrategy) {
|
||||
*out = *in
|
||||
if in.RescheduleCriticalSeconds != nil {
|
||||
in, out := &in.RescheduleCriticalSeconds, &out.RescheduleCriticalSeconds
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.UnschedulableDuration != nil {
|
||||
in, out := &in.UnschedulableDuration, &out.UnschedulableDuration
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdaptiveUnitedDeploymentStrategy.
|
||||
func (in *AdaptiveUnitedDeploymentStrategy) DeepCopy() *AdaptiveUnitedDeploymentStrategy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AdaptiveUnitedDeploymentStrategy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AdaptiveWorkloadSpreadStrategy) DeepCopyInto(out *AdaptiveWorkloadSpreadStrategy) {
|
||||
*out = *in
|
||||
|
@ -2686,6 +2711,11 @@ func (in *SidecarContainer) DeepCopyInto(out *SidecarContainer) {
|
|||
in.Container.DeepCopyInto(&out.Container)
|
||||
out.UpgradeStrategy = in.UpgradeStrategy
|
||||
out.ShareVolumePolicy = in.ShareVolumePolicy
|
||||
if in.ShareVolumeDevicePolicy != nil {
|
||||
in, out := &in.ShareVolumeDevicePolicy, &out.ShareVolumeDevicePolicy
|
||||
*out = new(ShareVolumePolicy)
|
||||
**out = **in
|
||||
}
|
||||
if in.TransferEnv != nil {
|
||||
in, out := &in.TransferEnv, &out.TransferEnv
|
||||
*out = make([]TransferEnvVar, len(*in))
|
||||
|
@ -3243,6 +3273,31 @@ func (in *SyncStatus) DeepCopy() *SyncStatus {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TargetFilter) DeepCopyInto(out *TargetFilter) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = new(metav1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ReplicasPathList != nil {
|
||||
in, out := &in.ReplicasPathList, &out.ReplicasPathList
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetFilter.
|
||||
func (in *TargetFilter) DeepCopy() *TargetFilter {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TargetFilter)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TargetReference) DeepCopyInto(out *TargetReference) {
|
||||
*out = *in
|
||||
|
@ -3268,6 +3323,7 @@ func (in *Topology) DeepCopyInto(out *Topology) {
|
|||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
in.ScheduleStrategy.DeepCopyInto(&out.ScheduleStrategy)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topology.
|
||||
|
@ -3380,6 +3436,26 @@ func (in *UnitedDeploymentList) DeepCopyObject() runtime.Object {
|
|||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *UnitedDeploymentScheduleStrategy) DeepCopyInto(out *UnitedDeploymentScheduleStrategy) {
|
||||
*out = *in
|
||||
if in.Adaptive != nil {
|
||||
in, out := &in.Adaptive, &out.Adaptive
|
||||
*out = new(AdaptiveUnitedDeploymentStrategy)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnitedDeploymentScheduleStrategy.
|
||||
func (in *UnitedDeploymentScheduleStrategy) DeepCopy() *UnitedDeploymentScheduleStrategy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(UnitedDeploymentScheduleStrategy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *UnitedDeploymentSpec) DeepCopyInto(out *UnitedDeploymentSpec) {
|
||||
*out = *in
|
||||
|
@ -3428,6 +3504,13 @@ func (in *UnitedDeploymentStatus) DeepCopyInto(out *UnitedDeploymentStatus) {
|
|||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.SubsetStatuses != nil {
|
||||
in, out := &in.SubsetStatuses, &out.SubsetStatuses
|
||||
*out = make([]UnitedDeploymentSubsetStatus, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]UnitedDeploymentCondition, len(*in))
|
||||
|
@ -3452,6 +3535,44 @@ func (in *UnitedDeploymentStatus) DeepCopy() *UnitedDeploymentStatus {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *UnitedDeploymentSubsetCondition) DeepCopyInto(out *UnitedDeploymentSubsetCondition) {
|
||||
*out = *in
|
||||
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnitedDeploymentSubsetCondition.
|
||||
func (in *UnitedDeploymentSubsetCondition) DeepCopy() *UnitedDeploymentSubsetCondition {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(UnitedDeploymentSubsetCondition)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *UnitedDeploymentSubsetStatus) DeepCopyInto(out *UnitedDeploymentSubsetStatus) {
|
||||
*out = *in
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]UnitedDeploymentSubsetCondition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnitedDeploymentSubsetStatus.
|
||||
func (in *UnitedDeploymentSubsetStatus) DeepCopy() *UnitedDeploymentSubsetStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(UnitedDeploymentSubsetStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *UnitedDeploymentUpdateStrategy) DeepCopyInto(out *UnitedDeploymentUpdateStrategy) {
|
||||
*out = *in
|
||||
|
@ -3635,6 +3756,11 @@ func (in *WorkloadSpreadSpec) DeepCopyInto(out *WorkloadSpreadSpec) {
|
|||
*out = new(TargetReference)
|
||||
**out = **in
|
||||
}
|
||||
if in.TargetFilter != nil {
|
||||
in, out := &in.TargetFilter, &out.TargetFilter
|
||||
*out = new(TargetFilter)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Subsets != nil {
|
||||
in, out := &in.Subsets, &out.Subsets
|
||||
*out = make([]WorkloadSpreadSubset, len(*in))
|
||||
|
|
|
@ -17,12 +17,11 @@ limitations under the License.
|
|||
package v1beta1
|
||||
|
||||
import (
|
||||
appspub "github.com/openkruise/kruise/apis/apps/pub"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
appspub "github.com/openkruise/kruise/apis/apps/pub"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -30,6 +29,39 @@ const (
|
|||
MaxMinReadySeconds = 300
|
||||
)
|
||||
|
||||
// VolumeClaimUpdateStrategyType defines the update strategy types for volume claims.
|
||||
// It is an enumerated type that provides two different update strategies.
|
||||
// +enum
|
||||
type VolumeClaimUpdateStrategyType string
|
||||
|
||||
const (
|
||||
// OnPodRollingUpdateVolumeClaimUpdateStrategyType indicates that volume claim updates are triggered when associated Pods undergo rolling updates.
|
||||
// This strategy ensures that storage availability and integrity are maintained during the update process.
|
||||
OnPodRollingUpdateVolumeClaimUpdateStrategyType VolumeClaimUpdateStrategyType = "OnPodRollingUpdate"
|
||||
|
||||
// OnPVCDeleteVolumeClaimUpdateStrategyType indicates that updates are triggered when a Persistent Volume Claim (PVC) is deleted.
|
||||
// This strategy places full control of the update timing in the hands of the user, typically executed after ensuring data has been backed up or there are no data security concerns,
|
||||
// allowing for storage resource management that aligns with specific user requirements and security policies.
|
||||
OnPVCDeleteVolumeClaimUpdateStrategyType VolumeClaimUpdateStrategyType = "OnDelete"
|
||||
)
|
||||
|
||||
// VolumeClaimStatus describes the status of a volume claim template.
|
||||
// It provides details about the compatibility and readiness of the volume claim.
|
||||
type VolumeClaimStatus struct {
|
||||
// VolumeClaimName is the name of the volume claim.
|
||||
// This is a unique identifier used to reference a specific volume claim.
|
||||
VolumeClaimName string `json:"volumeClaimName"`
|
||||
// CompatibleReplicas is the number of replicas currently compatible with the volume claim.
|
||||
// It indicates how many replicas can function properly, being compatible with this volume claim.
|
||||
// Compatibility is determined by whether the PVC spec storage requests are greater than or equal to the template spec storage requests
|
||||
CompatibleReplicas int32 `json:"compatibleReplicas"`
|
||||
// CompatibleReadyReplicas is the number of replicas that are both ready and compatible with the volume claim.
|
||||
// It highlights that these replicas are not only compatible but also ready to be put into service immediately.
|
||||
// Compatibility is determined by whether the pvc spec storage requests are greater than or equal to the template spec storage requests
|
||||
// The "ready" status is determined by whether the PVC status capacity is greater than or equal to the PVC spec storage requests.
|
||||
CompatibleReadyReplicas int32 `json:"compatibleReadyReplicas"`
|
||||
}
|
||||
|
||||
// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
|
||||
// controller will use to perform updates. It includes any additional parameters
|
||||
// necessary to perform the update for the indicated strategy.
|
||||
|
@ -43,11 +75,18 @@ type StatefulSetUpdateStrategy struct {
|
|||
RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty"`
|
||||
}
|
||||
|
||||
// VolumeClaimUpdateStrategy defines the strategy for updating volume claims.
|
||||
// This structure is used to control how updates to PersistentVolumeClaims are handled during pod rolling updates or PersistentVolumeClaim deletions.
|
||||
type VolumeClaimUpdateStrategy struct {
|
||||
// Type specifies the type of update strategy, possible values include:
|
||||
// OnPodRollingUpdateVolumeClaimUpdateStrategyType: Apply the update strategy during pod rolling updates.
|
||||
// OnPVCDeleteVolumeClaimUpdateStrategyType: Apply the update strategy when a PersistentVolumeClaim is deleted.
|
||||
Type VolumeClaimUpdateStrategyType `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
|
||||
type RollingUpdateStatefulSetStrategy struct {
|
||||
// Partition indicates the ordinal at which the StatefulSet should be partitioned by default.
|
||||
// But if unorderedUpdate has been set:
|
||||
// - Partition indicates the number of pods with non-updated revisions when rolling update.
|
||||
// Partition indicates the number of pods the StatefulSet should be partitioned by default.
|
||||
// - It means controller will update $(replicas - partition) number of pod.
|
||||
// Default value is 0.
|
||||
// +optional
|
||||
|
@ -129,7 +168,7 @@ const (
|
|||
)
|
||||
|
||||
// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs
|
||||
// created from the StatefulSet VolumeClaimTemplates.
|
||||
// created from the StatefulSet VolumeClaims.
|
||||
type StatefulSetPersistentVolumeClaimRetentionPolicy struct {
|
||||
// WhenDeleted specifies what happens to PVCs created from StatefulSet
|
||||
// VolumeClaimTemplates when the StatefulSet is deleted. The default policy
|
||||
|
@ -194,6 +233,11 @@ type StatefulSetSpec struct {
|
|||
// +kubebuilder:validation:Schemaless
|
||||
VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"`
|
||||
|
||||
// VolumeClaimUpdateStrategy specifies the strategy for updating VolumeClaimTemplates within a StatefulSet.
|
||||
// This field is currently only effective if the StatefulSetAutoResizePVCGate is enabled.
|
||||
// +optional
|
||||
VolumeClaimUpdateStrategy VolumeClaimUpdateStrategy `json:"volumeClaimUpdateStrategy,omitempty"`
|
||||
|
||||
// serviceName is the name of the service that governs this StatefulSet.
|
||||
// This service must exist before the StatefulSet, and is responsible for
|
||||
// the network identity of the set. Pods get DNS/hostnames that follow the
|
||||
|
@ -230,7 +274,8 @@ type StatefulSetSpec struct {
|
|||
// Then controller will delete Pod-1 and create Pod-3 (existing Pods will be [0, 2, 3])
|
||||
// - If you just want to delete Pod-1, you should set spec.reserveOrdinal to [1] and spec.replicas to 2.
|
||||
// Then controller will delete Pod-1 (existing Pods will be [0, 2])
|
||||
ReserveOrdinals []int `json:"reserveOrdinals,omitempty"`
|
||||
// You can also use ranges along with numbers, such as [1, 3-5], which is a shortcut for [1, 3, 4, 5].
|
||||
ReserveOrdinals []intstr.IntOrString `json:"reserveOrdinals,omitempty"`
|
||||
|
||||
// Lifecycle defines the lifecycle hooks for Pods pre-delete, in-place update.
|
||||
Lifecycle *appspub.Lifecycle `json:"lifecycle,omitempty"`
|
||||
|
@ -317,6 +362,12 @@ type StatefulSetStatus struct {
|
|||
|
||||
// LabelSelector is label selectors for query over pods that should match the replica count used by HPA.
|
||||
LabelSelector string `json:"labelSelector,omitempty"`
|
||||
|
||||
// VolumeClaims represents the status of compatibility between existing PVCs
|
||||
// and their respective templates. It tracks whether the PersistentVolumeClaims have been updated
|
||||
// to match any changes made to the volumeClaimTemplates, ensuring synchronization
|
||||
// between the defined templates and the actual PersistentVolumeClaims in use.
|
||||
VolumeClaims []VolumeClaimStatus `json:"volumeClaims,omitempty"`
|
||||
}
|
||||
|
||||
// These are valid conditions of a statefulset.
|
||||
|
|
|
@ -199,6 +199,7 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
|
|||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
out.VolumeClaimUpdateStrategy = in.VolumeClaimUpdateStrategy
|
||||
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
|
||||
if in.RevisionHistoryLimit != nil {
|
||||
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
|
||||
|
@ -207,7 +208,7 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
|
|||
}
|
||||
if in.ReserveOrdinals != nil {
|
||||
in, out := &in.ReserveOrdinals, &out.ReserveOrdinals
|
||||
*out = make([]int, len(*in))
|
||||
*out = make([]intstr.IntOrString, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Lifecycle != nil {
|
||||
|
@ -257,6 +258,11 @@ func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
|
|||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.VolumeClaims != nil {
|
||||
in, out := &in.VolumeClaims, &out.VolumeClaims
|
||||
*out = make([]VolumeClaimStatus, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
|
||||
|
@ -308,3 +314,33 @@ func (in *UnorderedUpdateStrategy) DeepCopy() *UnorderedUpdateStrategy {
|
|||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VolumeClaimStatus) DeepCopyInto(out *VolumeClaimStatus) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeClaimStatus.
|
||||
func (in *VolumeClaimStatus) DeepCopy() *VolumeClaimStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VolumeClaimStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VolumeClaimUpdateStrategy) DeepCopyInto(out *VolumeClaimUpdateStrategy) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeClaimUpdateStrategy.
|
||||
func (in *VolumeClaimUpdateStrategy) DeepCopy() *VolumeClaimUpdateStrategy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VolumeClaimUpdateStrategy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
|
|
@ -27,17 +27,28 @@ import (
|
|||
type PubOperation string
|
||||
|
||||
const (
|
||||
// PubProtectOperationAnnotation indicates the pub protected Operation[DELETE,UPDATE,EVICT]
|
||||
// if annotations[kruise.io/pub-protect-operations]=EVICT indicates the pub only protect evict pod
|
||||
// if the annotations do not exist, the default DELETE,EVICT,UPDATE are protected
|
||||
// PubProtectOperationAnnotation indicates the pub protected Operation[DELETE,UPDATE,EVICT].
|
||||
// if annotations[kruise.io/pub-protect-operations]=EVICT indicates the pub only protect evict pod.
|
||||
// if the annotations do not exist, the default DELETE,EVICT,UPDATE are protected.
|
||||
// RESIZE: Pod vertical scaling action. If it's enabled, all resize action will be protected. RESIZE
|
||||
// is an extension of UPDATE, if RESIZE is disabled and UPDATE is enabled, any UPDATE operation will
|
||||
// be protected only as it will definitely cause container restarts.
|
||||
// UPDATE: Kruise will carefully differentiate whether this update will cause interruptions. When
|
||||
// the FeatureGate InPlacePodVerticalScaling is enabled, pod inplace vertical scaling will be
|
||||
// considered non-disruption only when allowedResources(cpu、memory) changes、restartPolicy
|
||||
// is not restartContainer、is not static pod and QoS not changed. But if featureGate
|
||||
// InPlacePodVerticalScaling is disabled, all resize action will be considered as disruption.
|
||||
PubProtectOperationAnnotation = "kruise.io/pub-protect-operations"
|
||||
// pod webhook operation
|
||||
PubUpdateOperation PubOperation = "UPDATE"
|
||||
PubDeleteOperation PubOperation = "DELETE"
|
||||
PubEvictOperation PubOperation = "EVICT"
|
||||
// PubProtectTotalReplicas indicates the pub protected total replicas, rather than workload.spec.replicas.
|
||||
// and must be used with pub.spec.selector.
|
||||
PubProtectTotalReplicas = "pub.kruise.io/protect-total-replicas"
|
||||
PubResizeOperation PubOperation = "RESIZE"
|
||||
// PubProtectTotalReplicasAnnotation is the target replicas.
|
||||
// By default, PUB will get the target replicas through workload.spec.replicas. but there are some scenarios that may workload doesn't
|
||||
// implement scale subresources or Pod doesn't have workload management. In this scenario, you can set pub.kruise.io/protect-total-replicas
|
||||
// in pub annotations to get the target replicas to realize the same effect of protection ability.
|
||||
PubProtectTotalReplicasAnnotation = "pub.kruise.io/protect-total-replicas"
|
||||
// Marked the pod will not be pub-protected, solving the scenario of force pod deletion
|
||||
PodPubNoProtectionAnnotation = "pub.kruise.io/no-protect"
|
||||
)
|
||||
|
|
|
@ -47,6 +47,13 @@ var (
|
|||
enablePprof = flag.Bool("enable-pprof", true, "Enable pprof for daemon.")
|
||||
pluginConfigFile = flag.String("plugin-config-file", "/kruise/CredentialProviderPlugin.yaml", "The path of plugin config file.")
|
||||
pluginBinDir = flag.String("plugin-bin-dir", "/kruise/plugins", "The path of directory of plugin binaries.")
|
||||
|
||||
// TODO: After the feature is stable, the default value should also be restricted, e.g. 5.
|
||||
|
||||
// Users can set this value to limit the number of workers for pulling images,
|
||||
// preventing the consumption of all available disk IOPS or network bandwidth,
|
||||
// which could otherwise impact the performance of other running pods.
|
||||
maxWorkersForPullImage = flag.Int("max-workers-for-pull-image", -1, "The maximum number of workers for pulling images.")
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@ -71,7 +78,7 @@ func main() {
|
|||
}()
|
||||
}
|
||||
ctx := signals.SetupSignalHandler()
|
||||
d, err := daemon.NewDaemon(cfg, *bindAddr)
|
||||
d, err := daemon.NewDaemon(cfg, *bindAddr, *maxWorkersForPullImage)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to new daemon: %v", err)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
Copyright 2024 The Kruise Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
|
||||
)
|
||||
|
||||
func main() {
|
||||
config, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
kc, err := kruiseclientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
cloneSets, err := kc.AppsV1alpha1().CloneSets("").List(context.Background(), metav1.ListOptions{Limit: 1})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(cloneSets.Items) > 0 || cloneSets.Continue != "" {
|
||||
log.Fatalln("there still exists some clonesets in the cluster")
|
||||
}
|
||||
statefulSets, err := kc.AppsV1alpha1().StatefulSets("").List(context.Background(), metav1.ListOptions{Limit: 1})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(statefulSets.Items) > 0 || statefulSets.Continue != "" {
|
||||
log.Fatalln("there still exists some advanced statefulsets in the cluster")
|
||||
}
|
||||
statefulSetsBeta1, err := kc.AppsV1beta1().StatefulSets("").List(context.Background(), metav1.ListOptions{Limit: 1})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(statefulSetsBeta1.Items) > 0 || statefulSetsBeta1.Continue != "" {
|
||||
log.Fatalln("there still exists some advanced statefulsets in the cluster")
|
||||
}
|
||||
daemonSets, err := kc.AppsV1alpha1().DaemonSets("").List(context.Background(), metav1.ListOptions{Limit: 1})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(daemonSets.Items) > 0 || daemonSets.Continue != "" {
|
||||
log.Fatalln("there still exists some advanced daemonsets in the cluster")
|
||||
}
|
||||
log.Println("cluster is clean, ready to delete kruise")
|
||||
}
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: advancedcronjobs.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -202,24 +202,8 @@ spec:
|
|||
active:
|
||||
description: A list of pointers to currently running jobs.
|
||||
items:
|
||||
description: |-
|
||||
ObjectReference contains enough information to let you inspect or modify the referred object.
|
||||
---
|
||||
New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
|
||||
1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.
|
||||
2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
|
||||
restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
|
||||
Those cannot be well described when embedded.
|
||||
3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
|
||||
4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity
|
||||
during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple
|
||||
and the version of the actual struct is irrelevant.
|
||||
5. We cannot easily change it. Because this type is embedded in many locations, updates to this type
|
||||
will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
|
||||
|
||||
|
||||
Instead of using this type, create a locally provided and used type that is well-focused on your reference.
|
||||
For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
|
||||
description: ObjectReference contains enough information to let
|
||||
you inspect or modify the referred object.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: API version of the referent.
|
||||
|
@ -233,7 +217,6 @@ spec:
|
|||
the event) or if no container name is specified "spec.containers[2]" (container with
|
||||
index 2 in this pod). This syntax is chosen only to have some well-defined way of
|
||||
referencing a part of an object.
|
||||
TODO: this design is not final and this field is subject to change in the future.
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: broadcastjobs.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: clonesets.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -29,6 +29,10 @@ spec:
|
|||
jsonPath: .status.updatedReadyReplicas
|
||||
name: UPDATED_READY
|
||||
type: integer
|
||||
- description: The number of pods updated and available.
|
||||
jsonPath: .status.updatedAvailableReplicas
|
||||
name: UPDATED_AVAILABLE
|
||||
type: integer
|
||||
- description: The number of pods ready.
|
||||
jsonPath: .status.readyReplicas
|
||||
name: READY
|
||||
|
@ -234,11 +238,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -371,11 +377,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -512,6 +520,8 @@ spec:
|
|||
description: |-
|
||||
UpdatedAvailableReplicas is the number of Pods created by the CloneSet controller from the CloneSet version
|
||||
indicated by updateRevision and have a Ready Condition for at least minReadySeconds.
|
||||
Notice: when enable InPlaceWorkloadVerticalScaling, pod during resource resizing will also be unavailable.
|
||||
This means these pod will be counted in maxUnavailable.
|
||||
format: int32
|
||||
type: integer
|
||||
updatedReadyReplicas:
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: containerrecreaterequests.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -143,6 +143,7 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
|
@ -172,6 +173,7 @@ spec:
|
|||
- value
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
path:
|
||||
description: Path to access on the HTTP server.
|
||||
type: string
|
||||
|
@ -196,7 +198,6 @@ spec:
|
|||
description: |-
|
||||
TCPSocket specifies an action involving a TCP port.
|
||||
TCP hooks not yet supported
|
||||
TODO: implement a realistic TCP lifecycle hook
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: daemonsets.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -204,11 +204,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -327,11 +329,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: ephemeraljobs.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -127,11 +127,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: imagelistpulljobs.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -140,11 +140,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -225,11 +227,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: imagepulljobs.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -146,11 +146,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -231,11 +233,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: nodeimages.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -116,24 +116,8 @@ spec:
|
|||
List of objects depended by this object. If this image is managed by a controller,
|
||||
then an entry in this list will point to this controller.
|
||||
items:
|
||||
description: |-
|
||||
ObjectReference contains enough information to let you inspect or modify the referred object.
|
||||
---
|
||||
New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
|
||||
1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.
|
||||
2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
|
||||
restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
|
||||
Those cannot be well described when embedded.
|
||||
3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
|
||||
4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity
|
||||
during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple
|
||||
and the version of the actual struct is irrelevant.
|
||||
5. We cannot easily change it. Because this type is embedded in many locations, updates to this type
|
||||
will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
|
||||
|
||||
|
||||
Instead of using this type, create a locally provided and used type that is well-focused on your reference.
|
||||
For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
|
||||
description: ObjectReference contains enough information
|
||||
to let you inspect or modify the referred object.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: API version of the referent.
|
||||
|
@ -147,7 +131,6 @@ spec:
|
|||
the event) or if no container name is specified "spec.containers[2]" (container with
|
||||
index 2 in this pod). This syntax is chosen only to have some well-defined way of
|
||||
referencing a part of an object.
|
||||
TODO: this design is not final and this field is subject to change in the future.
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
|
@ -220,7 +203,6 @@ spec:
|
|||
concurrency, change detection, and the watch operation on a resource or set of resources.
|
||||
Clients must treat these values as opaque and passed unmodified back to the server.
|
||||
|
||||
|
||||
Populated by the system.
|
||||
Read-only.
|
||||
Value must be treated as opaque by clients and .
|
||||
|
@ -330,6 +312,10 @@ spec:
|
|||
description: The number of pulling tasks which reached phase Succeeded.
|
||||
format: int32
|
||||
type: integer
|
||||
waiting:
|
||||
description: The number of pulling tasks which are waiting.
|
||||
format: int32
|
||||
type: integer
|
||||
required:
|
||||
- desired
|
||||
type: object
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: nodepodprobes.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -66,7 +66,8 @@ spec:
|
|||
description: container probe spec
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in
|
||||
the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
|
@ -78,6 +79,7 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
failureThreshold:
|
||||
description: |-
|
||||
|
@ -86,8 +88,7 @@ spec:
|
|||
format: int32
|
||||
type: integer
|
||||
grpc:
|
||||
description: GRPC specifies an action involving a
|
||||
GRPC port.
|
||||
description: GRPC specifies a GRPC HealthCheckRequest.
|
||||
properties:
|
||||
port:
|
||||
description: Port number of the gRPC service.
|
||||
|
@ -95,19 +96,19 @@ spec:
|
|||
format: int32
|
||||
type: integer
|
||||
service:
|
||||
default: ""
|
||||
description: |-
|
||||
Service is the name of the service to place in the gRPC HealthCheckRequest
|
||||
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
|
||||
|
||||
|
||||
If this is not specified, the default behavior is defined by gRPC.
|
||||
type: string
|
||||
required:
|
||||
- port
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to
|
||||
perform.
|
||||
description: HTTPGet specifies an HTTP GET request
|
||||
to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
|
@ -134,6 +135,7 @@ spec:
|
|||
- value
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
path:
|
||||
description: Path to access on the HTTP server.
|
||||
type: string
|
||||
|
@ -173,8 +175,8 @@ spec:
|
|||
format: int32
|
||||
type: integer
|
||||
tcpSocket:
|
||||
description: TCPSocket specifies an action involving
|
||||
a TCP port.
|
||||
description: TCPSocket specifies a connection to a
|
||||
TCP port.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to,
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: persistentpodstates.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: podprobemarkers.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -93,7 +93,8 @@ spec:
|
|||
description: container probe spec
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the
|
||||
container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
|
@ -105,6 +106,7 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
failureThreshold:
|
||||
description: |-
|
||||
|
@ -113,7 +115,7 @@ spec:
|
|||
format: int32
|
||||
type: integer
|
||||
grpc:
|
||||
description: GRPC specifies an action involving a GRPC port.
|
||||
description: GRPC specifies a GRPC HealthCheckRequest.
|
||||
properties:
|
||||
port:
|
||||
description: Port number of the gRPC service. Number
|
||||
|
@ -121,18 +123,18 @@ spec:
|
|||
format: int32
|
||||
type: integer
|
||||
service:
|
||||
default: ""
|
||||
description: |-
|
||||
Service is the name of the service to place in the gRPC HealthCheckRequest
|
||||
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
|
||||
|
||||
|
||||
If this is not specified, the default behavior is defined by gRPC.
|
||||
type: string
|
||||
required:
|
||||
- port
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
|
@ -159,6 +161,7 @@ spec:
|
|||
- value
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
path:
|
||||
description: Path to access on the HTTP server.
|
||||
type: string
|
||||
|
@ -198,8 +201,7 @@ spec:
|
|||
format: int32
|
||||
type: integer
|
||||
tcpSocket:
|
||||
description: TCPSocket specifies an action involving a TCP
|
||||
port.
|
||||
description: TCPSocket specifies a connection to a TCP port.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults
|
||||
|
@ -277,11 +279,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: resourcedistributions.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -131,11 +131,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: sidecarsets.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -73,10 +73,19 @@ spec:
|
|||
otherwise it will be injected into the back.
|
||||
default BeforeAppContainerType
|
||||
type: string
|
||||
shareVolumeDevicePolicy:
|
||||
description: |-
|
||||
If ShareVolumeDevicePolicy is enabled, the sidecar container will share the other container's VolumeDevices
|
||||
in the pod(don't contain the injected sidecar container).
|
||||
This is a pointer to ensure that the sidecarset-hash does not change if the user does not configure this field, mainly for compatibility with older versions.
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
type: object
|
||||
shareVolumePolicy:
|
||||
description: |-
|
||||
If ShareVolumePolicy is enabled, the sidecar container will share the other container's VolumeMounts
|
||||
in the pod(don't contains the injected sidecar container).
|
||||
in the pod(not including the injected sidecar container).
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
|
@ -146,10 +155,13 @@ spec:
|
|||
referenced object inside the same namespace.
|
||||
properties:
|
||||
name:
|
||||
default: ""
|
||||
description: |-
|
||||
Name of the referent.
|
||||
This field is effectively required, but due to backwards compatibility is
|
||||
allowed to be empty. Instances of this type with an empty value here are
|
||||
almost certainly wrong.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
type: string
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
|
@ -170,10 +182,19 @@ spec:
|
|||
otherwise it will be injected into the back.
|
||||
default BeforeAppContainerType
|
||||
type: string
|
||||
shareVolumeDevicePolicy:
|
||||
description: |-
|
||||
If ShareVolumeDevicePolicy is enabled, the sidecar container will share the other container's VolumeDevices
|
||||
in the pod(don't contain the injected sidecar container).
|
||||
This is a pointer to ensure that the sidecarset-hash does not change if the user does not configure this field, mainly for compatibility with older versions.
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
type: object
|
||||
shareVolumePolicy:
|
||||
description: |-
|
||||
If ShareVolumePolicy is enabled, the sidecar container will share the other container's VolumeMounts
|
||||
in the pod(don't contains the injected sidecar container).
|
||||
in the pod(not including the injected sidecar container).
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
|
@ -258,9 +279,11 @@ spec:
|
|||
history SidecarSet to inject specific version of the sidecar to pods.
|
||||
type: string
|
||||
policy:
|
||||
description: |-
|
||||
Policy describes the behavior of revision injection.
|
||||
Defaults to Always.
|
||||
default: Always
|
||||
description: Policy describes the behavior of revision injection.
|
||||
enum:
|
||||
- Always
|
||||
- Partial
|
||||
type: string
|
||||
revisionName:
|
||||
description: RevisionName corresponds to a specific ControllerRevision
|
||||
|
@ -304,11 +327,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -371,11 +396,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -414,8 +441,7 @@ spec:
|
|||
paused:
|
||||
description: |-
|
||||
Paused indicates that the SidecarSet is paused to update the injected pods,
|
||||
but it don't affect the webhook inject sidecar container into the newly created pods.
|
||||
default is false
|
||||
For the impact on the injection behavior for newly created Pods, please refer to the comments of Selector.
|
||||
type: boolean
|
||||
priorityStrategy:
|
||||
description: |-
|
||||
|
@ -483,11 +509,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -527,8 +555,13 @@ spec:
|
|||
type: object
|
||||
type: array
|
||||
selector:
|
||||
description: If selector is not nil, this upgrade will only update
|
||||
the selected pods.
|
||||
description: |-
|
||||
If selector is not nil, this upgrade will only update the selected pods.
|
||||
|
||||
Starting from Kruise 1.8.0, the updateStrategy.Selector affects the version of the Sidecar container
|
||||
injected into newly created Pods by a SidecarSet configured with an injectionStrategy.
|
||||
In most cases, all newly created Pods are injected with the specified Sidecar version as configured in injectionStrategy.revision,
|
||||
which is consistent with previous versions.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector
|
||||
|
@ -556,11 +589,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: statefulsets.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -93,7 +93,6 @@ spec:
|
|||
These are replicas in the sense that they are instantiations of the
|
||||
same Template, but individual replicas also have a consistent identity.
|
||||
If unspecified, defaults to 1.
|
||||
TODO: Consider a rename of this field.
|
||||
format: int32
|
||||
type: integer
|
||||
revisionHistoryLimit:
|
||||
|
@ -136,11 +135,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -301,11 +302,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -344,7 +347,6 @@ spec:
|
|||
this list must have at least one matching (by name) volumeMount in one
|
||||
container in the template. A claim in this list takes precedence over
|
||||
any volumes in the template, with the same name.
|
||||
TODO: Define the behavior if a claim already exists with the same name.
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
required:
|
||||
- selector
|
||||
|
@ -640,7 +642,6 @@ spec:
|
|||
These are replicas in the sense that they are instantiations of the
|
||||
same Template, but individual replicas also have a consistent identity.
|
||||
If unspecified, defaults to 1.
|
||||
TODO: Consider a rename of this field.
|
||||
format: int32
|
||||
type: integer
|
||||
reserveOrdinals:
|
||||
|
@ -652,8 +653,12 @@ spec:
|
|||
Then controller will delete Pod-1 and create Pod-3 (existing Pods will be [0, 2, 3])
|
||||
- If you just want to delete Pod-1, you should set spec.reserveOrdinal to [1] and spec.replicas to 2.
|
||||
Then controller will delete Pod-1 (existing Pods will be [0, 2])
|
||||
You can also use ranges along with numbers, such as [1, 3-5], which is a shortcut for [1, 3, 4, 5].
|
||||
items:
|
||||
type: integer
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
x-kubernetes-int-or-string: true
|
||||
type: array
|
||||
revisionHistoryLimit:
|
||||
description: |-
|
||||
|
@ -711,11 +716,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -784,9 +791,7 @@ spec:
|
|||
type: integer
|
||||
partition:
|
||||
description: |-
|
||||
Partition indicates the ordinal at which the StatefulSet should be partitioned by default.
|
||||
But if unorderedUpdate has been set:
|
||||
- Partition indicates the number of pods with non-updated revisions when rolling update.
|
||||
Partition indicates the number of pods the StatefulSet should be partitioned by default.
|
||||
- It means controller will update $(replicas - partition) number of pod.
|
||||
Default value is 0.
|
||||
format: int32
|
||||
|
@ -876,11 +881,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -919,8 +926,19 @@ spec:
|
|||
this list must have at least one matching (by name) volumeMount in one
|
||||
container in the template. A claim in this list takes precedence over
|
||||
any volumes in the template, with the same name.
|
||||
TODO: Define the behavior if a claim already exists with the same name.
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
volumeClaimUpdateStrategy:
|
||||
description: |-
|
||||
VolumeClaimUpdateStrategy specifies the strategy for updating VolumeClaimTemplates within a StatefulSet.
|
||||
This field is currently only effective if the StatefulSetAutoResizePVCGate is enabled.
|
||||
properties:
|
||||
type:
|
||||
description: |-
|
||||
Type specifies the type of update strategy, possible values include:
|
||||
OnPodRollingUpdateVolumeClaimUpdateStrategyType: Apply the update strategy during pod rolling updates.
|
||||
OnPVCDeleteVolumeClaimUpdateStrategyType: Apply the update strategy when a PersistentVolumeClaim is deleted.
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- selector
|
||||
- template
|
||||
|
@ -1024,6 +1042,43 @@ spec:
|
|||
indicated by updateRevision.
|
||||
format: int32
|
||||
type: integer
|
||||
volumeClaims:
|
||||
description: |-
|
||||
VolumeClaims represents the status of compatibility between existing PVCs
|
||||
and their respective templates. It tracks whether the PersistentVolumeClaims have been updated
|
||||
to match any changes made to the volumeClaimTemplates, ensuring synchronization
|
||||
between the defined templates and the actual PersistentVolumeClaims in use.
|
||||
items:
|
||||
description: |-
|
||||
VolumeClaimStatus describes the status of a volume claim template.
|
||||
It provides details about the compatibility and readiness of the volume claim.
|
||||
properties:
|
||||
compatibleReadyReplicas:
|
||||
description: |-
|
||||
CompatibleReadyReplicas is the number of replicas that are both ready and compatible with the volume claim.
|
||||
It highlights that these replicas are not only compatible but also ready to be put into service immediately.
|
||||
Compatibility is determined by whether the pvc spec storage requests are greater than or equal to the template spec storage requests
|
||||
The "ready" status is determined by whether the PVC status capacity is greater than or equal to the PVC spec storage requests.
|
||||
format: int32
|
||||
type: integer
|
||||
compatibleReplicas:
|
||||
description: |-
|
||||
CompatibleReplicas is the number of replicas currently compatible with the volume claim.
|
||||
It indicates how many replicas can function properly, being compatible with this volume claim.
|
||||
Compatibility is determined by whether the PVC spec storage requests are greater than or equal to the template spec storage requests
|
||||
format: int32
|
||||
type: integer
|
||||
volumeClaimName:
|
||||
description: |-
|
||||
VolumeClaimName is the name of the volume claim.
|
||||
This is a unique identifier used to reference a specific volume claim.
|
||||
type: string
|
||||
required:
|
||||
- compatibleReadyReplicas
|
||||
- compatibleReplicas
|
||||
- volumeClaimName
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- availableReplicas
|
||||
- currentReplicas
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: uniteddeployments.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -108,11 +108,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -263,7 +265,6 @@ spec:
|
|||
These are replicas in the sense that they are instantiations of the
|
||||
same Template, but individual replicas also have a consistent identity.
|
||||
If unspecified, defaults to 1.
|
||||
TODO: Consider a rename of this field.
|
||||
format: int32
|
||||
type: integer
|
||||
reserveOrdinals:
|
||||
|
@ -275,8 +276,12 @@ spec:
|
|||
Then controller will delete Pod-1 and create Pod-3 (existing Pods will be [0, 2, 3])
|
||||
- If you just want to delete Pod-1, you should set spec.reserveOrdinal to [1] and spec.replicas to 2.
|
||||
Then controller will delete Pod-1 (existing Pods will be [0, 2])
|
||||
You can also use ranges along with numbers, such as [1, 3-5], which is a shortcut for [1, 3, 4, 5].
|
||||
items:
|
||||
type: integer
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
x-kubernetes-int-or-string: true
|
||||
type: array
|
||||
revisionHistoryLimit:
|
||||
description: |-
|
||||
|
@ -334,11 +339,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -407,9 +414,7 @@ spec:
|
|||
type: integer
|
||||
partition:
|
||||
description: |-
|
||||
Partition indicates the ordinal at which the StatefulSet should be partitioned by default.
|
||||
But if unorderedUpdate has been set:
|
||||
- Partition indicates the number of pods with non-updated revisions when rolling update.
|
||||
Partition indicates the number of pods the StatefulSet should be partitioned by default.
|
||||
- It means controller will update $(replicas - partition) number of pod.
|
||||
Default value is 0.
|
||||
format: int32
|
||||
|
@ -502,11 +507,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -545,8 +552,19 @@ spec:
|
|||
this list must have at least one matching (by name) volumeMount in one
|
||||
container in the template. A claim in this list takes precedence over
|
||||
any volumes in the template, with the same name.
|
||||
TODO: Define the behavior if a claim already exists with the same name.
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
volumeClaimUpdateStrategy:
|
||||
description: |-
|
||||
VolumeClaimUpdateStrategy specifies the strategy for updating VolumeClaimTemplates within a StatefulSet.
|
||||
This field is currently only effective if the StatefulSetAutoResizePVCGate is enabled.
|
||||
properties:
|
||||
type:
|
||||
description: |-
|
||||
Type specifies the type of update strategy, possible values include:
|
||||
OnPodRollingUpdateVolumeClaimUpdateStrategyType: Apply the update strategy during pod rolling updates.
|
||||
OnPVCDeleteVolumeClaimUpdateStrategyType: Apply the update strategy when a PersistentVolumeClaim is deleted.
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- selector
|
||||
- template
|
||||
|
@ -714,11 +732,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -856,11 +876,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -943,6 +965,45 @@ spec:
|
|||
description: Topology describes the pods distribution detail between
|
||||
each of subsets.
|
||||
properties:
|
||||
scheduleStrategy:
|
||||
description: ScheduleStrategy indicates the strategy the UnitedDeployment
|
||||
used to preform the schedule between each of subsets.
|
||||
properties:
|
||||
adaptive:
|
||||
description: Adaptive is used to communicate parameters when
|
||||
Type is AdaptiveUnitedDeploymentScheduleStrategyType.
|
||||
properties:
|
||||
rescheduleCriticalSeconds:
|
||||
description: |-
|
||||
RescheduleCriticalSeconds indicates how long controller will reschedule a schedule failed Pod to the subset that has
|
||||
redundant capacity after the subset where the Pod lives. If a Pod was scheduled failed and still in an unschedulabe status
|
||||
over RescheduleCriticalSeconds duration, the controller will reschedule it to a suitable subset. Default is 30 seconds.
|
||||
format: int32
|
||||
type: integer
|
||||
reserveUnschedulablePods:
|
||||
description: |-
|
||||
ReserveUnschedulablePods indicates whether to enable reservation rescheduling mode, which is disabled by default.
|
||||
If this feature is enabled, those pending pods that would otherwise be permanently transferred to other subsets
|
||||
due to scheduling failure will be retained, and a temporary substitute Pod will be created in another subset to take over its work.
|
||||
When the retained pod is successfully scheduled and ready, its temporary substitute will be deleted.
|
||||
type: boolean
|
||||
unschedulableDuration:
|
||||
description: |-
|
||||
UnschedulableDuration is used to set the number of seconds for a Subset to recover from an unschedulable state,
|
||||
with a default value of 300 seconds.
|
||||
format: int32
|
||||
type: integer
|
||||
type: object
|
||||
type:
|
||||
description: |-
|
||||
Type indicates the type of the UnitedDeploymentScheduleStrategy.
|
||||
Default is Fixed
|
||||
enum:
|
||||
- Adaptive
|
||||
- Fixed
|
||||
- ""
|
||||
type: string
|
||||
type: object
|
||||
subsets:
|
||||
description: |-
|
||||
Contains the details of each subset. Each element in this array represents one subset
|
||||
|
@ -1011,11 +1072,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchFields:
|
||||
description: A list of node selector requirements by
|
||||
node's fields.
|
||||
|
@ -1043,11 +1106,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
patch:
|
||||
|
@ -1161,7 +1226,7 @@ spec:
|
|||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: A human readable message indicating details about
|
||||
description: A human-readable message indicating details about
|
||||
the transition.
|
||||
type: string
|
||||
reason:
|
||||
|
@ -1197,6 +1262,10 @@ spec:
|
|||
description: Replicas is the most recently observed number of replicas.
|
||||
format: int32
|
||||
type: integer
|
||||
reservedPods:
|
||||
description: The number of reserved pods in temporary adaptive strategy.
|
||||
format: int32
|
||||
type: integer
|
||||
subsetReplicas:
|
||||
additionalProperties:
|
||||
format: int32
|
||||
|
@ -1204,6 +1273,52 @@ spec:
|
|||
description: Records the topology detail information of the replicas
|
||||
of each subset.
|
||||
type: object
|
||||
subsetStatuses:
|
||||
description: Record the conditions of each subset.
|
||||
items:
|
||||
properties:
|
||||
conditions:
|
||||
description: Conditions is an array of current observed subset
|
||||
conditions.
|
||||
items:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
type: string
|
||||
reason:
|
||||
type: string
|
||||
status:
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
required:
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
name:
|
||||
description: Subset name specified in Topology.Subsets
|
||||
type: string
|
||||
partition:
|
||||
description: Records the current partition. Currently unused.
|
||||
format: int32
|
||||
type: integer
|
||||
readyReplicas:
|
||||
description: Records the current ready replicas. Currently unused.
|
||||
format: int32
|
||||
type: integer
|
||||
replicas:
|
||||
description: Records the current replicas. Currently unused.
|
||||
format: int32
|
||||
type: integer
|
||||
reservedPods:
|
||||
description: Records the reserved pods in the subset.
|
||||
format: int32
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
updateStatus:
|
||||
description: Records the information of update progress.
|
||||
properties:
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: workloadspreads.apps.kruise.io
|
||||
spec:
|
||||
group: apps.kruise.io
|
||||
|
@ -147,11 +147,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchFields:
|
||||
description: A list of node selector requirements
|
||||
by node's fields.
|
||||
|
@ -179,11 +181,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
weight:
|
||||
|
@ -227,11 +231,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchFields:
|
||||
description: A list of node selector requirements by node's
|
||||
fields.
|
||||
|
@ -259,11 +265,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
tolerations:
|
||||
|
@ -310,6 +318,70 @@ spec:
|
|||
- name
|
||||
type: object
|
||||
type: array
|
||||
targetFilter:
|
||||
description: |-
|
||||
TargetFilter allows WorkloadSpread to manage only a portion of the Pods in the TargetReference:
|
||||
by specifying the criteria for the Pods to be managed through a label selector,
|
||||
and by specifying how to obtain the total number of these selected Pods from the workload using replicasPaths.
|
||||
properties:
|
||||
replicasPathList:
|
||||
description: |-
|
||||
ReplicasPathList is a list of resource paths used to specify how to determine the total number of replicas of
|
||||
the target workload after filtering. If this list is not empty, WorkloadSpread will look for the corresponding
|
||||
values in the target resource according to each path, and treat the sum of these values as the total number of replicas after filtering.
|
||||
|
||||
The replicas path is a dot-separated path, similar to "spec.replicas". If there are arrays, you can use numbers to denote indexes, like "subsets.1.replicas".
|
||||
The real values of these paths must be integers.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
selector:
|
||||
description: Selector is used to filter the Pods to be managed.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector
|
||||
requirements. The requirements are ANDed.
|
||||
items:
|
||||
description: |-
|
||||
A label selector requirement is a selector that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector
|
||||
applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: |-
|
||||
operator represents a key's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
values is an array of string values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array is replaced during a strategic
|
||||
merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions, whose key field is "key", the
|
||||
operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
targetRef:
|
||||
description: TargetReference is the target workload that WorkloadSpread
|
||||
want to control.
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: podunavailablebudgets.policy.kruise.io
|
||||
spec:
|
||||
group: policy.kruise.io
|
||||
|
@ -106,11 +106,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -35,7 +35,7 @@ spec:
|
|||
- --enable-leader-election
|
||||
- --logtostderr=true
|
||||
- --v=5
|
||||
- --feature-gates=AllAlpha=true,EnableExternalCerts=false
|
||||
- --feature-gates=AllAlpha=true,AllBeta=true,EnableExternalCerts=false
|
||||
image: controller:latest
|
||||
imagePullPolicy: Always
|
||||
securityContext:
|
||||
|
@ -60,8 +60,8 @@ spec:
|
|||
port: 8000
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
cpu: 2
|
||||
memory: 2Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
|
@ -102,7 +102,8 @@ spec:
|
|||
args:
|
||||
- --logtostderr=true
|
||||
- -v=5
|
||||
- --feature-gates=AllAlpha=true
|
||||
- --feature-gates=AllAlpha=true,AllBeta=true
|
||||
- --max-workers-for-pull-image=2
|
||||
image: controller:latest
|
||||
imagePullPolicy: Always
|
||||
securityContext:
|
||||
|
|
|
@ -4,6 +4,51 @@ kind: ClusterRole
|
|||
metadata:
|
||||
name: manager-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
- events
|
||||
- persistentvolumeclaims
|
||||
- pods
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/ephemeralcontainers
|
||||
- pods/status
|
||||
- pods/resize
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- '*'
|
||||
resources:
|
||||
|
@ -22,15 +67,6 @@ rules:
|
|||
- admissionregistration.k8s.io
|
||||
resources:
|
||||
- mutatingwebhookconfigurations
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- admissionregistration.k8s.io
|
||||
resources:
|
||||
- validatingwebhookconfigurations
|
||||
verbs:
|
||||
- get
|
||||
|
@ -52,18 +88,8 @@ rules:
|
|||
- apps
|
||||
resources:
|
||||
- controllerrevisions
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
- statefulsets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
|
@ -76,6 +102,7 @@ rules:
|
|||
- apps
|
||||
resources:
|
||||
- deployments/status
|
||||
- statefulsets/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
|
@ -94,30 +121,23 @@ rules:
|
|||
- replicasets/status
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- statefulsets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- statefulsets/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- advancedcronjobs
|
||||
- broadcastjobs
|
||||
- clonesets
|
||||
- containerrecreaterequests
|
||||
- daemonsets
|
||||
- imagelistpulljobs
|
||||
- imagepulljobs
|
||||
- nodeimages
|
||||
- nodepodprobes
|
||||
- persistentpodstates
|
||||
- podprobemarkers
|
||||
- sidecarsets
|
||||
- statefulsets
|
||||
- uniteddeployments
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
|
@ -130,116 +150,44 @@ rules:
|
|||
- apps.kruise.io
|
||||
resources:
|
||||
- advancedcronjobs/finalizers
|
||||
- broadcastjobs/finalizers
|
||||
- clonesets/finalizers
|
||||
- containerrecreaterequests/finalizers
|
||||
- daemonsets/finalizers
|
||||
- imagelistpulljobs/finalizers
|
||||
- imagepulljobs/finalizers
|
||||
- nodeimages/finalizers
|
||||
- nodepodprobes/finalizers
|
||||
- persistentpodstates/finalizers
|
||||
- podprobemarkers/finalizers
|
||||
- resourcedistributions/finalizers
|
||||
- sidecarsets/finalizers
|
||||
- statefulsets/finalizers
|
||||
- uniteddeployments/finalizers
|
||||
- workloadspreads/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- advancedcronjobs/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- broadcastjobs
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- broadcastjobs/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- broadcastjobs/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- clonesets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- clonesets/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- clonesets/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- containerrecreaterequests
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- containerrecreaterequests/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- containerrecreaterequests/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- daemonsets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- daemonsets/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- daemonsets/status
|
||||
- ephemeraljobs/finalizers
|
||||
- ephemeraljobs/status
|
||||
- imagelistpulljobs/status
|
||||
- imagepulljobs/status
|
||||
- nodeimages/status
|
||||
- nodepodprobes/status
|
||||
- persistentpodstates/status
|
||||
- podprobemarkers/status
|
||||
- resourcedistributions/status
|
||||
- sidecarsets/status
|
||||
- statefulsets/status
|
||||
- uniteddeployments/status
|
||||
- workloadspreads/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
|
@ -255,178 +203,6 @@ rules:
|
|||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- ephemeraljobs/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- ephemeraljobs/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- imagelistpulljobs
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- imagelistpulljobs/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- imagelistpulljobs/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- imagepulljobs
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- imagepulljobs/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- imagepulljobs/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- nodeimages
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- nodeimages/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- nodeimages/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- nodepodprobes
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- nodepodprobes/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- nodepodprobes/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- persistentpodstates
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- persistentpodstates/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- persistentpodstates/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- podprobemarkers
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- podprobemarkers/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- podprobemarkers/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
|
@ -435,98 +211,6 @@ rules:
|
|||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- resourcedistributions/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- resourcedistributions/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- sidecarsets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- sidecarsets/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- sidecarsets/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- statefulsets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- statefulsets/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- statefulsets/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- uniteddeployments
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- uniteddeployments/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- uniteddeployments/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
|
@ -537,20 +221,6 @@ rules:
|
|||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- workloadspreads/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- apps.kruise.io
|
||||
resources:
|
||||
- workloadspreads/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
|
@ -571,97 +241,6 @@ rules:
|
|||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/ephemeralcontainers
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- policy.kruise.io
|
||||
resources:
|
||||
|
@ -688,3 +267,11 @@ rules:
|
|||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- storage.k8s.io
|
||||
resources:
|
||||
- storageclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
|
|
|
@ -5,11 +5,11 @@ like StatefulSet, Deployment, DaemonSet for instances. While at the same time, m
|
|||
express more and more diverse requirements for workload upgrade and deployment, which
|
||||
in many cases, cannot be satisfied by the default workload controllers.
|
||||
|
||||
Kruise attempts to fill such gap by offering a set of controllers as the supplement
|
||||
Kruise attempts to fill such a gap by offering a set of controllers as the supplement
|
||||
to manage new workloads in Kubernetes. The target use cases are representative,
|
||||
originally collected from the users of Alibaba cloud container services and the
|
||||
developers of the in-house large scale on-line/off-line container applications.
|
||||
Most of the use cases can be easily applied to other similar cloud user scenarios.
|
||||
Most of the use cases can be easily applied to other similar scenarios for cloud users.
|
||||
|
||||
Currently, Kruise supports the following workloads.
|
||||
|
||||
|
|
Binary file not shown.
After Width: | Height: | Size: 170 KiB |
|
@ -99,7 +99,7 @@ spec:
|
|||
|
||||
the number of creating ephemeralcontainer will not exceed replicas.
|
||||
|
||||
1. parallelism means parallel running ephemeral containeres.
|
||||
1. parallelism means parallel running ephemeral containers.
|
||||
|
||||
the value of parallelism cannot exceed 10.
|
||||
|
||||
|
|
|
@ -190,7 +190,7 @@ Add webhooks for ImageListPullJob, validating-webhook and mutating-webhook.
|
|||
|
||||
The `ImageListPullJob` will be deleted , when the condition is: `CompletionPolicy.Type=Always && CompletionPolicy.TTLSecondsAfterFinished>0`.
|
||||
|
||||
Calculate the latest status of the `ImageListPullJob` from all `ImagePullJob`s which is ownerd by the `ImageListPullJob`.
|
||||
Calculate the latest status of the `ImageListPullJob` from all `ImagePullJob`s which is owned by the `ImageListPullJob`.
|
||||
|
||||
1),Create an `ImagePullJob` based on `ImageListPullJob.Spec.Image`.
|
||||
|
||||
|
|
|
@ -0,0 +1,235 @@
|
|||
---
|
||||
title: AdvancedStatefulSetVolumeResize
|
||||
authors:
|
||||
- "@Abner-1"
|
||||
reviewers:
|
||||
- "@furykerry"
|
||||
- "@zmberg"
|
||||
creation-date: 2024-06-26
|
||||
last-updated: 2024-06-26
|
||||
status:
|
||||
---
|
||||
|
||||
# Advanced StatefulSet 支持卷变配
|
||||
|
||||
## 目录
|
||||
* [Advanced StatefulSet 支持卷变配](#advanced-statefulset-支持卷变配)
|
||||
* [目录](#目录)
|
||||
* [Motivation](#motivation)
|
||||
* [用户场景](#用户场景)
|
||||
* [用户失败恢复场景](#用户失败恢复场景)
|
||||
* [本质问题](#本质问题)
|
||||
* [目标](#目标)
|
||||
* [非目标](#非目标)
|
||||
* [Proposal](#proposal)
|
||||
* [API 修改](#api-修改)
|
||||
* [增加 webhook 校验](#增加-webhook-校验)
|
||||
* [PVC 调谐过程修改](#pvc-调谐过程修改)
|
||||
* [原地变配 pvc 更新失败后怎么办?](#原地变配-pvc-更新失败后怎么办)
|
||||
* [如何认定更新失败](#如何认定更新失败)
|
||||
* [失败后处理流程](#失败后处理流程)
|
||||
* [方案A](#方案a)
|
||||
* [方案B](#方案b)
|
||||
* [Implementation](#implementation)
|
||||
* [为什么选择延续 KEP-661 的思路不追踪 vct 的历史版本?](#为什么选择延续-kep-661-的思路不追踪-vct-的历史版本)
|
||||
* [为什么需要增加 VolumeClaimUpdateStrategy 字段,而不是完全参照 KEP-661?](#为什么需要增加-volumeclaimupdatestrategy-字段而不是完全参照-kep-661)
|
||||
|
||||
## Motivation
|
||||
|
||||
目前已经支持了 CloneSet Volume Claim Templates 变动时触发 pod 重建,使 pvc 重新 reconcile 达成变配。
|
||||
|
||||
但是对于有状态应用这样的策略过于激进,需要完善 advanced Stateful Set 对于 Volume Claim Templates 变动的更新策略。
|
||||
|
||||
asts 现在对 Volume Claim Templates 变动完全不关注,只对新的 pod 进行 reconcile。
|
||||
|
||||
### 用户场景
|
||||
|
||||
1. **[H]** 对可支持变配的 StorageClass 的场景, 可以直接 edit pvc storage 字段增加规格大小(不支持减少)
|
||||
2. 对不支持变配的 StorageClass 的场景,需要确保已有 pvc 内容不再需要后可(手动/自动)删除 pvc 和 pod,新 reconcile 出来的 pvc 和 pod 就可以使用最新的配置 (完善用户场景, 该场景理论上是需要)
|
||||
a. 部分消费类场景,使用一段时间后磁盘会有一部分碎片,有时候会在消费完成后 recreate 以提高性能 (sts删除后重建是不是也可以)
|
||||
3. 对需要更改 StorageClass 的场景,操作和场景 2 类似
|
||||
a. 更改 ssd -> essd / 迁移上云等
|
||||
|
||||
### 用户失败恢复场景
|
||||
|
||||
在场景 1 的操作过程中可能遇到一些意外情况,如修改 Volume Claim Templates 因为各种情况在某个 pod 操作失败了。此时用户会有这几种恢复期望:
|
||||
1. 完全回滚到配置修改之前,如同时修改镜像和卷规格,但新镜像有问题,想完全回滚(KEP-661不解决)
|
||||
2. **[H]** 部分回滚到配置修改之前,如同时修改镜像和卷规格,但新镜像有问题,想回滚到旧镜像,但使用新的卷规格 (KEP-661解决)
|
||||
3. 不回滚配置,解决异常 pod 的失败问题
|
||||
a. 如变配符合预期但在某个节点存储 quota 不足,与管理员沟通后增加 quota
|
||||
b. 如变配符合预期但在某个节点底层资源存储不足,希望漂移到资源充足的节点上
|
||||
4. 重新修改配置,如修改卷规格10G -> 100G,因为存储 quota 不足,希望改成 10G -> 20G: (假设10个更新到第5个实例失败)前4个 pod 需要 100G -> 20G, 后6个需要10G -> 20G (KEP-1790)
|
||||
|
||||
### 本质问题
|
||||
|
||||
1. 如何识别不能原地变配的场景 ?
|
||||
|
||||
2. 有没有可能设计一种机制来识别pvc已无数据安全问题,从而让删除 pvc 的过程自动化,进而将自动化 pvc 重建的过程(KEP 4650 定义的机制更侧重将错误处理交由上层用户/平台)
|
||||
|
||||
1. 如果有这样一种机制,和 delete pvc 的区别在哪?
|
||||
- delete pvc 是一个和 api 做交互的接口,有权限要求;这种机制可以是某个组件识别到 pvc 状态/pod 内存储状态后打标,是一个实况上报的功能,权限要求较低
|
||||
- sts 控制器可以结合并发度和调谐进度来做 delete pvc 的实际决策
|
||||
2. 这样的一个机制是否可以和 volumeSnapShot 的流程结合起来考虑?
|
||||
|
||||
3. 用户想要的迁移 PVC 的机制究竟是什么样的?一个灵活的机制(KEP-4650)还是受限但通用的解决方案?
|
||||
|
||||
### 目标
|
||||
|
||||
1. 希望在 sc 支持容量扩展的前提下扩展 Volume Claim Templates 规格可以自动化操作
|
||||
2. 确保用户可以知道 pvc 的变配是否完成或发生错误
|
||||
3. 不阻碍用户尝试从异常情况下进行恢复
|
||||
4. 在打开 RecoverVolumeExpansionFailure feature gate 的集群中,允许用户达成恢复期望4
|
||||
5. 【Nice to have】不支持容量扩展的 storage class,由用户确保原有云盘内数据不需要后调整 Volume Claim Templates 配置 + 显式指定 pvc 更新方式,可自动删除重建
|
||||
- 如何区分是因为更新导致的重建,而不是异常场景驱逐后的重建 -- 参考 clone set 在delete pod 里处理
|
||||
- 替代方案:既然用户知道,理论上可以等待手动删除后重建(即 OnDelete)
|
||||
|
||||
### 非目标
|
||||
|
||||
1. 不实现 kep 1790
|
||||
2. 不实现 volume claim 的版本管理和跟踪,详细影响[为什么选择延续 KEP-661 的思路不追踪 vct 的历史版本?](#为什么选择延续-kep-661-的思路不追踪-vct-的历史版本)
|
||||
1. 不实现只改动 volume claim 的修改,此需求可以用运维手段来实现。
|
||||
3. 不实现和标识 pvc 可删除联动的调谐机制
|
||||
4. 不实现结合 VolumeSnapshot 做备份迁移的机制
|
||||
|
||||
|
||||
## Proposal
|
||||
|
||||
### API 修改
|
||||
|
||||
1. 在StatefulSet spec中引入一个新字段:`volumeClaimUpdateStrategy`
|
||||
来指定何时协调PVC和Pod的更新。可能的值有:
|
||||
- `OnDelete`:默认值,仅在旧PVC被删除时更新PVC。
|
||||
- `OnPodRollingUpdate`: 在滚动更新 Pod 时更新 PVC
|
||||
|
||||
> 详细可见 [为什么需要增加 VolumeClaimUpdateStrategy 字段,而不是完全参照 KEP-661?](#为什么需要增加-volumeclaimupdatestrategy-字段而不是完全参照-kep-661)
|
||||
2. **在 StatefulSet `status`** 引入一个数组字段 `volumeClaimTemplates`:
|
||||
- **`status.volumeClaimTemplates[x].templateName`**: 显示追踪的 `spec.volumeClaimTemplates` 的 pvc (template)name,这个在 vcts 一定唯一,方便用户排查问题
|
||||
- **Note**: indexId 不直观且 vcts 允许删减,难以追踪。
|
||||
- **`status.volumeClaimTemplates[x].compatibleReplicas`**: 是当前 vct 已兼容/已更新的副本数
|
||||
- **`status.volumeClaimTemplates[x].compatibleReadyReplicas`**: 是当前 vct 已兼容/已成功更新的副本数
|
||||
|
||||
当识别到 status.observedGeneration == spec.generation,且每一个 volumeClaimTemplates 数组中 compatibleReadyReplicas == compatibleReplicas 时则认为已经更新完成。
|
||||
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
volumeClaimUpdateStrategy: OnDelete # new field
|
||||
# OnDelete:默认值,仅在旧PVC被删除时更新PVC
|
||||
# OnPodRollingUpdate:在滚动更新 Pod 时更新 PVC
|
||||
|
||||
status:
|
||||
availableReplicas: 3
|
||||
collisionCount: 0
|
||||
currentReplicas: 3
|
||||
currentRevision: ex1-54c5bd476c
|
||||
observedGeneration: 3
|
||||
readyReplicas: 3
|
||||
replicas: 3
|
||||
updateRevision: ex1-54c5bd476c
|
||||
updatedReplicas: 3
|
||||
volumeClaimTemplates: # new field
|
||||
- compatibleReplicas: 2 # 下发 resize 的副本数
|
||||
compatibleReadyReplicas: 1 # resize 状态成功的副本数
|
||||
templateName: vol1
|
||||
- compatibleReplicas: 3
|
||||
compatibleReadyReplicas: 3
|
||||
templateName: vol2
|
||||
```
|
||||
|
||||
|
||||
### 增加 webhook 校验
|
||||
通过 storage class 中 `allowVolumeExpansion` 字段判断是否支持扩展 pvc,但这个字段不一定真实反应 CSI 实际可扩展能力,需要由管理员自行保证,此处尽可能拦截非法请求。
|
||||
|
||||
1. Update Strategy为 `OnRollingUpdate` 时,如果对应 sc 支持扩展且 volumeClaimTemplates 不变或只是大小扩展,则通过。否则拒绝。
|
||||
- Q:如果先改成 OnDelete 的同时改 volumeClaimTemplates,后面再改成 OnRollingUpdate?
|
||||
- A:会在滚动更新过程中卡住,走异常更新流程即可
|
||||
|
||||
2. Update Strategy为 OnDelete 时,直接放过
|
||||
|
||||
|
||||
### PVC 调谐过程修改
|
||||
|
||||
1. 监听 pvc 的 update: 但不根据 owner reference(已经被用来实现 pvc 自动删除),而是在 annotation 上记录 sts 对象
|
||||
- Q:现有 pvc 没有 annotation 怎么监听?
|
||||
- A:只监听(新版本)更新过的 pvc 用于在 pvc 更新时触发新的调谐。
|
||||
2. update status 中增加对 pvc 的检验/版本管理
|
||||
3. rolling update 里在更新 pod 之前更新 pvc
|
||||
1. pvc 不 ready 时,也将该 pod 加入 unavailablePods
|
||||
2. expand 的时候更新 pvc
|
||||
3. 识别 expand 回滚的情况
|
||||
- 1790 不开的时候不卡
|
||||
- 1790 开启的时候下发 resize
|
||||
4. 更新完 pvc 后需要验证 pvc 是否 ready/处于 FileSystemResizePending 状态,如不满足则阻断
|
||||
|
||||
|
||||
### 原地变配 pvc 更新失败后怎么办?
|
||||
|
||||
#### 如何认定更新失败
|
||||
1. 明确的更新错误
|
||||
2. 不明确的可能重试成功的错误,等待一个 maxWaitTime (全局设置,默认值 60s),超时后认为更新失败
|
||||
|
||||
识别到失败后会在 sts 资源上打印错误 event
|
||||
|
||||
#### 失败后处理流程
|
||||
理论上 `OnPodRollingUpdate` 失败后需要用户进行介入,此时一般是需要重建 pvc (同时也意味着 pod 一定要重建)。
|
||||
|
||||
以三副本场景举例,在原地变配 pvc2 失败后,
|
||||
0. 识别到失败后会在 sts 资源上打印错误 event
|
||||
|
||||
失败处理流程有两种方案:
|
||||
##### 方案A
|
||||
|
||||
1. 删除 pod2, 同时给 pvc2 打上标签
|
||||
2. 识别到 pvc2 上的标签时不会进行新 pod2 的创建
|
||||
3. 等待 pvc2 兼容 且标签消除(可在识别到 pvc 兼容后自动去除)
|
||||
1. 此时用户介入预期会有几种情况
|
||||
- pvc2 数据不需要,delete pvc2
|
||||
- 下发一个新 job 挂载 pvc2,进行备份/快照,成功后 delete pvc2
|
||||
- 支持快照的 storage class 可以下发 `VolumeSnapShot` 资源,并在合适的时间还原
|
||||
4. (3.5)此时如果删除 pod0,会触发 pod0的重建,不会因为 pvc0 不兼容而卡住
|
||||
5. 兼容后再次创建 pod2,等待 pod2 ready 后更新下一个序号
|
||||
|
||||
|
||||
适用于明确不能更新的 pvc 场景,比如下发 patch 被拒绝等
|
||||
|
||||
但基于超时方案的失败识别可能会造成 pod 过早删除,导致只支持在线更新的 CSI 的 pvc 一直没法变配成功
|
||||
|
||||
##### 方案B
|
||||
|
||||
1. controller 在 patch pvc 后等待 pvc 变更完成
|
||||
2. 会一直等待 pvc 变更完成,一直卡住
|
||||
3. 此时用户识别到错误 event 进行介入
|
||||
1. 人工处理 pvc 变更完成
|
||||
2. 原 pvc 内数据不再需要,删除原 pvc(需要同时删除 pod 和 pvc),controller 自动创建出新 pvc
|
||||
3. 对原 pvc 内数据进行了备份/快照后,执行2
|
||||
4. 判断暂时没法处理,更改 `OnPodRollingUpdate` 为 `OnDelete`, 不再变动 pvc
|
||||
|
||||
一切都由上层用户判断,任何场景都适用
|
||||
|
||||
综合两种方案,方案1目前不能解决所有场景的边界问题,优先选择实现方案2,积累用户案例后可优化方案1。
|
||||
|
||||
### Implementation
|
||||
主体修改位于 `rollingUpdateStatefulsetPods` 函数
|
||||

|
||||
|
||||
|
||||
### 为什么选择延续 KEP-661 的思路不追踪 vct 的历史版本?
|
||||
现在 asts/cloneset 不在 controller revision 追踪 volumeClaimTemplates 的历史信息,只关注当前值,延续当前行为的主要原因:
|
||||
1. 将 vct 的信息加入 controller revision,意味着**如果只存在 vct 的改动也会触发 asts 的版本变动**
|
||||
1. 目前没有收集到相关的需求
|
||||
2. 对现有控制器流程影响比较大,涉及改动多,风险比较大
|
||||
3. 该需求可通过运行脚本来批量 patch 或通过下发一个 job 来解决
|
||||
|
||||
2. 直接回滚的操作可以通过上层重新下发配置解决,预想中的大部分场景是可以不回滚pvc配置(或不紧急)
|
||||
- 相较 expand pvc 的需求优先级较低,如有必要,可以后续演进
|
||||
|
||||
3. 加入历史版本跟踪,可以在尚未更新到某 pvc 时,即使 pvc 被删除也会被拉起到历史版本, 而非最新版本
|
||||
- pvc 数据还是没法恢复的,此时用户 delete 某个 pvc 的目的是为了拉起旧版本的 pvc 配置吗? 貌似没啥区别
|
||||
|
||||
在上述三个场景没有进一步反馈的情况下,考虑到复杂度,逐步演进暂不实现。
|
||||
|
||||
### 为什么需要增加 VolumeClaimUpdateStrategy 字段,而不是完全参照 KEP-661?
|
||||
1. sts 之前不允许修改 vct 任何字段,661 实现的是功能增强
|
||||
2. asts 之前允许修改 vct 任何字段,如只允许修改 size,无法保证以前的用户场景兼容。通过增加 VolumeClaimUpdateStrategy 字段来兼容之前的行为
|
||||
3. 可用于统一 cloneset 目前的 recreate 行为,便于理解
|
||||
4. 可用于未来可能的集合 VolumeSnapshot 的功能。
|
|
@ -0,0 +1,241 @@
|
|||
---
|
||||
title: AdvancedStatefulSetVolumeResize
|
||||
authors:
|
||||
- "@Abner-1"
|
||||
reviewers:
|
||||
- "@furykerry"
|
||||
- "@zmberg"
|
||||
creation-date: 2024-06-26
|
||||
last-updated: 2024-06-26
|
||||
status:
|
||||
---
|
||||
|
||||
# Advanced StatefulSet Volume Resize
|
||||
|
||||
## Table of Contents
|
||||
|
||||
A table of contents is helpful for quickly jumping to sections of a proposal and for highlighting
|
||||
any additional information provided beyond the standard proposal template.
|
||||
[Tools for generating](https://github.com/ekalinin/github-markdown-toc) a table of contents from markdown are available.
|
||||
|
||||
* [Table of Contents](#table-of-contents)
|
||||
* [Motivation](#motivation)
|
||||
* [User Story](#user-story)
|
||||
* [Failure Recovery User Story](#failure-recovery-user-story)
|
||||
* [Fundamental Issues](#fundamental-issues)
|
||||
* [Goal](#goal)
|
||||
* [None Goal](#none-goal)
|
||||
* [Proposal](#proposal)
|
||||
* [API Definition](#api-definition)
|
||||
* [Adding Webhook Validation](#adding-webhook-validation)
|
||||
* [Updating PVC Process](#updating-pvc-process)
|
||||
* [Handling In-place PVC Update Failures](#handling-in-place-pvc-update-failures)
|
||||
* [Reasons for Not Tracking Historical Versions of VolumeClaimTemplates per KEP-661](#reasons-for-not-tracking-historical-versions-of-volumeclaimtemplates-per-kep-661)
|
||||
* [Implementation](#implementation)
|
||||
|
||||
## Motivation
|
||||
|
||||
We have recently implemented a feature that triggers pod reconstruction when there are changes to
|
||||
the `VolumeClaimTemplates` in `CloneSet`. This leads to the reconciliation of Persistent Volume Claims (PVCs) and the
|
||||
implementation of reconfiguration. However, this approach may be too aggressive for stateful applications. Therefore, it
|
||||
is essential to refine the update strategy for `AdvancedStatefulSet` in response to changes in `VolumeClaimTemplates`.
|
||||
|
||||
### User Story
|
||||
|
||||
The current behavior of `AdvancedStatefulSet` is to disregard changes in `VolumeClaimTemplates`, reconciling only for
|
||||
the creation of new pods. Users may encounter situations such as:
|
||||
|
||||
1. **[H]** In cases where StorageClasses support expansion, users can directly edit the PVC's storage capacity to
|
||||
increase it (decreasing is not supported).
|
||||
2. For StorageClasses that do not support expansion, users must ensure that the contents of existing PVCs are no longer
|
||||
needed before manually or automatically deleting the PVC and associated pods. New PVCs and pods will then be
|
||||
reconciled with the latest configuration. (This scenario requires refinement as it is theoretically necessary.)
|
||||
- In some use cases, disk fragmentation may occur over time, prompting users to recreate resources to enhance
|
||||
performance. (Would deleting and recreating the StatefulSet be beneficial here?)
|
||||
3. For scenarios requiring a change in StorageClass, the process is similar to scenario 2:
|
||||
- Examples include transitioning from SSD to ESSD or migrating to cloud storage.
|
||||
|
||||
### Failure Recovery User Story
|
||||
|
||||
In scenario 1, unexpected issues may arise, such as a failure in modifying `VolumeClaimTemplates` for a specific pod.
|
||||
Users may expect the following recovery options:
|
||||
|
||||
1. Complete rollback to the previous configuration, for instance, when both the image and volume specifications are
|
||||
altered, and the new image is defective, users desire a full rollback (not covered by KEP-661).
|
||||
2. **[H]** Partial rollback to the previous configuration, for example, when both the image and volume specifications
|
||||
are changed, but users wish to revert to the old image while retaining the new volume specifications (addressed by
|
||||
KEP-661).
|
||||
3. Configuration should not be rolled back, but the issue with the failed pod should be resolved:
|
||||
- When reconfiguration is expected but the storage quota on a specific node is insufficient. After consultation with
|
||||
the administrator, the quota is increased.
|
||||
- When reconfiguration is anticipated but the underlying resources on a specific node are inadequate. The pod should
|
||||
be relocated to a node with sufficient resources.
|
||||
4. Reconfigure the settings again, such as adjusting volume specifications from 10G to 100G, but due to storage quota
|
||||
constraints, change to 10G to 20G. (Assuming an update of 10 instances fails at the 5th instance) The first 4 pods
|
||||
need to revert from 100G to 20G, and the next 6 need to adjust from 10G to 20G (as per KEP-1790).
|
||||
|
||||
### Fundamental Issues
|
||||
|
||||
1. How can we identify situations where patching a PVC is impractical? (KEP-661)
|
||||
2. Can we develop a mechanism to automatically and safely delete PVCs, thereby streamlining the PVC deletion process? (
|
||||
KEP-4650 focuses more on delegating error handling to higher-level users or platforms)
|
||||
- If such a mechanism exists, how does it differ from manual PVC deletion?
|
||||
- `delete PVC` is an API interface that requires specific permissions.
|
||||
- This mechanism could involve a component that identifies and marks PVC or pod storage states, acting as a
|
||||
real-time monitoring tool.
|
||||
- The `StatefulSet` controller could then make actual deletion decisions based on concurrency and progress
|
||||
tuning.
|
||||
3. What kind of PVC migration mechanism do users truly require? A flexible approach as proposed in KEP-4650 or a more
|
||||
standardized yet broadly applicable solution?
|
||||
|
||||
### Goal
|
||||
|
||||
1. **Automated Expansion**: Enable automated expansion of `VolumeClaimTemplates` specifications, provided the
|
||||
StorageClass supports capacity expansion.
|
||||
2. **Completion and Error Awareness**: Ensure users can know whether the PVC expansion is complete or if any errors have
|
||||
occurred.
|
||||
3. **Unobstructed Recovery Attempts**: Do not obstruct users from attempting recovery in abnormal situations.
|
||||
4. **Recovery Expectation in Certain Clusters**: In clusters where the `RecoverVolumeExpansionFailure` feature gate is
|
||||
enabled, allow users to achieve recovery expectation 4.
|
||||
5. **Better Handling for Non-Supportive StorageClasses**: When the StorageClass does not support capacity expansion and
|
||||
the user has ensured that existing disk data does not need to be preserved, allow automatic deletion and recreation
|
||||
of PVCs by adjusting `VolumeClaimTemplates` configuration and explicitly specifying the update method.
|
||||
- Since users are aware of this, they can theoretically wait for manual deletion and reconstruction (OnDelete).
|
||||
- Q: How to distinguish between reconstructions due to updates and those due to evictions in abnormal scenarios? A:
|
||||
Refer to CloneSet handling in delete pod
|
||||
|
||||
### None Goal
|
||||
|
||||
1. **Do Not Implement KEP-1790**.
|
||||
2. **No Version Management**: Do not implement version management and tracking for volume claims.
|
||||
|
||||
## Proposal
|
||||
|
||||
### API Definition
|
||||
|
||||
1. **Introduction of `volumeClaimUpdateStrategy` in StatefulSet `spec`**. The possible values include:
|
||||
- `OnDelete`: The default value. PVCs are updated only when the old PVC is deleted.
|
||||
- `InPlace`: Updates the PVC in place, encompassing the behavior of `OnDelete`.
|
||||
- `ReCreate`: This may integrate the current behavior of PVCs in `CloneSet`.
|
||||
|
||||
2. **Introduction of `volumeClaimSyncStrategy` in StatefulSet `spec.updateStrategy.rollingUpdate`**. The possible values include:
|
||||
- `Async`: The default value. Maintains the current behavior and is only configurable when `volumeClaimUpdateStrategy` is set to `OnDelete`.
|
||||
- `LockStep`: PVCs are updated first, followed by the Pods. Further details are provided below.
|
||||
|
||||
3. **Introduction of `volumeClaimTemplates` in StatefulSet `status`**:
|
||||
- **`status.volumeClaimTemplates[x].templateName`**: Displays the name of the `spec.volumeClaimTemplates` PVC (template) being tracked. This name must be unique within `volumeClaimTemplates`, facilitating easier troubleshooting for users.
|
||||
- **Note**: IndexId is not used because it's less intuitive, and since `volumeClaimTemplates` allows for deletion and reduction, tracking with IndexId could be complex.
|
||||
- **`status.volumeClaimTemplates[x].compatibleReplicas`**: Indicates the number of replicas for the current `volumeClaimTemplate` that have been compatible or updated.
|
||||
- **`status.volumeClaimTemplates[x].compatibleReadyReplicas`**: Indicates the number of replicas for the current `volumeClaimTemplate` that have been compatible or successfully updated.
|
||||
- **`status.volumeClaimTemplates[x].finishedReconciliationGeneration`**: Shows the generation number when the PVC reconciliation was completed. For example, for the two `volumeClaimTemplates` mentioned:
|
||||
- The first one, because not all replicas are ready, remains at generation 2.
|
||||
- The second one, with all replicas ready, is synchronized with the current generation of the `StatefulSet`.
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
volumeClaimUpdateStrategy: OnDelete
|
||||
# OnDelete:Default value. PVCs are updated only when the old PVC is deleted.
|
||||
# InPlace:Updates the PVC in place. Also includes the behavior of OnDelete.
|
||||
# ReCreate (May integrate the current behavior of PVCs in CloneSet)
|
||||
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
volumeClaimSyncStrategy: Async
|
||||
# Async:Default value. Maintains current behavior. Only configurable when volumeClaimUpdateStrategy is set to OnDelete.
|
||||
# LockStep:PVCs are updated first, followed by Pods. More details provided below.
|
||||
|
||||
status:
|
||||
availableReplicas: 3
|
||||
collisionCount: 0
|
||||
currentReplicas: 3
|
||||
currentRevision: ex1-54c5bd476c
|
||||
observedGeneration: 3
|
||||
readyReplicas: 3
|
||||
replicas: 3
|
||||
updateRevision: ex1-54c5bd476c
|
||||
updatedReplicas: 3
|
||||
volumeClaimTemplates: # new field
|
||||
- finishedReconciliationGeneration: 2
|
||||
updatedReadyReplicas: 1 # resize 状态成功的副本数
|
||||
updatedReplicas: 2 # 下发 resize 的副本数
|
||||
templateName: vol1
|
||||
# 当 updatedReadyReplicas == spec.replicas时,
|
||||
# 调整 finishedReconciliationGeneration 为 sts 的 generation
|
||||
- finishedReconciliationGeneration: 3
|
||||
updatedReadyReplicas: 3
|
||||
updatedReplicas: 3
|
||||
templateName: vol2
|
||||
```
|
||||
|
||||
> Q: Why do we need to introduce these two fields in spec?
|
||||
>
|
||||
> A: The modification of VolumeClaimTemplates in Advanced StatefulSet has always been overlooked. If we completely follow
|
||||
> KEP-661, it could reduce API flexibility and make some current user scenarios unusable. To achieve forward
|
||||
> compatibility, there are two approaches: "adding these two fields to increase the flexibility of VolumeClaimTemplates
|
||||
> tuning" and "using a feature gate for global management of webhook interception and tuning steps".
|
||||
>
|
||||
> Tentatively, the approach of "adding these two fields to increase the flexibility of VolumeClaimTemplates tuning" is
|
||||
> selected.
|
||||
|
||||
### Adding Webhook Validation
|
||||
|
||||
Webhook validation can be implemented using the `allowVolumeExpansion` field in the `StorageClass` to ascertain whether PVC expansion is supported. However, this field may not always reflect the true capabilities of the CSI.
|
||||
|
||||
1. **Update Strategy set to InPlace**:
|
||||
- If the associated `StorageClass` permits expansion and the `VolumeClaimTemplates` are unchanged or only increase in size, the update is permitted. In other cases, it is denied.
|
||||
- **Question**: What happens if we switch to `OnDelete`, alter the `VolumeClaimTemplates`, and then revert to `InPlace`?
|
||||
- **Answer**: The rolling update process may become stuck, initiating an abnormal update sequence.
|
||||
|
||||
2. **Update Strategy set to OnDelete**:
|
||||
- Updates are directly permitted without additional checks.
|
||||
|
||||
### Updating PVC Process
|
||||
|
||||
1. **Monitor PVC Changes**:
|
||||
- Changes to PVCs are monitored using annotations instead of owner references, which are already utilized for automatic PVC deletion. This method records associated `StatefulSet` objects.
|
||||
|
||||
2. **Update Status**:
|
||||
- Introduce validation and version management for PVCs in the update status to ensure accurate tracking.
|
||||
|
||||
3. **Update PVC before Pod in Rolling Update**:
|
||||
- During an expansion, the PVC should be updated first.
|
||||
- Implement strategies to manage rollback scenarios for expansions:
|
||||
- If KEP-1790 is not enabled, prevent the process from getting stuck.
|
||||
- If KEP-1790 is enabled, allow for resizing to proceed.
|
||||
|
||||
### Handling In-place PVC Update Failures
|
||||
|
||||
In theory, after an `InPlace + LockStep` failure, user intervention is typically necessary, often involving the creation of a new PVC and Pod.
|
||||
|
||||
The ideal response would be to revert to the `OnDelete + LockStep` process. For example, using a three-replica scenario where `PVC2` fails to update in place:
|
||||
|
||||
1. Delete `Pod2` and simultaneously apply a label to `PVC2`.
|
||||
2. Detect the label on `PVC2` to prevent the creation of a new `Pod2`.
|
||||
3. Await `PVC2` compatibility and label removal (which can be automated once compatibility is confirmed).
|
||||
- **User intervention scenarios**:
|
||||
- If the data on `PVC2` is expendable: Delete `PVC2`.
|
||||
- If backup is required: create a job mounting `PVC2` to perform necessary operations. After completion, delete `PVC2`.
|
||||
4. If `Pod0` is deleted in the meantime, it should be recreated without being hindered by `PVC0` incompatibility.
|
||||
5. Once compatibility is restored, recreate `Pod2`. After `Pod2` is ready, proceed to update the subsequent replicas.
|
||||
|
||||
### Reasons for Not Tracking Historical Versions of `VolumeClaimTemplates` per KEP-661
|
||||
|
||||
Currently, `AdvancedStatefulSet/CloneSet` do not track historical `VolumeClaimTemplates` in controller revisions, focusing on current values. This approach is maintained for the following reasons:
|
||||
|
||||
1. **Impact on Controller Revision**:
|
||||
- Incorporating `VolumeClaimTemplates` in controller revisions would trigger version changes in `AdvancedStatefulSet` even with mere modifications to `VolumeClaimTemplates`. This could significantly disrupt existing controller processes, necessitating extensive modifications and posing high risks.
|
||||
|
||||
2. **Handling Rollbacks via Upper Layer**:
|
||||
- Rollback operations can typically be managed by reapplying configurations from the upper layer. In most scenarios, reverting PVC configurations (or the lack of urgency for rollbacks) is deemed sufficient.
|
||||
- The demand for expanding PVCs is more pressing. Future evolution can be considered based on necessity.
|
||||
|
||||
3. **Historical Version Tracking and PVC Deletion**:
|
||||
- With historical version tracking, an un-updated PVC would revert to an older version (instead of the latest) upon deletion.
|
||||
- **Data Integrity**: The restoration of PVC data is still not guaranteed. If a user deletes a PVC, is the intention to reinstate an older version configuration? The distinction appears negligible.
|
||||
|
||||
In the absence of specific user scenarios, the core advantage of reverting to historical version configurations over the latest ones is unclear, particularly in cases of data loss for persistent storage.
|
||||
|
||||
### Implementation
|
||||
Main modification is in the `rollingUpdateStatefulsetPods` Function.
|
||||

|
||||
|
|
@ -24,7 +24,7 @@ superseded-by:
|
|||
<!-- BEGIN Remove before PR -->
|
||||
To get started with this template:
|
||||
1. **Make a copy of this template.**
|
||||
Copy this template into `docs/enhacements` and name it `YYYYMMDD-my-title.md`, where `YYYYMMDD` is the date the proposal was first drafted.
|
||||
Copy this template into `docs/proposals` and name it `YYYYMMDD-my-title.md`, where `YYYYMMDD` is the date the proposal was first drafted.
|
||||
1. **Fill out the required sections.**
|
||||
1. **Create a PR.**
|
||||
Aim for single topic PRs to keep discussions focused.
|
||||
|
@ -188,4 +188,3 @@ Consider the following in developing an upgrade strategy for this enhancement:
|
|||
- [ ] MM/DD/YYYY: First round of feedback from community
|
||||
- [ ] MM/DD/YYYY: Present proposal at a [community meeting]
|
||||
- [ ] MM/DD/YYYY: Open proposal PR
|
||||
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
module github.com/openkruise/kruise/docs/tutorial/v1/images/guestbook
|
||||
|
||||
go 1.22
|
||||
|
||||
require (
|
||||
github.com/codegangsta/negroni v1.0.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/xyproto/simpleredis v0.0.0-20200201215242-1ff0da2967b4
|
||||
)
|
|
@ -0,0 +1,5 @@
|
|||
github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0=
|
||||
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/xyproto/pinterface v0.0.0-20200201214933-70763765f31f/go.mod h1:X5B5pKE49ak7SpyDh5QvJvLH9cC9XuZNDcl5hEyYc34=
|
||||
github.com/xyproto/simpleredis v0.0.0-20200201215242-1ff0da2967b4/go.mod h1:U/ZOQqa0ggBGPs+d0y7r50BY6FyFTh5WhWf7F8f1MBM=
|
|
@ -1,9 +1,9 @@
|
|||
# Notice of Embargo
|
||||
|
||||
This is an embargoed notification that a vulnerability has been discovered in
|
||||
<!-- TODO: $PROJECT -->. This notice has been sent to subscribed distributors and service
|
||||
OpenKruise/Kruise. This notice has been sent to subscribed distributors and service
|
||||
providers in order to allow for timely patching. You are receiving this
|
||||
notification as you have agreed to abide by the embargo policy (<!-- TODO: $LINK -->) on this
|
||||
notification as you have agreed to abide by the embargo policy (https://github.com/openkruise/kruise/security/policy) on this
|
||||
project. Do not forward this information to other parties without complying with
|
||||
the instructions of the embargo policy.
|
||||
|
||||
|
@ -57,4 +57,4 @@ when it will be available or links to where the patch will be available.*
|
|||
* issue public patches before the disclosure date
|
||||
|
||||
This list will be notified immediately if the disclosure date is at risk or
|
||||
changes. Questions should be directed to the security contacts <!-- TODO: $LINK -->.
|
||||
changes. Questions should be directed to the security contacts at kubernetes-security@service.aliyun.com.
|
||||
|
|
266
go.mod
266
go.mod
|
@ -1,207 +1,179 @@
|
|||
module github.com/openkruise/kruise
|
||||
|
||||
go 1.20
|
||||
go 1.23.0
|
||||
|
||||
require (
|
||||
github.com/alibaba/pouch v0.0.0-20190328125340-37051654f368
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6
|
||||
github.com/appscode/jsonpatch v1.0.1
|
||||
github.com/codegangsta/negroni v1.0.0
|
||||
github.com/coreos/go-semver v0.3.1
|
||||
github.com/docker/distribution v2.8.2+incompatible
|
||||
github.com/docker/docker v26.1.4+incompatible
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
github.com/go-bindata/go-bindata v3.1.2+incompatible
|
||||
github.com/google/go-containerregistry v0.16.1
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/onsi/ginkgo v1.16.5
|
||||
github.com/onsi/gomega v1.33.0
|
||||
github.com/prometheus/client_golang v1.16.0
|
||||
github.com/onsi/gomega v1.36.1
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/xyproto/simpleredis v0.0.0-20200201215242-1ff0da2967b4
|
||||
golang.org/x/net v0.24.0
|
||||
golang.org/x/time v0.3.0
|
||||
golang.org/x/time v0.7.0
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0
|
||||
google.golang.org/grpc v1.63.0
|
||||
k8s.io/api v0.30.0
|
||||
k8s.io/apiextensions-apiserver v0.30.0
|
||||
k8s.io/apimachinery v0.30.0
|
||||
k8s.io/apiserver v0.28.9
|
||||
k8s.io/client-go v0.28.9
|
||||
k8s.io/code-generator v0.28.9
|
||||
k8s.io/component-base v0.28.9
|
||||
k8s.io/component-helpers v0.28.9
|
||||
k8s.io/cri-api v0.28.9
|
||||
google.golang.org/grpc v1.65.0
|
||||
k8s.io/api v0.32.6
|
||||
k8s.io/apiextensions-apiserver v0.32.6
|
||||
k8s.io/apimachinery v0.32.6
|
||||
k8s.io/apiserver v0.32.6
|
||||
k8s.io/client-go v0.32.6
|
||||
k8s.io/code-generator v0.32.6
|
||||
k8s.io/component-base v0.32.6
|
||||
k8s.io/component-helpers v0.32.6
|
||||
k8s.io/cri-api v0.32.6
|
||||
k8s.io/cri-client v0.32.6
|
||||
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01
|
||||
k8s.io/klog/v2 v2.120.1
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9
|
||||
k8s.io/kubelet v0.28.9
|
||||
k8s.io/kubernetes v1.28.9
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
|
||||
sigs.k8s.io/controller-runtime v0.16.5
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f
|
||||
k8s.io/kubelet v0.32.6
|
||||
k8s.io/kubernetes v1.32.6
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
|
||||
sigs.k8s.io/controller-runtime v0.20.2
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.18.0 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/google/cel-go v0.16.1 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.22.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.2.0 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.9 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.16 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.16 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.25.0 // indirect
|
||||
golang.org/x/crypto v0.22.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.36.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/net v0.38.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
k8s.io/controller-manager v0.28.9 // indirect
|
||||
k8s.io/kms v0.28.9 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
|
||||
k8s.io/controller-manager v0.32.6 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
|
||||
k8s.io/kms v0.32.6 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
|
||||
github.com/contiv/executor v0.0.0-20180626233236-d263f4daa3ad // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v24.0.0+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/analysis v0.21.2 // indirect
|
||||
github.com/go-openapi/errors v0.20.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/loads v0.21.1 // indirect
|
||||
github.com/go-openapi/spec v0.20.4 // indirect
|
||||
github.com/go-openapi/strfmt v0.21.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/validate v0.21.0 // indirect
|
||||
github.com/go-stack/stack v1.8.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gomodule/redigo v2.0.0+incompatible // indirect
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.16.5 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.1 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.7.1 // indirect
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.7.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
|
||||
github.com/opencontainers/runc v1.2.0-rc.1 // indirect
|
||||
github.com/opencontainers/selinux v1.11.0 // indirect
|
||||
github.com/opencontainers/selinux v1.11.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.44.0 // indirect
|
||||
github.com/prometheus/procfs v0.10.1 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/vbatts/tar-split v0.11.3 // indirect
|
||||
github.com/xyproto/pinterface v0.0.0-20200201214933-70763765f31f // indirect
|
||||
go.mongodb.org/mongo-driver v1.7.5 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
|
||||
go.opentelemetry.io/otel v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.21.0
|
||||
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/oauth2 v0.17.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/term v0.19.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/tools v0.20.0 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
||||
go.opentelemetry.io/otel v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.28.0
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/oauth2 v0.24.1-0.20250104024449-49a531d12a9a // indirect
|
||||
golang.org/x/sync v0.12.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/term v0.30.0 // indirect
|
||||
golang.org/x/text v0.23.0
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/cloud-provider v0.28.9 // indirect
|
||||
k8s.io/csi-translation-lib v0.28.9 // indirect
|
||||
k8s.io/dynamic-resource-allocation v0.28.9 // indirect
|
||||
k8s.io/kube-scheduler v0.28.9 // indirect
|
||||
k8s.io/mount-utils v0.28.9 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
k8s.io/cloud-provider v0.32.0 // indirect
|
||||
k8s.io/csi-translation-lib v0.32.0 // indirect
|
||||
k8s.io/dynamic-resource-allocation v0.32.0 // indirect
|
||||
k8s.io/kube-scheduler v0.32.0 // indirect
|
||||
k8s.io/mount-utils v0.32.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
golang.org/x/sys => golang.org/x/sys v0.19.0
|
||||
k8s.io/api => k8s.io/api v0.28.9
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.9
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.28.9
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.28.9
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.9
|
||||
k8s.io/client-go => k8s.io/client-go v0.28.9
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.9
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.9
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.28.9
|
||||
k8s.io/component-base => k8s.io/component-base v0.28.9
|
||||
k8s.io/component-helpers => k8s.io/component-helpers v0.28.9
|
||||
k8s.io/controller-manager => k8s.io/controller-manager v0.28.9
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.28.9
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.9
|
||||
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.9
|
||||
k8s.io/endpointslice => k8s.io/endpointslice v0.28.9
|
||||
k8s.io/kms => k8s.io/kms v0.28.9
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.9
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.9
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.9
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.9
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.28.9
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.28.9
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.9
|
||||
k8s.io/metrics => k8s.io/metrics v0.28.9
|
||||
k8s.io/mount-utils => k8s.io/mount-utils v0.28.9
|
||||
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.9
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.9
|
||||
k8s.io/api => k8s.io/api v0.32.6
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.6
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.32.6
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.32.6
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.32.6
|
||||
k8s.io/client-go => k8s.io/client-go v0.32.6
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.32.6
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.32.6
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.32.6
|
||||
k8s.io/component-base => k8s.io/component-base v0.32.6
|
||||
k8s.io/component-helpers => k8s.io/component-helpers v0.32.6
|
||||
k8s.io/controller-manager => k8s.io/controller-manager v0.32.6
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.32.6
|
||||
k8s.io/cri-client => k8s.io/cri-client v0.32.6
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.32.6
|
||||
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.32.6
|
||||
k8s.io/endpointslice => k8s.io/endpointslice v0.32.6
|
||||
k8s.io/externaljwt => k8s.io/externaljwt v0.32.6
|
||||
k8s.io/kms => k8s.io/kms v0.32.6
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.32.6
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.32.6
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.30.9
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.32.0
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.32.6
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.32.6
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.32.6
|
||||
k8s.io/metrics => k8s.io/metrics v0.32.6
|
||||
k8s.io/mount-utils => k8s.io/mount-utils v0.32.6
|
||||
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.32.6
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.32.6
|
||||
)
|
||||
|
|
574
go.sum
574
go.sum
|
@ -1,157 +1,85 @@
|
|||
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
||||
cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
|
||||
cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/alibaba/pouch v0.0.0-20190328125340-37051654f368 h1:g7IkWP/KYyxamFreSPpOmjGuX63C6mTSqVFd9BldwxQ=
|
||||
github.com/alibaba/pouch v0.0.0-20190328125340-37051654f368/go.mod h1:U18Kv0/rJR1OjsxqgEbMJU29pFSCpKCzr44GZD6T3dI=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/appscode/jsonpatch v1.0.1 h1:e82Bj+rsBSnpsmjiIGlc9NiKSBpJONZkamk/F8GrCR0=
|
||||
github.com/appscode/jsonpatch v1.0.1/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
|
||||
github.com/codegangsta/negroni v1.0.0 h1:+aYywywx4bnKXWvoWtRfJ91vC59NbEhEY03sZjQhbVY=
|
||||
github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
|
||||
github.com/contiv/executor v0.0.0-20180626233236-d263f4daa3ad h1:wNE0NGGDF+q69bwWeJ/+kKTjgRux3Ky3xTJh6bXHq1Y=
|
||||
github.com/contiv/executor v0.0.0-20180626233236-d263f4daa3ad/go.mod h1:pTKlpemhbXTX+6oDaIiCv1ZiMm5S9ieCpo4YFMJoa40=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v24.0.0+incompatible h1:0+1VshNwBQzQAx9lOl+OYCTCEAD8fKs/qeXMx3O0wqM=
|
||||
github.com/docker/cli v24.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU=
|
||||
github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
|
||||
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
|
||||
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE=
|
||||
github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
|
||||
github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU=
|
||||
github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
|
||||
github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
||||
github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
||||
github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8=
|
||||
github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0=
|
||||
github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
|
||||
github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
|
||||
github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
|
||||
github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
|
||||
github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
|
||||
github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os=
|
||||
github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI=
|
||||
github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
|
||||
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
|
||||
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
|
||||
github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
|
||||
github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
|
||||
github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
|
||||
github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
|
||||
github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
|
||||
github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
|
||||
github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
|
||||
github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
|
||||
github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
|
||||
github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
|
||||
github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
|
||||
github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
|
||||
github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
|
||||
github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
|
||||
github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
|
||||
github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
|
||||
github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
|
||||
github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
|
||||
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
|
||||
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
|
||||
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
|
@ -159,341 +87,255 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
|
|||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
|
||||
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo=
|
||||
github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
|
||||
github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
|
||||
github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
|
||||
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
|
||||
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
|
||||
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
|
||||
github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
|
||||
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
|
||||
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE=
|
||||
github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
|
||||
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
|
||||
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
|
||||
github.com/opencontainers/runc v1.2.0-rc.1 h1:SMjop2pxxYRTfKdsigna/8xRoaoCfIQfD2cVuOb64/o=
|
||||
github.com/opencontainers/runc v1.2.0-rc.1/go.mod h1:m9JwxfHzXz5YTTXBQr7EY9KTuazFAGPyMQx2nRR3vTw=
|
||||
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
|
||||
github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
|
||||
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
|
||||
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
||||
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
||||
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
|
||||
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
|
||||
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
|
||||
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
|
||||
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
|
||||
github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
|
||||
github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
|
||||
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xyproto/pinterface v0.0.0-20200201214933-70763765f31f h1:x97Isxzsv8Aj1QMh8vNxKzI85Fvgg2areAJlIaMQ4zY=
|
||||
github.com/xyproto/pinterface v0.0.0-20200201214933-70763765f31f/go.mod h1:X5B5pKE49ak7SpyDh5QvJvLH9cC9XuZNDcl5hEyYc34=
|
||||
github.com/xyproto/simpleredis v0.0.0-20200201215242-1ff0da2967b4 h1:0wSySfZ5KkGNlWJFd4Bkfv/T4rlEsUp7o7QwHTTvEzg=
|
||||
github.com/xyproto/simpleredis v0.0.0-20200201215242-1ff0da2967b4/go.mod h1:U/ZOQqa0ggBGPs+d0y7r50BY6FyFTh5WhWf7F8f1MBM=
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
|
||||
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
|
||||
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
|
||||
go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo=
|
||||
go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
|
||||
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI=
|
||||
go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0=
|
||||
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
|
||||
go.mongodb.org/mongo-driver v1.7.5 h1:ny3p0reEpgsR2cfA5cjgwFZg3Cv/ofFh/8jbhGtz9VI=
|
||||
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
||||
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
|
||||
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
||||
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
|
||||
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
||||
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
||||
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
|
||||
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
|
||||
go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
|
||||
go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
|
||||
go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
|
||||
go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
|
||||
go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
|
||||
go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
|
||||
go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
||||
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
|
||||
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
|
||||
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
|
||||
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
|
||||
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
|
||||
go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI=
|
||||
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
|
||||
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
|
||||
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.24.1-0.20250104024449-49a531d12a9a h1:ZS62xVARqh4jGWGOmCkR/908OsNHQYP4F8MbAXQW4rE=
|
||||
golang.org/x/oauth2 v0.24.1-0.20250104024449-49a531d12a9a/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
|
||||
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
|
||||
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
|
||||
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
|
||||
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
|
||||
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
|
||||
google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8=
|
||||
google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
|
@ -505,68 +347,66 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
||||
k8s.io/api v0.28.9 h1:E7VEXXCAlSrp+08zq4zgd+ko6Ttu0Mw+XoXlIkDTVW0=
|
||||
k8s.io/api v0.28.9/go.mod h1:AnCsDYf3SHjfa8mPG5LGYf+iF4mie+3peLQR51MMCgw=
|
||||
k8s.io/apiextensions-apiserver v0.28.9 h1:yzPHp+4IASHeu7XIPkAKJrY4UjWdjiAjOcQMd6oNKj0=
|
||||
k8s.io/apiextensions-apiserver v0.28.9/go.mod h1:Rjhvq5y3JESdZgV2UOByldyefCfRrUguVpBLYOAIbVs=
|
||||
k8s.io/apimachinery v0.28.9 h1:aXz4Zxsw+Pk4KhBerAtKRxNN1uSMWKfciL/iOdBfXvA=
|
||||
k8s.io/apimachinery v0.28.9/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o=
|
||||
k8s.io/apiserver v0.28.9 h1:koPXvgSXRBDxKJQjJGdZNgPsT9lQv6scJJFipd1m86E=
|
||||
k8s.io/apiserver v0.28.9/go.mod h1:D51I37WBZojJhmLcjNVE4GSVrjiUHP+yq+N5KvKn2wY=
|
||||
k8s.io/client-go v0.28.9 h1:mmMvejwc/KDjMLmDpyaxkWNzlWRCJ6ht7Qsbsnwn39Y=
|
||||
k8s.io/client-go v0.28.9/go.mod h1:GFDy3rUNId++WGrr0hRaBrs+y1eZz5JtVZODEalhRMo=
|
||||
k8s.io/cloud-provider v0.28.9 h1:FBW4Ii1NdXCHKprzkM8/s5BpxvLgJmYrZTNJABsVX7Y=
|
||||
k8s.io/cloud-provider v0.28.9/go.mod h1:7tFyiftAlSARvJS6mzZQQKKDQA81asNQ2usg35R3Exo=
|
||||
k8s.io/code-generator v0.28.9 h1:NyZt4+equopQNbwjSSpVikB15U4ghmvIaqn+VWd367U=
|
||||
k8s.io/code-generator v0.28.9/go.mod h1:WiJgVNDFAlT90nq6IOxhZ1gxL2JexbcfAx9ZBsyQ3Do=
|
||||
k8s.io/component-base v0.28.9 h1:ySM2PR8Z/xaUSG1Akd3yM6dqUezTltI7S5aV41MMuuc=
|
||||
k8s.io/component-base v0.28.9/go.mod h1:QtWzscEhCKRfHV24/S+11BwWjVxhC6fd3RYoEgZcWFU=
|
||||
k8s.io/component-helpers v0.28.9 h1:knX9F2nRoxF4wplgXO4C5tE4/k7HGszK3177Tm4+CUc=
|
||||
k8s.io/component-helpers v0.28.9/go.mod h1:TdAkLbywEDE2CB5h8LbM/W03T3k8wvqAaoPcEZrr6Z4=
|
||||
k8s.io/controller-manager v0.28.9 h1:muAtmO2mDN7pDkAJQMknvWy+WQhkvvi/jK1V82+qbLw=
|
||||
k8s.io/controller-manager v0.28.9/go.mod h1:RYP65K6GWLRWYZR7PRRaStfvgeXkhCGZwJsxRPuaDV0=
|
||||
k8s.io/cri-api v0.28.9 h1:AlhkmIDLeKWubmX2xVkW3DhcbPwH79xauFySijfkIDU=
|
||||
k8s.io/cri-api v0.28.9/go.mod h1:8/bPK3T4irPoj3LjriQc1TAIheeN2yWXR3mz+8jNZ8U=
|
||||
k8s.io/csi-translation-lib v0.28.9 h1:zl93l7wk0iwKInyRJfaodvsWf1z8QtWCN9a5OqHeT3o=
|
||||
k8s.io/csi-translation-lib v0.28.9/go.mod h1:eOniPQitdkuyVh+gtktg3yeDJQu/IidIUSMadDPLhak=
|
||||
k8s.io/dynamic-resource-allocation v0.28.9 h1:u3upC0ah0eNrO1uh3yUL3VefvB1OUTNQLKjxMfe1pgc=
|
||||
k8s.io/dynamic-resource-allocation v0.28.9/go.mod h1:SIwpYxFh5gk7bW1dZ+GgnA6l4VmhrnUugePlLxYva+4=
|
||||
k8s.io/api v0.32.6 h1:UiBAMRzTP24Tz9UT1uhhmAv1auGTT9PT/npywSk9JrU=
|
||||
k8s.io/api v0.32.6/go.mod h1:+iFCyQN34v2rsL53iQEN9lYE03mFdgPvgSXvATIDteg=
|
||||
k8s.io/apiextensions-apiserver v0.32.6 h1:B9zv1tpW+090Prav3GP53A4W2Bv908AAouZYJWp0fy8=
|
||||
k8s.io/apiextensions-apiserver v0.32.6/go.mod h1:3lAgylV3582qpXg8NWW4NOLdzxLC8mTcfPqqjAzOSTs=
|
||||
k8s.io/apimachinery v0.32.6 h1:odtEUjg7OT3132sBFsFn4Arj4Gd+BplYekmLQP8L3ak=
|
||||
k8s.io/apimachinery v0.32.6/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/apiserver v0.32.6 h1:SKt+2e4klvHes4nw3moBI3mCPuh3RFp0XtYzsjZOOjk=
|
||||
k8s.io/apiserver v0.32.6/go.mod h1:CleW9S9cdw3EAevI/RCFc7RtHTEgbcTFhZV28IEdRtU=
|
||||
k8s.io/client-go v0.32.6 h1:Q+O+Sd9LKKFnsGZNVX2q1RDILYRpQZX+ea2RoIgjKlM=
|
||||
k8s.io/client-go v0.32.6/go.mod h1:yqL9XJ2cTXy3WdJwdeyob3O6xiLwWrh9DP7SeszniW0=
|
||||
k8s.io/cloud-provider v0.32.6 h1:oqjyLcDSFxBNB346Nd28RHNZnrKMjDoDIZowG8BpzUA=
|
||||
k8s.io/cloud-provider v0.32.6/go.mod h1:7cJA50QLh5+uTL/JlMlwgthbg15SvJQPZ2WJMMQqYDM=
|
||||
k8s.io/code-generator v0.32.6 h1:PxUFh/DLYQhL6t/hMjKaxUF5RDMV3YXh1FfzZkZ00uc=
|
||||
k8s.io/code-generator v0.32.6/go.mod h1:ZgPiUB+rnn6/tdyxi2o9E139v7AgwVeVcIVNUxOAeTc=
|
||||
k8s.io/component-base v0.32.6 h1:LwKaAlUcTyRouaqUdpQ+JLvtwyZlrWrNey1axNbE0ac=
|
||||
k8s.io/component-base v0.32.6/go.mod h1:fFJq5U4s+BAjmTV5gnT9CIfRbVujyITuO93ambmplcE=
|
||||
k8s.io/component-helpers v0.32.6 h1:HeTAIZeasda5HDwRDXxU/bP9J1WVNRnvvR7bCR1ic5U=
|
||||
k8s.io/component-helpers v0.32.6/go.mod h1:UN/vFKDmMyVFjWpexcqIrm/UQeM/r0guRwBGwNbcOnw=
|
||||
k8s.io/controller-manager v0.32.6 h1:hBnhPQQ1z8Ifdfjrg8mcOm3o3uttXxCtXtNB9okTKBI=
|
||||
k8s.io/controller-manager v0.32.6/go.mod h1:O82IkfFfoEnXl1OB2muDkTXtDt6hob0uvxOyn/50B0A=
|
||||
k8s.io/cri-api v0.32.6 h1:ad0LTtSM2Gsigr9Dt1PSsjzeb71j7vOHFbFtKucCfvI=
|
||||
k8s.io/cri-api v0.32.6/go.mod h1:DCzMuTh2padoinefWME0G678Mc3QFbLMF2vEweGzBAI=
|
||||
k8s.io/cri-client v0.32.6 h1:jwBApbijESaWi+jEfJp6bdLDlca9C/OAX7/kceEt12k=
|
||||
k8s.io/cri-client v0.32.6/go.mod h1:xLAQSh9EXTc79e0zLdCw9x1tNVDAqpKwnoEFKo1S8wM=
|
||||
k8s.io/csi-translation-lib v0.32.6 h1:AWHiUT8K/pcsl53str9EC9TQ6LZBTWF/L4YNaeRRlzs=
|
||||
k8s.io/csi-translation-lib v0.32.6/go.mod h1:Fza0F4T8ebNnkH/zApBQlJBHNH2GV9igplZZ+krnl/8=
|
||||
k8s.io/dynamic-resource-allocation v0.32.6 h1:omKI7f5mLe2maMyFdoZtCAPmYBNP8oWPcLLfUv/F21c=
|
||||
k8s.io/dynamic-resource-allocation v0.32.6/go.mod h1:h1WtBQ0aTwa94rOpCbUNxSA0IbfsrfmLT/R42ooPEtQ=
|
||||
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks=
|
||||
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
|
||||
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.28.9 h1:ApCWJulBl+uFRTr2jtTpG1lffmqqMuLnOH/RUbtO4UY=
|
||||
k8s.io/kms v0.28.9/go.mod h1:VgyAIRMFqZX9lHyixecU/JTI0wnPD1wCIlquvlXRJ+Y=
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
|
||||
k8s.io/kube-scheduler v0.28.9 h1:rRFkTBiPIpcCdyI7/E2HVyzWehmSW1eI/rWyKTVrKfk=
|
||||
k8s.io/kube-scheduler v0.28.9/go.mod h1:9xnBlTE/B2EwfiUZ/uh2zBEVViRhGG+ojLiMvyxXCe8=
|
||||
k8s.io/kubelet v0.28.9 h1:76v00fFLeniz27kXhGGUIxONdwa9LKcD2Jd5cXYAZko=
|
||||
k8s.io/kubelet v0.28.9/go.mod h1:46P39DFjI+E59nU2OgpatyS3oWy58ClulKO6riZ/97o=
|
||||
k8s.io/kubernetes v1.28.9 h1:I4sYGQJOuxEo4/QWoY7M8kDB7O0HcH266t6o6mR6ogg=
|
||||
k8s.io/kubernetes v1.28.9/go.mod h1:chlmcCDBnOA/y+572cw8dO0Rci1wiA8bm5+zhPdFLCk=
|
||||
k8s.io/mount-utils v0.28.9 h1:RWt7xIrTzoKYKmMZ9Lh/rkZ9zreCUdpzhFe8jJXXuNQ=
|
||||
k8s.io/mount-utils v0.28.9/go.mod h1:ZxAFXgKzcAyi3VTd2pKFlZFswl9Q/cveJ5aptdjQOuc=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
|
||||
sigs.k8s.io/controller-runtime v0.16.5 h1:yr1cEJbX08xsTW6XEIzT13KHHmIyX8Umvme2cULvFZw=
|
||||
sigs.k8s.io/controller-runtime v0.16.5/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.32.6 h1:xgfjoJj6iBbMinO0W1OGCKLhTLwPyW+6k2n8KznioSE=
|
||||
k8s.io/kms v0.32.6/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
|
||||
k8s.io/kube-scheduler v0.32.0 h1:FCsF/3TPvR51ptx/gLUrqcoKqAMhQKrydYCJzPz9VGM=
|
||||
k8s.io/kube-scheduler v0.32.0/go.mod h1:yof3vmyx70TWoQ6XZruYEGIUT/r0H/ELGdnWiqPF5EE=
|
||||
k8s.io/kubelet v0.32.6 h1:6k4ziX0J9Ba6e/MhVgco6XvbM/sgBDNrBUJaQvCb8Qo=
|
||||
k8s.io/kubelet v0.32.6/go.mod h1:IjMboyz52sXR3CHJmxdTnfJw5RnhJetN7vbs8mqzn6w=
|
||||
k8s.io/kubernetes v1.32.6 h1:tp1gRjOqZjaoFBek5PN6eSmODdS1QRrH5UKiFP8ZByg=
|
||||
k8s.io/kubernetes v1.32.6/go.mod h1:REY0Gok66BTTrbGyZaFMNKO9JhxvgBDW9B7aksWRFoY=
|
||||
k8s.io/mount-utils v0.32.6 h1:eUzjHqe7FPK54MGeZf5TAD29OlF9ChutDomXvp0E4gM=
|
||||
k8s.io/mount-utils v0.32.6/go.mod h1:Kun5c2svjAPx0nnvJKYQWhfeNW+O0EpzHgRhDcYoSY0=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/controller-runtime v0.20.2 h1:/439OZVxoEc02psi1h4QO3bHzTgu49bb347Xp4gW1pc=
|
||||
sigs.k8s.io/controller-runtime v0.20.2/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
control-plane: daemon
|
||||
name: kruise-daemon-win
|
||||
namespace: kruise-system
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
control-plane: daemon
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
control-plane: daemon
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: type
|
||||
operator: NotIn
|
||||
values:
|
||||
- virtual-kubelet
|
||||
containers:
|
||||
- args:
|
||||
- --logtostderr=true
|
||||
- --v=5
|
||||
- --addr=:10221
|
||||
- --feature-gates=ImagePullJobGate=true
|
||||
- --enable-pprof=true
|
||||
- --pprof-addr=localhost:10222
|
||||
workingDir: "$env:CONTAINER_SANDBOX_MOUNT_POINT/"
|
||||
command:
|
||||
- $env:CONTAINER_SANDBOX_MOUNT_POINT/kruise-daemon.exe
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
image: openkruise/kruise-daemon-win:test # Replace with the actual image
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10221
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
name: daemon
|
||||
resources:
|
||||
limits:
|
||||
cpu: 50m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: "0"
|
||||
memory: "0"
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
hostNetwork: true
|
||||
restartPolicy: Always
|
||||
schedulerName: default-scheduler
|
||||
securityContext:
|
||||
windowsOptions:
|
||||
hostProcess: true
|
||||
runAsUserName: "NT AUTHORITY\\SYSTEM"
|
||||
serviceAccount: kruise-daemon
|
||||
serviceAccountName: kruise-daemon
|
||||
terminationGracePeriodSeconds: 10
|
||||
tolerations:
|
||||
- operator: Exists
|
||||
nodeSelector:
|
||||
kubernetes.io/os: windows
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 0
|
||||
maxUnavailable: 10%
|
||||
type: RollingUpdate
|
|
@ -23,11 +23,12 @@ import (
|
|||
"os"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/common"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
|
||||
appspub "github.com/openkruise/kruise/apis/apps/pub"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
|
||||
"k8s.io/kube-openapi/pkg/common"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
// Generate OpenAPI spec definitions for Kruise Resources
|
||||
|
|
14
main.go
14
main.go
|
@ -33,6 +33,9 @@ import (
|
|||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/leaderelection/resourcelock"
|
||||
"k8s.io/component-base/logs"
|
||||
logsapi "k8s.io/component-base/logs/api/v1"
|
||||
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/klog/v2/klogr"
|
||||
"k8s.io/kubernetes/pkg/capabilities"
|
||||
|
@ -64,6 +67,7 @@ const (
|
|||
defaultRenewDeadline = 10 * time.Second
|
||||
defaultRetryPeriod = 2 * time.Second
|
||||
defaultControllerCacheSyncTimeout = 2 * time.Minute
|
||||
defaultWebhookInitializeTimeout = 60 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -99,6 +103,7 @@ func main() {
|
|||
var leaderElectionId string
|
||||
var retryPeriod time.Duration
|
||||
var controllerCacheSyncTimeout time.Duration
|
||||
var webhookInitializeTimeout time.Duration
|
||||
|
||||
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
|
||||
flag.StringVar(&healthProbeAddr, "health-probe-addr", ":8000", "The address the healthz/readyz endpoint binds to.")
|
||||
|
@ -123,13 +128,20 @@ func main() {
|
|||
flag.DurationVar(&retryPeriod, "leader-election-retry-period", defaultRetryPeriod,
|
||||
"leader-election-retry-period is the duration the LeaderElector clients should wait between tries of actions. Default is 2 seconds.")
|
||||
flag.DurationVar(&controllerCacheSyncTimeout, "controller-cache-sync-timeout", defaultControllerCacheSyncTimeout, "CacheSyncTimeout refers to the time limit set to wait for syncing caches. Defaults to 2 minutes if not set.")
|
||||
flag.DurationVar(&webhookInitializeTimeout, "webhook-initialize-timeout", defaultWebhookInitializeTimeout, "WebhookInitializeTimeout refers to the time limit set to wait for webhook initialization. Defaults to 60 seconds if not set.")
|
||||
|
||||
utilfeature.DefaultMutableFeatureGate.AddFlag(pflag.CommandLine)
|
||||
logOptions := logs.NewOptions()
|
||||
logsapi.AddFlags(logOptions, pflag.CommandLine)
|
||||
klog.InitFlags(nil)
|
||||
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
|
||||
pflag.Parse()
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
ctrl.SetLogger(klogr.New())
|
||||
if err := logsapi.ValidateAndApply(logOptions, nil); err != nil {
|
||||
setupLog.Error(err, "logsapi ValidateAndApply failed")
|
||||
os.Exit(1)
|
||||
}
|
||||
features.SetDefaultFeatureGates()
|
||||
util.SetControllerCacheSyncTimeout(controllerCacheSyncTimeout)
|
||||
|
||||
|
@ -222,7 +234,7 @@ func main() {
|
|||
|
||||
// +kubebuilder:scaffold:builder
|
||||
setupLog.Info("initialize webhook")
|
||||
if err := webhook.Initialize(ctx, cfg); err != nil {
|
||||
if err := webhook.Initialize(ctx, cfg, webhookInitializeTimeout); err != nil {
|
||||
setupLog.Error(err, "unable to initialize webhook")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
|
|
@ -18,8 +18,8 @@ limitations under the License.
|
|||
package versioned
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
fmt "fmt"
|
||||
http "net/http"
|
||||
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
appsv1beta1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1beta1"
|
||||
|
|
|
@ -34,8 +34,12 @@ import (
|
|||
|
||||
// NewSimpleClientset returns a clientset that will respond with the provided objects.
|
||||
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
|
||||
// without applying any validations and/or defaults. It shouldn't be considered a replacement
|
||||
// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement
|
||||
// for a real clientset and is mostly useful in simple unit tests.
|
||||
//
|
||||
// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves
|
||||
// server side apply testing. NewClientset is only available when apply configurations are generated (e.g.
|
||||
// via --with-applyconfig).
|
||||
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
|
||||
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
|
||||
for _, obj := range objects {
|
||||
|
|
|
@ -18,15 +18,14 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
context "context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
scheme "github.com/openkruise/kruise/pkg/client/clientset/versioned/scheme"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// AdvancedCronJobsGetter has a method to return a AdvancedCronJobInterface.
|
||||
|
@ -37,158 +36,34 @@ type AdvancedCronJobsGetter interface {
|
|||
|
||||
// AdvancedCronJobInterface has methods to work with AdvancedCronJob resources.
|
||||
type AdvancedCronJobInterface interface {
|
||||
Create(ctx context.Context, advancedCronJob *v1alpha1.AdvancedCronJob, opts v1.CreateOptions) (*v1alpha1.AdvancedCronJob, error)
|
||||
Update(ctx context.Context, advancedCronJob *v1alpha1.AdvancedCronJob, opts v1.UpdateOptions) (*v1alpha1.AdvancedCronJob, error)
|
||||
UpdateStatus(ctx context.Context, advancedCronJob *v1alpha1.AdvancedCronJob, opts v1.UpdateOptions) (*v1alpha1.AdvancedCronJob, error)
|
||||
Create(ctx context.Context, advancedCronJob *appsv1alpha1.AdvancedCronJob, opts v1.CreateOptions) (*appsv1alpha1.AdvancedCronJob, error)
|
||||
Update(ctx context.Context, advancedCronJob *appsv1alpha1.AdvancedCronJob, opts v1.UpdateOptions) (*appsv1alpha1.AdvancedCronJob, error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
UpdateStatus(ctx context.Context, advancedCronJob *appsv1alpha1.AdvancedCronJob, opts v1.UpdateOptions) (*appsv1alpha1.AdvancedCronJob, error)
|
||||
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.AdvancedCronJob, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.AdvancedCronJobList, error)
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1alpha1.AdvancedCronJob, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*appsv1alpha1.AdvancedCronJobList, error)
|
||||
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AdvancedCronJob, err error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1alpha1.AdvancedCronJob, err error)
|
||||
AdvancedCronJobExpansion
|
||||
}
|
||||
|
||||
// advancedCronJobs implements AdvancedCronJobInterface
|
||||
type advancedCronJobs struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
*gentype.ClientWithList[*appsv1alpha1.AdvancedCronJob, *appsv1alpha1.AdvancedCronJobList]
|
||||
}
|
||||
|
||||
// newAdvancedCronJobs returns a AdvancedCronJobs
|
||||
func newAdvancedCronJobs(c *AppsV1alpha1Client, namespace string) *advancedCronJobs {
|
||||
return &advancedCronJobs{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
gentype.NewClientWithList[*appsv1alpha1.AdvancedCronJob, *appsv1alpha1.AdvancedCronJobList](
|
||||
"advancedcronjobs",
|
||||
c.RESTClient(),
|
||||
scheme.ParameterCodec,
|
||||
namespace,
|
||||
func() *appsv1alpha1.AdvancedCronJob { return &appsv1alpha1.AdvancedCronJob{} },
|
||||
func() *appsv1alpha1.AdvancedCronJobList { return &appsv1alpha1.AdvancedCronJobList{} },
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the advancedCronJob, and returns the corresponding advancedCronJob object, and an error if there is any.
|
||||
func (c *advancedCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AdvancedCronJob, err error) {
|
||||
result = &v1alpha1.AdvancedCronJob{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("advancedcronjobs").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of AdvancedCronJobs that match those selectors.
|
||||
func (c *advancedCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AdvancedCronJobList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1alpha1.AdvancedCronJobList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("advancedcronjobs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested advancedCronJobs.
|
||||
func (c *advancedCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("advancedcronjobs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch(ctx)
|
||||
}
|
||||
|
||||
// Create takes the representation of a advancedCronJob and creates it. Returns the server's representation of the advancedCronJob, and an error, if there is any.
|
||||
func (c *advancedCronJobs) Create(ctx context.Context, advancedCronJob *v1alpha1.AdvancedCronJob, opts v1.CreateOptions) (result *v1alpha1.AdvancedCronJob, err error) {
|
||||
result = &v1alpha1.AdvancedCronJob{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("advancedcronjobs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(advancedCronJob).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a advancedCronJob and updates it. Returns the server's representation of the advancedCronJob, and an error, if there is any.
|
||||
func (c *advancedCronJobs) Update(ctx context.Context, advancedCronJob *v1alpha1.AdvancedCronJob, opts v1.UpdateOptions) (result *v1alpha1.AdvancedCronJob, err error) {
|
||||
result = &v1alpha1.AdvancedCronJob{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("advancedcronjobs").
|
||||
Name(advancedCronJob.Name).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(advancedCronJob).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *advancedCronJobs) UpdateStatus(ctx context.Context, advancedCronJob *v1alpha1.AdvancedCronJob, opts v1.UpdateOptions) (result *v1alpha1.AdvancedCronJob, err error) {
|
||||
result = &v1alpha1.AdvancedCronJob{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("advancedcronjobs").
|
||||
Name(advancedCronJob.Name).
|
||||
SubResource("status").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(advancedCronJob).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the advancedCronJob and deletes it. Returns an error if one occurs.
|
||||
func (c *advancedCronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("advancedcronjobs").
|
||||
Name(name).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *advancedCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOpts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("advancedcronjobs").
|
||||
VersionedParams(&listOpts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched advancedCronJob.
|
||||
func (c *advancedCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AdvancedCronJob, err error) {
|
||||
result = &v1alpha1.AdvancedCronJob{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("advancedcronjobs").
|
||||
Name(name).
|
||||
SubResource(subresources...).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(data).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -18,10 +18,10 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
http "net/http"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
"github.com/openkruise/kruise/pkg/client/clientset/versioned/scheme"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
scheme "github.com/openkruise/kruise/pkg/client/clientset/versioned/scheme"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
|
@ -164,10 +164,10 @@ func New(c rest.Interface) *AppsV1alpha1Client {
|
|||
}
|
||||
|
||||
func setConfigDefaults(config *rest.Config) error {
|
||||
gv := v1alpha1.SchemeGroupVersion
|
||||
gv := appsv1alpha1.SchemeGroupVersion
|
||||
config.GroupVersion = &gv
|
||||
config.APIPath = "/apis"
|
||||
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
|
||||
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
|
||||
|
||||
if config.UserAgent == "" {
|
||||
config.UserAgent = rest.DefaultKubernetesUserAgent()
|
||||
|
|
|
@ -18,15 +18,14 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
context "context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
scheme "github.com/openkruise/kruise/pkg/client/clientset/versioned/scheme"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// BroadcastJobsGetter has a method to return a BroadcastJobInterface.
|
||||
|
@ -37,158 +36,34 @@ type BroadcastJobsGetter interface {
|
|||
|
||||
// BroadcastJobInterface has methods to work with BroadcastJob resources.
|
||||
type BroadcastJobInterface interface {
|
||||
Create(ctx context.Context, broadcastJob *v1alpha1.BroadcastJob, opts v1.CreateOptions) (*v1alpha1.BroadcastJob, error)
|
||||
Update(ctx context.Context, broadcastJob *v1alpha1.BroadcastJob, opts v1.UpdateOptions) (*v1alpha1.BroadcastJob, error)
|
||||
UpdateStatus(ctx context.Context, broadcastJob *v1alpha1.BroadcastJob, opts v1.UpdateOptions) (*v1alpha1.BroadcastJob, error)
|
||||
Create(ctx context.Context, broadcastJob *appsv1alpha1.BroadcastJob, opts v1.CreateOptions) (*appsv1alpha1.BroadcastJob, error)
|
||||
Update(ctx context.Context, broadcastJob *appsv1alpha1.BroadcastJob, opts v1.UpdateOptions) (*appsv1alpha1.BroadcastJob, error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
UpdateStatus(ctx context.Context, broadcastJob *appsv1alpha1.BroadcastJob, opts v1.UpdateOptions) (*appsv1alpha1.BroadcastJob, error)
|
||||
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.BroadcastJob, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BroadcastJobList, error)
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1alpha1.BroadcastJob, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*appsv1alpha1.BroadcastJobList, error)
|
||||
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BroadcastJob, err error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1alpha1.BroadcastJob, err error)
|
||||
BroadcastJobExpansion
|
||||
}
|
||||
|
||||
// broadcastJobs implements BroadcastJobInterface
|
||||
type broadcastJobs struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
*gentype.ClientWithList[*appsv1alpha1.BroadcastJob, *appsv1alpha1.BroadcastJobList]
|
||||
}
|
||||
|
||||
// newBroadcastJobs returns a BroadcastJobs
|
||||
func newBroadcastJobs(c *AppsV1alpha1Client, namespace string) *broadcastJobs {
|
||||
return &broadcastJobs{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
gentype.NewClientWithList[*appsv1alpha1.BroadcastJob, *appsv1alpha1.BroadcastJobList](
|
||||
"broadcastjobs",
|
||||
c.RESTClient(),
|
||||
scheme.ParameterCodec,
|
||||
namespace,
|
||||
func() *appsv1alpha1.BroadcastJob { return &appsv1alpha1.BroadcastJob{} },
|
||||
func() *appsv1alpha1.BroadcastJobList { return &appsv1alpha1.BroadcastJobList{} },
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the broadcastJob, and returns the corresponding broadcastJob object, and an error if there is any.
|
||||
func (c *broadcastJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BroadcastJob, err error) {
|
||||
result = &v1alpha1.BroadcastJob{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("broadcastjobs").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of BroadcastJobs that match those selectors.
|
||||
func (c *broadcastJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BroadcastJobList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1alpha1.BroadcastJobList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("broadcastjobs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested broadcastJobs.
|
||||
func (c *broadcastJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("broadcastjobs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch(ctx)
|
||||
}
|
||||
|
||||
// Create takes the representation of a broadcastJob and creates it. Returns the server's representation of the broadcastJob, and an error, if there is any.
|
||||
func (c *broadcastJobs) Create(ctx context.Context, broadcastJob *v1alpha1.BroadcastJob, opts v1.CreateOptions) (result *v1alpha1.BroadcastJob, err error) {
|
||||
result = &v1alpha1.BroadcastJob{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("broadcastjobs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(broadcastJob).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a broadcastJob and updates it. Returns the server's representation of the broadcastJob, and an error, if there is any.
|
||||
func (c *broadcastJobs) Update(ctx context.Context, broadcastJob *v1alpha1.BroadcastJob, opts v1.UpdateOptions) (result *v1alpha1.BroadcastJob, err error) {
|
||||
result = &v1alpha1.BroadcastJob{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("broadcastjobs").
|
||||
Name(broadcastJob.Name).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(broadcastJob).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *broadcastJobs) UpdateStatus(ctx context.Context, broadcastJob *v1alpha1.BroadcastJob, opts v1.UpdateOptions) (result *v1alpha1.BroadcastJob, err error) {
|
||||
result = &v1alpha1.BroadcastJob{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("broadcastjobs").
|
||||
Name(broadcastJob.Name).
|
||||
SubResource("status").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(broadcastJob).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the broadcastJob and deletes it. Returns an error if one occurs.
|
||||
func (c *broadcastJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("broadcastjobs").
|
||||
Name(name).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *broadcastJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOpts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("broadcastjobs").
|
||||
VersionedParams(&listOpts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched broadcastJob.
|
||||
func (c *broadcastJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BroadcastJob, err error) {
|
||||
result = &v1alpha1.BroadcastJob{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("broadcastjobs").
|
||||
Name(name).
|
||||
SubResource(subresources...).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(data).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -18,16 +18,15 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
context "context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
scheme "github.com/openkruise/kruise/pkg/client/clientset/versioned/scheme"
|
||||
autoscalingv1 "k8s.io/api/autoscaling/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// CloneSetsGetter has a method to return a CloneSetInterface.
|
||||
|
@ -38,15 +37,16 @@ type CloneSetsGetter interface {
|
|||
|
||||
// CloneSetInterface has methods to work with CloneSet resources.
|
||||
type CloneSetInterface interface {
|
||||
Create(ctx context.Context, cloneSet *v1alpha1.CloneSet, opts v1.CreateOptions) (*v1alpha1.CloneSet, error)
|
||||
Update(ctx context.Context, cloneSet *v1alpha1.CloneSet, opts v1.UpdateOptions) (*v1alpha1.CloneSet, error)
|
||||
UpdateStatus(ctx context.Context, cloneSet *v1alpha1.CloneSet, opts v1.UpdateOptions) (*v1alpha1.CloneSet, error)
|
||||
Create(ctx context.Context, cloneSet *appsv1alpha1.CloneSet, opts v1.CreateOptions) (*appsv1alpha1.CloneSet, error)
|
||||
Update(ctx context.Context, cloneSet *appsv1alpha1.CloneSet, opts v1.UpdateOptions) (*appsv1alpha1.CloneSet, error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
UpdateStatus(ctx context.Context, cloneSet *appsv1alpha1.CloneSet, opts v1.UpdateOptions) (*appsv1alpha1.CloneSet, error)
|
||||
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CloneSet, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CloneSetList, error)
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1alpha1.CloneSet, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*appsv1alpha1.CloneSetList, error)
|
||||
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloneSet, err error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1alpha1.CloneSet, err error)
|
||||
GetScale(ctx context.Context, cloneSetName string, options v1.GetOptions) (*autoscalingv1.Scale, error)
|
||||
UpdateScale(ctx context.Context, cloneSetName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (*autoscalingv1.Scale, error)
|
||||
|
||||
|
@ -55,153 +55,28 @@ type CloneSetInterface interface {
|
|||
|
||||
// cloneSets implements CloneSetInterface
|
||||
type cloneSets struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
*gentype.ClientWithList[*appsv1alpha1.CloneSet, *appsv1alpha1.CloneSetList]
|
||||
}
|
||||
|
||||
// newCloneSets returns a CloneSets
|
||||
func newCloneSets(c *AppsV1alpha1Client, namespace string) *cloneSets {
|
||||
return &cloneSets{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
gentype.NewClientWithList[*appsv1alpha1.CloneSet, *appsv1alpha1.CloneSetList](
|
||||
"clonesets",
|
||||
c.RESTClient(),
|
||||
scheme.ParameterCodec,
|
||||
namespace,
|
||||
func() *appsv1alpha1.CloneSet { return &appsv1alpha1.CloneSet{} },
|
||||
func() *appsv1alpha1.CloneSetList { return &appsv1alpha1.CloneSetList{} },
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the cloneSet, and returns the corresponding cloneSet object, and an error if there is any.
|
||||
func (c *cloneSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CloneSet, err error) {
|
||||
result = &v1alpha1.CloneSet{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("clonesets").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of CloneSets that match those selectors.
|
||||
func (c *cloneSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CloneSetList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1alpha1.CloneSetList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("clonesets").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested cloneSets.
|
||||
func (c *cloneSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("clonesets").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch(ctx)
|
||||
}
|
||||
|
||||
// Create takes the representation of a cloneSet and creates it. Returns the server's representation of the cloneSet, and an error, if there is any.
|
||||
func (c *cloneSets) Create(ctx context.Context, cloneSet *v1alpha1.CloneSet, opts v1.CreateOptions) (result *v1alpha1.CloneSet, err error) {
|
||||
result = &v1alpha1.CloneSet{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("clonesets").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(cloneSet).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a cloneSet and updates it. Returns the server's representation of the cloneSet, and an error, if there is any.
|
||||
func (c *cloneSets) Update(ctx context.Context, cloneSet *v1alpha1.CloneSet, opts v1.UpdateOptions) (result *v1alpha1.CloneSet, err error) {
|
||||
result = &v1alpha1.CloneSet{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("clonesets").
|
||||
Name(cloneSet.Name).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(cloneSet).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *cloneSets) UpdateStatus(ctx context.Context, cloneSet *v1alpha1.CloneSet, opts v1.UpdateOptions) (result *v1alpha1.CloneSet, err error) {
|
||||
result = &v1alpha1.CloneSet{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("clonesets").
|
||||
Name(cloneSet.Name).
|
||||
SubResource("status").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(cloneSet).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the cloneSet and deletes it. Returns an error if one occurs.
|
||||
func (c *cloneSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("clonesets").
|
||||
Name(name).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *cloneSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOpts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("clonesets").
|
||||
VersionedParams(&listOpts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched cloneSet.
|
||||
func (c *cloneSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloneSet, err error) {
|
||||
result = &v1alpha1.CloneSet{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("clonesets").
|
||||
Name(name).
|
||||
SubResource(subresources...).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(data).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// GetScale takes name of the cloneSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
|
||||
func (c *cloneSets) GetScale(ctx context.Context, cloneSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) {
|
||||
result = &autoscalingv1.Scale{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
err = c.GetClient().Get().
|
||||
Namespace(c.GetNamespace()).
|
||||
Resource("clonesets").
|
||||
Name(cloneSetName).
|
||||
SubResource("scale").
|
||||
|
@ -214,8 +89,8 @@ func (c *cloneSets) GetScale(ctx context.Context, cloneSetName string, options v
|
|||
// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
|
||||
func (c *cloneSets) UpdateScale(ctx context.Context, cloneSetName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
|
||||
result = &autoscalingv1.Scale{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
err = c.GetClient().Put().
|
||||
Namespace(c.GetNamespace()).
|
||||
Resource("clonesets").
|
||||
Name(cloneSetName).
|
||||
SubResource("scale").
|
||||
|
|
|
@ -18,15 +18,14 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
context "context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
scheme "github.com/openkruise/kruise/pkg/client/clientset/versioned/scheme"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// ContainerRecreateRequestsGetter has a method to return a ContainerRecreateRequestInterface.
|
||||
|
@ -37,158 +36,34 @@ type ContainerRecreateRequestsGetter interface {
|
|||
|
||||
// ContainerRecreateRequestInterface has methods to work with ContainerRecreateRequest resources.
|
||||
type ContainerRecreateRequestInterface interface {
|
||||
Create(ctx context.Context, containerRecreateRequest *v1alpha1.ContainerRecreateRequest, opts v1.CreateOptions) (*v1alpha1.ContainerRecreateRequest, error)
|
||||
Update(ctx context.Context, containerRecreateRequest *v1alpha1.ContainerRecreateRequest, opts v1.UpdateOptions) (*v1alpha1.ContainerRecreateRequest, error)
|
||||
UpdateStatus(ctx context.Context, containerRecreateRequest *v1alpha1.ContainerRecreateRequest, opts v1.UpdateOptions) (*v1alpha1.ContainerRecreateRequest, error)
|
||||
Create(ctx context.Context, containerRecreateRequest *appsv1alpha1.ContainerRecreateRequest, opts v1.CreateOptions) (*appsv1alpha1.ContainerRecreateRequest, error)
|
||||
Update(ctx context.Context, containerRecreateRequest *appsv1alpha1.ContainerRecreateRequest, opts v1.UpdateOptions) (*appsv1alpha1.ContainerRecreateRequest, error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
UpdateStatus(ctx context.Context, containerRecreateRequest *appsv1alpha1.ContainerRecreateRequest, opts v1.UpdateOptions) (*appsv1alpha1.ContainerRecreateRequest, error)
|
||||
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ContainerRecreateRequest, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ContainerRecreateRequestList, error)
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1alpha1.ContainerRecreateRequest, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*appsv1alpha1.ContainerRecreateRequestList, error)
|
||||
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ContainerRecreateRequest, err error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1alpha1.ContainerRecreateRequest, err error)
|
||||
ContainerRecreateRequestExpansion
|
||||
}
|
||||
|
||||
// containerRecreateRequests implements ContainerRecreateRequestInterface
|
||||
type containerRecreateRequests struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
*gentype.ClientWithList[*appsv1alpha1.ContainerRecreateRequest, *appsv1alpha1.ContainerRecreateRequestList]
|
||||
}
|
||||
|
||||
// newContainerRecreateRequests returns a ContainerRecreateRequests
|
||||
func newContainerRecreateRequests(c *AppsV1alpha1Client, namespace string) *containerRecreateRequests {
|
||||
return &containerRecreateRequests{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
gentype.NewClientWithList[*appsv1alpha1.ContainerRecreateRequest, *appsv1alpha1.ContainerRecreateRequestList](
|
||||
"containerrecreaterequests",
|
||||
c.RESTClient(),
|
||||
scheme.ParameterCodec,
|
||||
namespace,
|
||||
func() *appsv1alpha1.ContainerRecreateRequest { return &appsv1alpha1.ContainerRecreateRequest{} },
|
||||
func() *appsv1alpha1.ContainerRecreateRequestList { return &appsv1alpha1.ContainerRecreateRequestList{} },
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the containerRecreateRequest, and returns the corresponding containerRecreateRequest object, and an error if there is any.
|
||||
func (c *containerRecreateRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ContainerRecreateRequest, err error) {
|
||||
result = &v1alpha1.ContainerRecreateRequest{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("containerrecreaterequests").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ContainerRecreateRequests that match those selectors.
|
||||
func (c *containerRecreateRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ContainerRecreateRequestList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1alpha1.ContainerRecreateRequestList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("containerrecreaterequests").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested containerRecreateRequests.
|
||||
func (c *containerRecreateRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("containerrecreaterequests").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch(ctx)
|
||||
}
|
||||
|
||||
// Create takes the representation of a containerRecreateRequest and creates it. Returns the server's representation of the containerRecreateRequest, and an error, if there is any.
|
||||
func (c *containerRecreateRequests) Create(ctx context.Context, containerRecreateRequest *v1alpha1.ContainerRecreateRequest, opts v1.CreateOptions) (result *v1alpha1.ContainerRecreateRequest, err error) {
|
||||
result = &v1alpha1.ContainerRecreateRequest{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("containerrecreaterequests").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(containerRecreateRequest).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a containerRecreateRequest and updates it. Returns the server's representation of the containerRecreateRequest, and an error, if there is any.
|
||||
func (c *containerRecreateRequests) Update(ctx context.Context, containerRecreateRequest *v1alpha1.ContainerRecreateRequest, opts v1.UpdateOptions) (result *v1alpha1.ContainerRecreateRequest, err error) {
|
||||
result = &v1alpha1.ContainerRecreateRequest{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("containerrecreaterequests").
|
||||
Name(containerRecreateRequest.Name).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(containerRecreateRequest).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *containerRecreateRequests) UpdateStatus(ctx context.Context, containerRecreateRequest *v1alpha1.ContainerRecreateRequest, opts v1.UpdateOptions) (result *v1alpha1.ContainerRecreateRequest, err error) {
|
||||
result = &v1alpha1.ContainerRecreateRequest{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("containerrecreaterequests").
|
||||
Name(containerRecreateRequest.Name).
|
||||
SubResource("status").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(containerRecreateRequest).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the containerRecreateRequest and deletes it. Returns an error if one occurs.
|
||||
func (c *containerRecreateRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("containerrecreaterequests").
|
||||
Name(name).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *containerRecreateRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOpts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("containerrecreaterequests").
|
||||
VersionedParams(&listOpts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched containerRecreateRequest.
|
||||
func (c *containerRecreateRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ContainerRecreateRequest, err error) {
|
||||
result = &v1alpha1.ContainerRecreateRequest{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("containerrecreaterequests").
|
||||
Name(name).
|
||||
SubResource(subresources...).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(data).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -18,15 +18,14 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
context "context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
scheme "github.com/openkruise/kruise/pkg/client/clientset/versioned/scheme"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// DaemonSetsGetter has a method to return a DaemonSetInterface.
|
||||
|
@ -37,158 +36,34 @@ type DaemonSetsGetter interface {
|
|||
|
||||
// DaemonSetInterface has methods to work with DaemonSet resources.
|
||||
type DaemonSetInterface interface {
|
||||
Create(ctx context.Context, daemonSet *v1alpha1.DaemonSet, opts v1.CreateOptions) (*v1alpha1.DaemonSet, error)
|
||||
Update(ctx context.Context, daemonSet *v1alpha1.DaemonSet, opts v1.UpdateOptions) (*v1alpha1.DaemonSet, error)
|
||||
UpdateStatus(ctx context.Context, daemonSet *v1alpha1.DaemonSet, opts v1.UpdateOptions) (*v1alpha1.DaemonSet, error)
|
||||
Create(ctx context.Context, daemonSet *appsv1alpha1.DaemonSet, opts v1.CreateOptions) (*appsv1alpha1.DaemonSet, error)
|
||||
Update(ctx context.Context, daemonSet *appsv1alpha1.DaemonSet, opts v1.UpdateOptions) (*appsv1alpha1.DaemonSet, error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
UpdateStatus(ctx context.Context, daemonSet *appsv1alpha1.DaemonSet, opts v1.UpdateOptions) (*appsv1alpha1.DaemonSet, error)
|
||||
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.DaemonSet, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.DaemonSetList, error)
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1alpha1.DaemonSet, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*appsv1alpha1.DaemonSetList, error)
|
||||
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DaemonSet, err error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1alpha1.DaemonSet, err error)
|
||||
DaemonSetExpansion
|
||||
}
|
||||
|
||||
// daemonSets implements DaemonSetInterface
|
||||
type daemonSets struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
*gentype.ClientWithList[*appsv1alpha1.DaemonSet, *appsv1alpha1.DaemonSetList]
|
||||
}
|
||||
|
||||
// newDaemonSets returns a DaemonSets
|
||||
func newDaemonSets(c *AppsV1alpha1Client, namespace string) *daemonSets {
|
||||
return &daemonSets{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
gentype.NewClientWithList[*appsv1alpha1.DaemonSet, *appsv1alpha1.DaemonSetList](
|
||||
"daemonsets",
|
||||
c.RESTClient(),
|
||||
scheme.ParameterCodec,
|
||||
namespace,
|
||||
func() *appsv1alpha1.DaemonSet { return &appsv1alpha1.DaemonSet{} },
|
||||
func() *appsv1alpha1.DaemonSetList { return &appsv1alpha1.DaemonSetList{} },
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
|
||||
func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.DaemonSet, err error) {
|
||||
result = &v1alpha1.DaemonSet{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("daemonsets").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
|
||||
func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.DaemonSetList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1alpha1.DaemonSetList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("daemonsets").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested daemonSets.
|
||||
func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("daemonsets").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch(ctx)
|
||||
}
|
||||
|
||||
// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any.
|
||||
func (c *daemonSets) Create(ctx context.Context, daemonSet *v1alpha1.DaemonSet, opts v1.CreateOptions) (result *v1alpha1.DaemonSet, err error) {
|
||||
result = &v1alpha1.DaemonSet{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("daemonsets").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(daemonSet).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
|
||||
func (c *daemonSets) Update(ctx context.Context, daemonSet *v1alpha1.DaemonSet, opts v1.UpdateOptions) (result *v1alpha1.DaemonSet, err error) {
|
||||
result = &v1alpha1.DaemonSet{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("daemonsets").
|
||||
Name(daemonSet.Name).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(daemonSet).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1alpha1.DaemonSet, opts v1.UpdateOptions) (result *v1alpha1.DaemonSet, err error) {
|
||||
result = &v1alpha1.DaemonSet{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("daemonsets").
|
||||
Name(daemonSet.Name).
|
||||
SubResource("status").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(daemonSet).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
|
||||
func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("daemonsets").
|
||||
Name(name).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOpts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("daemonsets").
|
||||
VersionedParams(&listOpts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched daemonSet.
|
||||
func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DaemonSet, err error) {
|
||||
result = &v1alpha1.DaemonSet{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("daemonsets").
|
||||
Name(name).
|
||||
SubResource(subresources...).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(data).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -18,15 +18,14 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
context "context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
scheme "github.com/openkruise/kruise/pkg/client/clientset/versioned/scheme"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// EphemeralJobsGetter has a method to return a EphemeralJobInterface.
|
||||
|
@ -37,158 +36,34 @@ type EphemeralJobsGetter interface {
|
|||
|
||||
// EphemeralJobInterface has methods to work with EphemeralJob resources.
|
||||
type EphemeralJobInterface interface {
|
||||
Create(ctx context.Context, ephemeralJob *v1alpha1.EphemeralJob, opts v1.CreateOptions) (*v1alpha1.EphemeralJob, error)
|
||||
Update(ctx context.Context, ephemeralJob *v1alpha1.EphemeralJob, opts v1.UpdateOptions) (*v1alpha1.EphemeralJob, error)
|
||||
UpdateStatus(ctx context.Context, ephemeralJob *v1alpha1.EphemeralJob, opts v1.UpdateOptions) (*v1alpha1.EphemeralJob, error)
|
||||
Create(ctx context.Context, ephemeralJob *appsv1alpha1.EphemeralJob, opts v1.CreateOptions) (*appsv1alpha1.EphemeralJob, error)
|
||||
Update(ctx context.Context, ephemeralJob *appsv1alpha1.EphemeralJob, opts v1.UpdateOptions) (*appsv1alpha1.EphemeralJob, error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
UpdateStatus(ctx context.Context, ephemeralJob *appsv1alpha1.EphemeralJob, opts v1.UpdateOptions) (*appsv1alpha1.EphemeralJob, error)
|
||||
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.EphemeralJob, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.EphemeralJobList, error)
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1alpha1.EphemeralJob, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*appsv1alpha1.EphemeralJobList, error)
|
||||
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EphemeralJob, err error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1alpha1.EphemeralJob, err error)
|
||||
EphemeralJobExpansion
|
||||
}
|
||||
|
||||
// ephemeralJobs implements EphemeralJobInterface
|
||||
type ephemeralJobs struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
*gentype.ClientWithList[*appsv1alpha1.EphemeralJob, *appsv1alpha1.EphemeralJobList]
|
||||
}
|
||||
|
||||
// newEphemeralJobs returns a EphemeralJobs
|
||||
func newEphemeralJobs(c *AppsV1alpha1Client, namespace string) *ephemeralJobs {
|
||||
return &ephemeralJobs{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
gentype.NewClientWithList[*appsv1alpha1.EphemeralJob, *appsv1alpha1.EphemeralJobList](
|
||||
"ephemeraljobs",
|
||||
c.RESTClient(),
|
||||
scheme.ParameterCodec,
|
||||
namespace,
|
||||
func() *appsv1alpha1.EphemeralJob { return &appsv1alpha1.EphemeralJob{} },
|
||||
func() *appsv1alpha1.EphemeralJobList { return &appsv1alpha1.EphemeralJobList{} },
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the ephemeralJob, and returns the corresponding ephemeralJob object, and an error if there is any.
|
||||
func (c *ephemeralJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.EphemeralJob, err error) {
|
||||
result = &v1alpha1.EphemeralJob{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("ephemeraljobs").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of EphemeralJobs that match those selectors.
|
||||
func (c *ephemeralJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.EphemeralJobList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1alpha1.EphemeralJobList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("ephemeraljobs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested ephemeralJobs.
|
||||
func (c *ephemeralJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("ephemeraljobs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch(ctx)
|
||||
}
|
||||
|
||||
// Create takes the representation of a ephemeralJob and creates it. Returns the server's representation of the ephemeralJob, and an error, if there is any.
|
||||
func (c *ephemeralJobs) Create(ctx context.Context, ephemeralJob *v1alpha1.EphemeralJob, opts v1.CreateOptions) (result *v1alpha1.EphemeralJob, err error) {
|
||||
result = &v1alpha1.EphemeralJob{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("ephemeraljobs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(ephemeralJob).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a ephemeralJob and updates it. Returns the server's representation of the ephemeralJob, and an error, if there is any.
|
||||
func (c *ephemeralJobs) Update(ctx context.Context, ephemeralJob *v1alpha1.EphemeralJob, opts v1.UpdateOptions) (result *v1alpha1.EphemeralJob, err error) {
|
||||
result = &v1alpha1.EphemeralJob{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("ephemeraljobs").
|
||||
Name(ephemeralJob.Name).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(ephemeralJob).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *ephemeralJobs) UpdateStatus(ctx context.Context, ephemeralJob *v1alpha1.EphemeralJob, opts v1.UpdateOptions) (result *v1alpha1.EphemeralJob, err error) {
|
||||
result = &v1alpha1.EphemeralJob{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("ephemeraljobs").
|
||||
Name(ephemeralJob.Name).
|
||||
SubResource("status").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(ephemeralJob).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the ephemeralJob and deletes it. Returns an error if one occurs.
|
||||
func (c *ephemeralJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("ephemeraljobs").
|
||||
Name(name).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *ephemeralJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOpts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("ephemeraljobs").
|
||||
VersionedParams(&listOpts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched ephemeralJob.
|
||||
func (c *ephemeralJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EphemeralJob, err error) {
|
||||
result = &v1alpha1.EphemeralJob{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("ephemeraljobs").
|
||||
Name(name).
|
||||
SubResource(subresources...).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(data).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -18,123 +18,34 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeAdvancedCronJobs implements AdvancedCronJobInterface
|
||||
type FakeAdvancedCronJobs struct {
|
||||
// fakeAdvancedCronJobs implements AdvancedCronJobInterface
|
||||
type fakeAdvancedCronJobs struct {
|
||||
*gentype.FakeClientWithList[*v1alpha1.AdvancedCronJob, *v1alpha1.AdvancedCronJobList]
|
||||
Fake *FakeAppsV1alpha1
|
||||
ns string
|
||||
}
|
||||
|
||||
var advancedcronjobsResource = v1alpha1.SchemeGroupVersion.WithResource("advancedcronjobs")
|
||||
|
||||
var advancedcronjobsKind = v1alpha1.SchemeGroupVersion.WithKind("AdvancedCronJob")
|
||||
|
||||
// Get takes name of the advancedCronJob, and returns the corresponding advancedCronJob object, and an error if there is any.
|
||||
func (c *FakeAdvancedCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AdvancedCronJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(advancedcronjobsResource, c.ns, name), &v1alpha1.AdvancedCronJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
func newFakeAdvancedCronJobs(fake *FakeAppsV1alpha1, namespace string) appsv1alpha1.AdvancedCronJobInterface {
|
||||
return &fakeAdvancedCronJobs{
|
||||
gentype.NewFakeClientWithList[*v1alpha1.AdvancedCronJob, *v1alpha1.AdvancedCronJobList](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1alpha1.SchemeGroupVersion.WithResource("advancedcronjobs"),
|
||||
v1alpha1.SchemeGroupVersion.WithKind("AdvancedCronJob"),
|
||||
func() *v1alpha1.AdvancedCronJob { return &v1alpha1.AdvancedCronJob{} },
|
||||
func() *v1alpha1.AdvancedCronJobList { return &v1alpha1.AdvancedCronJobList{} },
|
||||
func(dst, src *v1alpha1.AdvancedCronJobList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1alpha1.AdvancedCronJobList) []*v1alpha1.AdvancedCronJob {
|
||||
return gentype.ToPointerSlice(list.Items)
|
||||
},
|
||||
func(list *v1alpha1.AdvancedCronJobList, items []*v1alpha1.AdvancedCronJob) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1alpha1.AdvancedCronJob), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of AdvancedCronJobs that match those selectors.
|
||||
func (c *FakeAdvancedCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AdvancedCronJobList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(advancedcronjobsResource, advancedcronjobsKind, c.ns, opts), &v1alpha1.AdvancedCronJobList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.AdvancedCronJobList{ListMeta: obj.(*v1alpha1.AdvancedCronJobList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.AdvancedCronJobList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested advancedCronJobs.
|
||||
func (c *FakeAdvancedCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(advancedcronjobsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a advancedCronJob and creates it. Returns the server's representation of the advancedCronJob, and an error, if there is any.
|
||||
func (c *FakeAdvancedCronJobs) Create(ctx context.Context, advancedCronJob *v1alpha1.AdvancedCronJob, opts v1.CreateOptions) (result *v1alpha1.AdvancedCronJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(advancedcronjobsResource, c.ns, advancedCronJob), &v1alpha1.AdvancedCronJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.AdvancedCronJob), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a advancedCronJob and updates it. Returns the server's representation of the advancedCronJob, and an error, if there is any.
|
||||
func (c *FakeAdvancedCronJobs) Update(ctx context.Context, advancedCronJob *v1alpha1.AdvancedCronJob, opts v1.UpdateOptions) (result *v1alpha1.AdvancedCronJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(advancedcronjobsResource, c.ns, advancedCronJob), &v1alpha1.AdvancedCronJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.AdvancedCronJob), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeAdvancedCronJobs) UpdateStatus(ctx context.Context, advancedCronJob *v1alpha1.AdvancedCronJob, opts v1.UpdateOptions) (*v1alpha1.AdvancedCronJob, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(advancedcronjobsResource, "status", c.ns, advancedCronJob), &v1alpha1.AdvancedCronJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.AdvancedCronJob), err
|
||||
}
|
||||
|
||||
// Delete takes name of the advancedCronJob and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeAdvancedCronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(advancedcronjobsResource, c.ns, name, opts), &v1alpha1.AdvancedCronJob{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeAdvancedCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(advancedcronjobsResource, c.ns, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.AdvancedCronJobList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched advancedCronJob.
|
||||
func (c *FakeAdvancedCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AdvancedCronJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(advancedcronjobsResource, c.ns, name, pt, data, subresources...), &v1alpha1.AdvancedCronJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.AdvancedCronJob), err
|
||||
}
|
||||
|
|
|
@ -28,71 +28,71 @@ type FakeAppsV1alpha1 struct {
|
|||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) AdvancedCronJobs(namespace string) v1alpha1.AdvancedCronJobInterface {
|
||||
return &FakeAdvancedCronJobs{c, namespace}
|
||||
return newFakeAdvancedCronJobs(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) BroadcastJobs(namespace string) v1alpha1.BroadcastJobInterface {
|
||||
return &FakeBroadcastJobs{c, namespace}
|
||||
return newFakeBroadcastJobs(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) CloneSets(namespace string) v1alpha1.CloneSetInterface {
|
||||
return &FakeCloneSets{c, namespace}
|
||||
return newFakeCloneSets(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) ContainerRecreateRequests(namespace string) v1alpha1.ContainerRecreateRequestInterface {
|
||||
return &FakeContainerRecreateRequests{c, namespace}
|
||||
return newFakeContainerRecreateRequests(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) DaemonSets(namespace string) v1alpha1.DaemonSetInterface {
|
||||
return &FakeDaemonSets{c, namespace}
|
||||
return newFakeDaemonSets(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) EphemeralJobs(namespace string) v1alpha1.EphemeralJobInterface {
|
||||
return &FakeEphemeralJobs{c, namespace}
|
||||
return newFakeEphemeralJobs(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) ImageListPullJobs(namespace string) v1alpha1.ImageListPullJobInterface {
|
||||
return &FakeImageListPullJobs{c, namespace}
|
||||
return newFakeImageListPullJobs(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) ImagePullJobs(namespace string) v1alpha1.ImagePullJobInterface {
|
||||
return &FakeImagePullJobs{c, namespace}
|
||||
return newFakeImagePullJobs(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) NodeImages() v1alpha1.NodeImageInterface {
|
||||
return &FakeNodeImages{c}
|
||||
return newFakeNodeImages(c)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) NodePodProbes() v1alpha1.NodePodProbeInterface {
|
||||
return &FakeNodePodProbes{c}
|
||||
return newFakeNodePodProbes(c)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) PersistentPodStates(namespace string) v1alpha1.PersistentPodStateInterface {
|
||||
return &FakePersistentPodStates{c, namespace}
|
||||
return newFakePersistentPodStates(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) PodProbeMarkers(namespace string) v1alpha1.PodProbeMarkerInterface {
|
||||
return &FakePodProbeMarkers{c, namespace}
|
||||
return newFakePodProbeMarkers(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) ResourceDistributions() v1alpha1.ResourceDistributionInterface {
|
||||
return &FakeResourceDistributions{c}
|
||||
return newFakeResourceDistributions(c)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) SidecarSets() v1alpha1.SidecarSetInterface {
|
||||
return &FakeSidecarSets{c}
|
||||
return newFakeSidecarSets(c)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) StatefulSets(namespace string) v1alpha1.StatefulSetInterface {
|
||||
return &FakeStatefulSets{c, namespace}
|
||||
return newFakeStatefulSets(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) UnitedDeployments(namespace string) v1alpha1.UnitedDeploymentInterface {
|
||||
return &FakeUnitedDeployments{c, namespace}
|
||||
return newFakeUnitedDeployments(c, namespace)
|
||||
}
|
||||
|
||||
func (c *FakeAppsV1alpha1) WorkloadSpreads(namespace string) v1alpha1.WorkloadSpreadInterface {
|
||||
return &FakeWorkloadSpreads{c, namespace}
|
||||
return newFakeWorkloadSpreads(c, namespace)
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
|
|
|
@ -18,123 +18,34 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeBroadcastJobs implements BroadcastJobInterface
|
||||
type FakeBroadcastJobs struct {
|
||||
// fakeBroadcastJobs implements BroadcastJobInterface
|
||||
type fakeBroadcastJobs struct {
|
||||
*gentype.FakeClientWithList[*v1alpha1.BroadcastJob, *v1alpha1.BroadcastJobList]
|
||||
Fake *FakeAppsV1alpha1
|
||||
ns string
|
||||
}
|
||||
|
||||
var broadcastjobsResource = v1alpha1.SchemeGroupVersion.WithResource("broadcastjobs")
|
||||
|
||||
var broadcastjobsKind = v1alpha1.SchemeGroupVersion.WithKind("BroadcastJob")
|
||||
|
||||
// Get takes name of the broadcastJob, and returns the corresponding broadcastJob object, and an error if there is any.
|
||||
func (c *FakeBroadcastJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BroadcastJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(broadcastjobsResource, c.ns, name), &v1alpha1.BroadcastJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
func newFakeBroadcastJobs(fake *FakeAppsV1alpha1, namespace string) appsv1alpha1.BroadcastJobInterface {
|
||||
return &fakeBroadcastJobs{
|
||||
gentype.NewFakeClientWithList[*v1alpha1.BroadcastJob, *v1alpha1.BroadcastJobList](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1alpha1.SchemeGroupVersion.WithResource("broadcastjobs"),
|
||||
v1alpha1.SchemeGroupVersion.WithKind("BroadcastJob"),
|
||||
func() *v1alpha1.BroadcastJob { return &v1alpha1.BroadcastJob{} },
|
||||
func() *v1alpha1.BroadcastJobList { return &v1alpha1.BroadcastJobList{} },
|
||||
func(dst, src *v1alpha1.BroadcastJobList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1alpha1.BroadcastJobList) []*v1alpha1.BroadcastJob {
|
||||
return gentype.ToPointerSlice(list.Items)
|
||||
},
|
||||
func(list *v1alpha1.BroadcastJobList, items []*v1alpha1.BroadcastJob) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1alpha1.BroadcastJob), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of BroadcastJobs that match those selectors.
|
||||
func (c *FakeBroadcastJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BroadcastJobList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(broadcastjobsResource, broadcastjobsKind, c.ns, opts), &v1alpha1.BroadcastJobList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.BroadcastJobList{ListMeta: obj.(*v1alpha1.BroadcastJobList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.BroadcastJobList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested broadcastJobs.
|
||||
func (c *FakeBroadcastJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(broadcastjobsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a broadcastJob and creates it. Returns the server's representation of the broadcastJob, and an error, if there is any.
|
||||
func (c *FakeBroadcastJobs) Create(ctx context.Context, broadcastJob *v1alpha1.BroadcastJob, opts v1.CreateOptions) (result *v1alpha1.BroadcastJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(broadcastjobsResource, c.ns, broadcastJob), &v1alpha1.BroadcastJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.BroadcastJob), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a broadcastJob and updates it. Returns the server's representation of the broadcastJob, and an error, if there is any.
|
||||
func (c *FakeBroadcastJobs) Update(ctx context.Context, broadcastJob *v1alpha1.BroadcastJob, opts v1.UpdateOptions) (result *v1alpha1.BroadcastJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(broadcastjobsResource, c.ns, broadcastJob), &v1alpha1.BroadcastJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.BroadcastJob), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeBroadcastJobs) UpdateStatus(ctx context.Context, broadcastJob *v1alpha1.BroadcastJob, opts v1.UpdateOptions) (*v1alpha1.BroadcastJob, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(broadcastjobsResource, "status", c.ns, broadcastJob), &v1alpha1.BroadcastJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.BroadcastJob), err
|
||||
}
|
||||
|
||||
// Delete takes name of the broadcastJob and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeBroadcastJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(broadcastjobsResource, c.ns, name, opts), &v1alpha1.BroadcastJob{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeBroadcastJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(broadcastjobsResource, c.ns, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.BroadcastJobList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched broadcastJob.
|
||||
func (c *FakeBroadcastJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BroadcastJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(broadcastjobsResource, c.ns, name, pt, data, subresources...), &v1alpha1.BroadcastJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.BroadcastJob), err
|
||||
}
|
||||
|
|
|
@ -18,146 +18,61 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
context "context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
autoscalingv1 "k8s.io/api/autoscaling/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
// FakeCloneSets implements CloneSetInterface
|
||||
type FakeCloneSets struct {
|
||||
// fakeCloneSets implements CloneSetInterface
|
||||
type fakeCloneSets struct {
|
||||
*gentype.FakeClientWithList[*v1alpha1.CloneSet, *v1alpha1.CloneSetList]
|
||||
Fake *FakeAppsV1alpha1
|
||||
ns string
|
||||
}
|
||||
|
||||
var clonesetsResource = v1alpha1.SchemeGroupVersion.WithResource("clonesets")
|
||||
|
||||
var clonesetsKind = v1alpha1.SchemeGroupVersion.WithKind("CloneSet")
|
||||
|
||||
// Get takes name of the cloneSet, and returns the corresponding cloneSet object, and an error if there is any.
|
||||
func (c *FakeCloneSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CloneSet, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(clonesetsResource, c.ns, name), &v1alpha1.CloneSet{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
func newFakeCloneSets(fake *FakeAppsV1alpha1, namespace string) appsv1alpha1.CloneSetInterface {
|
||||
return &fakeCloneSets{
|
||||
gentype.NewFakeClientWithList[*v1alpha1.CloneSet, *v1alpha1.CloneSetList](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1alpha1.SchemeGroupVersion.WithResource("clonesets"),
|
||||
v1alpha1.SchemeGroupVersion.WithKind("CloneSet"),
|
||||
func() *v1alpha1.CloneSet { return &v1alpha1.CloneSet{} },
|
||||
func() *v1alpha1.CloneSetList { return &v1alpha1.CloneSetList{} },
|
||||
func(dst, src *v1alpha1.CloneSetList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1alpha1.CloneSetList) []*v1alpha1.CloneSet { return gentype.ToPointerSlice(list.Items) },
|
||||
func(list *v1alpha1.CloneSetList, items []*v1alpha1.CloneSet) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1alpha1.CloneSet), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of CloneSets that match those selectors.
|
||||
func (c *FakeCloneSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CloneSetList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(clonesetsResource, clonesetsKind, c.ns, opts), &v1alpha1.CloneSetList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.CloneSetList{ListMeta: obj.(*v1alpha1.CloneSetList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.CloneSetList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested cloneSets.
|
||||
func (c *FakeCloneSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(clonesetsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a cloneSet and creates it. Returns the server's representation of the cloneSet, and an error, if there is any.
|
||||
func (c *FakeCloneSets) Create(ctx context.Context, cloneSet *v1alpha1.CloneSet, opts v1.CreateOptions) (result *v1alpha1.CloneSet, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(clonesetsResource, c.ns, cloneSet), &v1alpha1.CloneSet{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.CloneSet), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a cloneSet and updates it. Returns the server's representation of the cloneSet, and an error, if there is any.
|
||||
func (c *FakeCloneSets) Update(ctx context.Context, cloneSet *v1alpha1.CloneSet, opts v1.UpdateOptions) (result *v1alpha1.CloneSet, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(clonesetsResource, c.ns, cloneSet), &v1alpha1.CloneSet{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.CloneSet), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeCloneSets) UpdateStatus(ctx context.Context, cloneSet *v1alpha1.CloneSet, opts v1.UpdateOptions) (*v1alpha1.CloneSet, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(clonesetsResource, "status", c.ns, cloneSet), &v1alpha1.CloneSet{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.CloneSet), err
|
||||
}
|
||||
|
||||
// Delete takes name of the cloneSet and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeCloneSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(clonesetsResource, c.ns, name, opts), &v1alpha1.CloneSet{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeCloneSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(clonesetsResource, c.ns, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.CloneSetList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched cloneSet.
|
||||
func (c *FakeCloneSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloneSet, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(clonesetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CloneSet{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.CloneSet), err
|
||||
}
|
||||
|
||||
// GetScale takes name of the cloneSet, and returns the corresponding scale object, and an error if there is any.
|
||||
func (c *FakeCloneSets) GetScale(ctx context.Context, cloneSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) {
|
||||
func (c *fakeCloneSets) GetScale(ctx context.Context, cloneSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) {
|
||||
emptyResult := &autoscalingv1.Scale{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetSubresourceAction(clonesetsResource, c.ns, "scale", cloneSetName), &autoscalingv1.Scale{})
|
||||
Invokes(testing.NewGetSubresourceActionWithOptions(c.Resource(), c.Namespace(), "scale", cloneSetName, options), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*autoscalingv1.Scale), err
|
||||
}
|
||||
|
||||
// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
|
||||
func (c *FakeCloneSets) UpdateScale(ctx context.Context, cloneSetName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
|
||||
func (c *fakeCloneSets) UpdateScale(ctx context.Context, cloneSetName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
|
||||
emptyResult := &autoscalingv1.Scale{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(clonesetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{})
|
||||
Invokes(testing.NewUpdateSubresourceActionWithOptions(c.Resource(), "scale", c.Namespace(), scale, opts), &autoscalingv1.Scale{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*autoscalingv1.Scale), err
|
||||
}
|
||||
|
|
|
@ -18,123 +18,34 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeContainerRecreateRequests implements ContainerRecreateRequestInterface
|
||||
type FakeContainerRecreateRequests struct {
|
||||
// fakeContainerRecreateRequests implements ContainerRecreateRequestInterface
|
||||
type fakeContainerRecreateRequests struct {
|
||||
*gentype.FakeClientWithList[*v1alpha1.ContainerRecreateRequest, *v1alpha1.ContainerRecreateRequestList]
|
||||
Fake *FakeAppsV1alpha1
|
||||
ns string
|
||||
}
|
||||
|
||||
var containerrecreaterequestsResource = v1alpha1.SchemeGroupVersion.WithResource("containerrecreaterequests")
|
||||
|
||||
var containerrecreaterequestsKind = v1alpha1.SchemeGroupVersion.WithKind("ContainerRecreateRequest")
|
||||
|
||||
// Get takes name of the containerRecreateRequest, and returns the corresponding containerRecreateRequest object, and an error if there is any.
|
||||
func (c *FakeContainerRecreateRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ContainerRecreateRequest, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(containerrecreaterequestsResource, c.ns, name), &v1alpha1.ContainerRecreateRequest{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
func newFakeContainerRecreateRequests(fake *FakeAppsV1alpha1, namespace string) appsv1alpha1.ContainerRecreateRequestInterface {
|
||||
return &fakeContainerRecreateRequests{
|
||||
gentype.NewFakeClientWithList[*v1alpha1.ContainerRecreateRequest, *v1alpha1.ContainerRecreateRequestList](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1alpha1.SchemeGroupVersion.WithResource("containerrecreaterequests"),
|
||||
v1alpha1.SchemeGroupVersion.WithKind("ContainerRecreateRequest"),
|
||||
func() *v1alpha1.ContainerRecreateRequest { return &v1alpha1.ContainerRecreateRequest{} },
|
||||
func() *v1alpha1.ContainerRecreateRequestList { return &v1alpha1.ContainerRecreateRequestList{} },
|
||||
func(dst, src *v1alpha1.ContainerRecreateRequestList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1alpha1.ContainerRecreateRequestList) []*v1alpha1.ContainerRecreateRequest {
|
||||
return gentype.ToPointerSlice(list.Items)
|
||||
},
|
||||
func(list *v1alpha1.ContainerRecreateRequestList, items []*v1alpha1.ContainerRecreateRequest) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1alpha1.ContainerRecreateRequest), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ContainerRecreateRequests that match those selectors.
|
||||
func (c *FakeContainerRecreateRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ContainerRecreateRequestList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(containerrecreaterequestsResource, containerrecreaterequestsKind, c.ns, opts), &v1alpha1.ContainerRecreateRequestList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.ContainerRecreateRequestList{ListMeta: obj.(*v1alpha1.ContainerRecreateRequestList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.ContainerRecreateRequestList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested containerRecreateRequests.
|
||||
func (c *FakeContainerRecreateRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(containerrecreaterequestsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a containerRecreateRequest and creates it. Returns the server's representation of the containerRecreateRequest, and an error, if there is any.
|
||||
func (c *FakeContainerRecreateRequests) Create(ctx context.Context, containerRecreateRequest *v1alpha1.ContainerRecreateRequest, opts v1.CreateOptions) (result *v1alpha1.ContainerRecreateRequest, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(containerrecreaterequestsResource, c.ns, containerRecreateRequest), &v1alpha1.ContainerRecreateRequest{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ContainerRecreateRequest), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a containerRecreateRequest and updates it. Returns the server's representation of the containerRecreateRequest, and an error, if there is any.
|
||||
func (c *FakeContainerRecreateRequests) Update(ctx context.Context, containerRecreateRequest *v1alpha1.ContainerRecreateRequest, opts v1.UpdateOptions) (result *v1alpha1.ContainerRecreateRequest, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(containerrecreaterequestsResource, c.ns, containerRecreateRequest), &v1alpha1.ContainerRecreateRequest{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ContainerRecreateRequest), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeContainerRecreateRequests) UpdateStatus(ctx context.Context, containerRecreateRequest *v1alpha1.ContainerRecreateRequest, opts v1.UpdateOptions) (*v1alpha1.ContainerRecreateRequest, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(containerrecreaterequestsResource, "status", c.ns, containerRecreateRequest), &v1alpha1.ContainerRecreateRequest{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ContainerRecreateRequest), err
|
||||
}
|
||||
|
||||
// Delete takes name of the containerRecreateRequest and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeContainerRecreateRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(containerrecreaterequestsResource, c.ns, name, opts), &v1alpha1.ContainerRecreateRequest{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeContainerRecreateRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(containerrecreaterequestsResource, c.ns, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.ContainerRecreateRequestList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched containerRecreateRequest.
|
||||
func (c *FakeContainerRecreateRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ContainerRecreateRequest, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(containerrecreaterequestsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ContainerRecreateRequest{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ContainerRecreateRequest), err
|
||||
}
|
||||
|
|
|
@ -18,123 +18,32 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeDaemonSets implements DaemonSetInterface
|
||||
type FakeDaemonSets struct {
|
||||
// fakeDaemonSets implements DaemonSetInterface
|
||||
type fakeDaemonSets struct {
|
||||
*gentype.FakeClientWithList[*v1alpha1.DaemonSet, *v1alpha1.DaemonSetList]
|
||||
Fake *FakeAppsV1alpha1
|
||||
ns string
|
||||
}
|
||||
|
||||
var daemonsetsResource = v1alpha1.SchemeGroupVersion.WithResource("daemonsets")
|
||||
|
||||
var daemonsetsKind = v1alpha1.SchemeGroupVersion.WithKind("DaemonSet")
|
||||
|
||||
// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
|
||||
func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.DaemonSet, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1alpha1.DaemonSet{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
func newFakeDaemonSets(fake *FakeAppsV1alpha1, namespace string) appsv1alpha1.DaemonSetInterface {
|
||||
return &fakeDaemonSets{
|
||||
gentype.NewFakeClientWithList[*v1alpha1.DaemonSet, *v1alpha1.DaemonSetList](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1alpha1.SchemeGroupVersion.WithResource("daemonsets"),
|
||||
v1alpha1.SchemeGroupVersion.WithKind("DaemonSet"),
|
||||
func() *v1alpha1.DaemonSet { return &v1alpha1.DaemonSet{} },
|
||||
func() *v1alpha1.DaemonSetList { return &v1alpha1.DaemonSetList{} },
|
||||
func(dst, src *v1alpha1.DaemonSetList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1alpha1.DaemonSetList) []*v1alpha1.DaemonSet { return gentype.ToPointerSlice(list.Items) },
|
||||
func(list *v1alpha1.DaemonSetList, items []*v1alpha1.DaemonSet) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1alpha1.DaemonSet), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
|
||||
func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.DaemonSetList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1alpha1.DaemonSetList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.DaemonSetList{ListMeta: obj.(*v1alpha1.DaemonSetList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.DaemonSetList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested daemonSets.
|
||||
func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any.
|
||||
func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1alpha1.DaemonSet, opts v1.CreateOptions) (result *v1alpha1.DaemonSet, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1alpha1.DaemonSet{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.DaemonSet), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
|
||||
func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1alpha1.DaemonSet, opts v1.UpdateOptions) (result *v1alpha1.DaemonSet, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1alpha1.DaemonSet{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.DaemonSet), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1alpha1.DaemonSet, opts v1.UpdateOptions) (*v1alpha1.DaemonSet, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1alpha1.DaemonSet{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.DaemonSet), err
|
||||
}
|
||||
|
||||
// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &v1alpha1.DaemonSet{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.DaemonSetList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched daemonSet.
|
||||
func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DaemonSet, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.DaemonSet{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.DaemonSet), err
|
||||
}
|
||||
|
|
|
@ -18,123 +18,34 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeEphemeralJobs implements EphemeralJobInterface
|
||||
type FakeEphemeralJobs struct {
|
||||
// fakeEphemeralJobs implements EphemeralJobInterface
|
||||
type fakeEphemeralJobs struct {
|
||||
*gentype.FakeClientWithList[*v1alpha1.EphemeralJob, *v1alpha1.EphemeralJobList]
|
||||
Fake *FakeAppsV1alpha1
|
||||
ns string
|
||||
}
|
||||
|
||||
var ephemeraljobsResource = v1alpha1.SchemeGroupVersion.WithResource("ephemeraljobs")
|
||||
|
||||
var ephemeraljobsKind = v1alpha1.SchemeGroupVersion.WithKind("EphemeralJob")
|
||||
|
||||
// Get takes name of the ephemeralJob, and returns the corresponding ephemeralJob object, and an error if there is any.
|
||||
func (c *FakeEphemeralJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.EphemeralJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(ephemeraljobsResource, c.ns, name), &v1alpha1.EphemeralJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
func newFakeEphemeralJobs(fake *FakeAppsV1alpha1, namespace string) appsv1alpha1.EphemeralJobInterface {
|
||||
return &fakeEphemeralJobs{
|
||||
gentype.NewFakeClientWithList[*v1alpha1.EphemeralJob, *v1alpha1.EphemeralJobList](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1alpha1.SchemeGroupVersion.WithResource("ephemeraljobs"),
|
||||
v1alpha1.SchemeGroupVersion.WithKind("EphemeralJob"),
|
||||
func() *v1alpha1.EphemeralJob { return &v1alpha1.EphemeralJob{} },
|
||||
func() *v1alpha1.EphemeralJobList { return &v1alpha1.EphemeralJobList{} },
|
||||
func(dst, src *v1alpha1.EphemeralJobList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1alpha1.EphemeralJobList) []*v1alpha1.EphemeralJob {
|
||||
return gentype.ToPointerSlice(list.Items)
|
||||
},
|
||||
func(list *v1alpha1.EphemeralJobList, items []*v1alpha1.EphemeralJob) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1alpha1.EphemeralJob), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of EphemeralJobs that match those selectors.
|
||||
func (c *FakeEphemeralJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.EphemeralJobList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(ephemeraljobsResource, ephemeraljobsKind, c.ns, opts), &v1alpha1.EphemeralJobList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.EphemeralJobList{ListMeta: obj.(*v1alpha1.EphemeralJobList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.EphemeralJobList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested ephemeralJobs.
|
||||
func (c *FakeEphemeralJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(ephemeraljobsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a ephemeralJob and creates it. Returns the server's representation of the ephemeralJob, and an error, if there is any.
|
||||
func (c *FakeEphemeralJobs) Create(ctx context.Context, ephemeralJob *v1alpha1.EphemeralJob, opts v1.CreateOptions) (result *v1alpha1.EphemeralJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(ephemeraljobsResource, c.ns, ephemeralJob), &v1alpha1.EphemeralJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.EphemeralJob), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a ephemeralJob and updates it. Returns the server's representation of the ephemeralJob, and an error, if there is any.
|
||||
func (c *FakeEphemeralJobs) Update(ctx context.Context, ephemeralJob *v1alpha1.EphemeralJob, opts v1.UpdateOptions) (result *v1alpha1.EphemeralJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(ephemeraljobsResource, c.ns, ephemeralJob), &v1alpha1.EphemeralJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.EphemeralJob), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeEphemeralJobs) UpdateStatus(ctx context.Context, ephemeralJob *v1alpha1.EphemeralJob, opts v1.UpdateOptions) (*v1alpha1.EphemeralJob, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(ephemeraljobsResource, "status", c.ns, ephemeralJob), &v1alpha1.EphemeralJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.EphemeralJob), err
|
||||
}
|
||||
|
||||
// Delete takes name of the ephemeralJob and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeEphemeralJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(ephemeraljobsResource, c.ns, name, opts), &v1alpha1.EphemeralJob{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeEphemeralJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(ephemeraljobsResource, c.ns, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.EphemeralJobList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched ephemeralJob.
|
||||
func (c *FakeEphemeralJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EphemeralJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(ephemeraljobsResource, c.ns, name, pt, data, subresources...), &v1alpha1.EphemeralJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.EphemeralJob), err
|
||||
}
|
||||
|
|
|
@ -18,123 +18,34 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeImageListPullJobs implements ImageListPullJobInterface
|
||||
type FakeImageListPullJobs struct {
|
||||
// fakeImageListPullJobs implements ImageListPullJobInterface
|
||||
type fakeImageListPullJobs struct {
|
||||
*gentype.FakeClientWithList[*v1alpha1.ImageListPullJob, *v1alpha1.ImageListPullJobList]
|
||||
Fake *FakeAppsV1alpha1
|
||||
ns string
|
||||
}
|
||||
|
||||
var imagelistpulljobsResource = v1alpha1.SchemeGroupVersion.WithResource("imagelistpulljobs")
|
||||
|
||||
var imagelistpulljobsKind = v1alpha1.SchemeGroupVersion.WithKind("ImageListPullJob")
|
||||
|
||||
// Get takes name of the imageListPullJob, and returns the corresponding imageListPullJob object, and an error if there is any.
|
||||
func (c *FakeImageListPullJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ImageListPullJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(imagelistpulljobsResource, c.ns, name), &v1alpha1.ImageListPullJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
func newFakeImageListPullJobs(fake *FakeAppsV1alpha1, namespace string) appsv1alpha1.ImageListPullJobInterface {
|
||||
return &fakeImageListPullJobs{
|
||||
gentype.NewFakeClientWithList[*v1alpha1.ImageListPullJob, *v1alpha1.ImageListPullJobList](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1alpha1.SchemeGroupVersion.WithResource("imagelistpulljobs"),
|
||||
v1alpha1.SchemeGroupVersion.WithKind("ImageListPullJob"),
|
||||
func() *v1alpha1.ImageListPullJob { return &v1alpha1.ImageListPullJob{} },
|
||||
func() *v1alpha1.ImageListPullJobList { return &v1alpha1.ImageListPullJobList{} },
|
||||
func(dst, src *v1alpha1.ImageListPullJobList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1alpha1.ImageListPullJobList) []*v1alpha1.ImageListPullJob {
|
||||
return gentype.ToPointerSlice(list.Items)
|
||||
},
|
||||
func(list *v1alpha1.ImageListPullJobList, items []*v1alpha1.ImageListPullJob) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1alpha1.ImageListPullJob), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ImageListPullJobs that match those selectors.
|
||||
func (c *FakeImageListPullJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ImageListPullJobList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(imagelistpulljobsResource, imagelistpulljobsKind, c.ns, opts), &v1alpha1.ImageListPullJobList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.ImageListPullJobList{ListMeta: obj.(*v1alpha1.ImageListPullJobList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.ImageListPullJobList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested imageListPullJobs.
|
||||
func (c *FakeImageListPullJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(imagelistpulljobsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a imageListPullJob and creates it. Returns the server's representation of the imageListPullJob, and an error, if there is any.
|
||||
func (c *FakeImageListPullJobs) Create(ctx context.Context, imageListPullJob *v1alpha1.ImageListPullJob, opts v1.CreateOptions) (result *v1alpha1.ImageListPullJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(imagelistpulljobsResource, c.ns, imageListPullJob), &v1alpha1.ImageListPullJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ImageListPullJob), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a imageListPullJob and updates it. Returns the server's representation of the imageListPullJob, and an error, if there is any.
|
||||
func (c *FakeImageListPullJobs) Update(ctx context.Context, imageListPullJob *v1alpha1.ImageListPullJob, opts v1.UpdateOptions) (result *v1alpha1.ImageListPullJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(imagelistpulljobsResource, c.ns, imageListPullJob), &v1alpha1.ImageListPullJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ImageListPullJob), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeImageListPullJobs) UpdateStatus(ctx context.Context, imageListPullJob *v1alpha1.ImageListPullJob, opts v1.UpdateOptions) (*v1alpha1.ImageListPullJob, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(imagelistpulljobsResource, "status", c.ns, imageListPullJob), &v1alpha1.ImageListPullJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ImageListPullJob), err
|
||||
}
|
||||
|
||||
// Delete takes name of the imageListPullJob and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeImageListPullJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(imagelistpulljobsResource, c.ns, name, opts), &v1alpha1.ImageListPullJob{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeImageListPullJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(imagelistpulljobsResource, c.ns, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.ImageListPullJobList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched imageListPullJob.
|
||||
func (c *FakeImageListPullJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ImageListPullJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(imagelistpulljobsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ImageListPullJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ImageListPullJob), err
|
||||
}
|
||||
|
|
|
@ -18,123 +18,34 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeImagePullJobs implements ImagePullJobInterface
|
||||
type FakeImagePullJobs struct {
|
||||
// fakeImagePullJobs implements ImagePullJobInterface
|
||||
type fakeImagePullJobs struct {
|
||||
*gentype.FakeClientWithList[*v1alpha1.ImagePullJob, *v1alpha1.ImagePullJobList]
|
||||
Fake *FakeAppsV1alpha1
|
||||
ns string
|
||||
}
|
||||
|
||||
var imagepulljobsResource = v1alpha1.SchemeGroupVersion.WithResource("imagepulljobs")
|
||||
|
||||
var imagepulljobsKind = v1alpha1.SchemeGroupVersion.WithKind("ImagePullJob")
|
||||
|
||||
// Get takes name of the imagePullJob, and returns the corresponding imagePullJob object, and an error if there is any.
|
||||
func (c *FakeImagePullJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ImagePullJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(imagepulljobsResource, c.ns, name), &v1alpha1.ImagePullJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
func newFakeImagePullJobs(fake *FakeAppsV1alpha1, namespace string) appsv1alpha1.ImagePullJobInterface {
|
||||
return &fakeImagePullJobs{
|
||||
gentype.NewFakeClientWithList[*v1alpha1.ImagePullJob, *v1alpha1.ImagePullJobList](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1alpha1.SchemeGroupVersion.WithResource("imagepulljobs"),
|
||||
v1alpha1.SchemeGroupVersion.WithKind("ImagePullJob"),
|
||||
func() *v1alpha1.ImagePullJob { return &v1alpha1.ImagePullJob{} },
|
||||
func() *v1alpha1.ImagePullJobList { return &v1alpha1.ImagePullJobList{} },
|
||||
func(dst, src *v1alpha1.ImagePullJobList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1alpha1.ImagePullJobList) []*v1alpha1.ImagePullJob {
|
||||
return gentype.ToPointerSlice(list.Items)
|
||||
},
|
||||
func(list *v1alpha1.ImagePullJobList, items []*v1alpha1.ImagePullJob) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1alpha1.ImagePullJob), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ImagePullJobs that match those selectors.
|
||||
func (c *FakeImagePullJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ImagePullJobList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(imagepulljobsResource, imagepulljobsKind, c.ns, opts), &v1alpha1.ImagePullJobList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.ImagePullJobList{ListMeta: obj.(*v1alpha1.ImagePullJobList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.ImagePullJobList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested imagePullJobs.
|
||||
func (c *FakeImagePullJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(imagepulljobsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a imagePullJob and creates it. Returns the server's representation of the imagePullJob, and an error, if there is any.
|
||||
func (c *FakeImagePullJobs) Create(ctx context.Context, imagePullJob *v1alpha1.ImagePullJob, opts v1.CreateOptions) (result *v1alpha1.ImagePullJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(imagepulljobsResource, c.ns, imagePullJob), &v1alpha1.ImagePullJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ImagePullJob), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a imagePullJob and updates it. Returns the server's representation of the imagePullJob, and an error, if there is any.
|
||||
func (c *FakeImagePullJobs) Update(ctx context.Context, imagePullJob *v1alpha1.ImagePullJob, opts v1.UpdateOptions) (result *v1alpha1.ImagePullJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(imagepulljobsResource, c.ns, imagePullJob), &v1alpha1.ImagePullJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ImagePullJob), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeImagePullJobs) UpdateStatus(ctx context.Context, imagePullJob *v1alpha1.ImagePullJob, opts v1.UpdateOptions) (*v1alpha1.ImagePullJob, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(imagepulljobsResource, "status", c.ns, imagePullJob), &v1alpha1.ImagePullJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ImagePullJob), err
|
||||
}
|
||||
|
||||
// Delete takes name of the imagePullJob and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeImagePullJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(imagepulljobsResource, c.ns, name, opts), &v1alpha1.ImagePullJob{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeImagePullJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(imagepulljobsResource, c.ns, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.ImagePullJobList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched imagePullJob.
|
||||
func (c *FakeImagePullJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ImagePullJob, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(imagepulljobsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ImagePullJob{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ImagePullJob), err
|
||||
}
|
||||
|
|
|
@ -18,114 +18,32 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeNodeImages implements NodeImageInterface
|
||||
type FakeNodeImages struct {
|
||||
// fakeNodeImages implements NodeImageInterface
|
||||
type fakeNodeImages struct {
|
||||
*gentype.FakeClientWithList[*v1alpha1.NodeImage, *v1alpha1.NodeImageList]
|
||||
Fake *FakeAppsV1alpha1
|
||||
}
|
||||
|
||||
var nodeimagesResource = v1alpha1.SchemeGroupVersion.WithResource("nodeimages")
|
||||
|
||||
var nodeimagesKind = v1alpha1.SchemeGroupVersion.WithKind("NodeImage")
|
||||
|
||||
// Get takes name of the nodeImage, and returns the corresponding nodeImage object, and an error if there is any.
|
||||
func (c *FakeNodeImages) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeImage, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootGetAction(nodeimagesResource, name), &v1alpha1.NodeImage{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
func newFakeNodeImages(fake *FakeAppsV1alpha1) appsv1alpha1.NodeImageInterface {
|
||||
return &fakeNodeImages{
|
||||
gentype.NewFakeClientWithList[*v1alpha1.NodeImage, *v1alpha1.NodeImageList](
|
||||
fake.Fake,
|
||||
"",
|
||||
v1alpha1.SchemeGroupVersion.WithResource("nodeimages"),
|
||||
v1alpha1.SchemeGroupVersion.WithKind("NodeImage"),
|
||||
func() *v1alpha1.NodeImage { return &v1alpha1.NodeImage{} },
|
||||
func() *v1alpha1.NodeImageList { return &v1alpha1.NodeImageList{} },
|
||||
func(dst, src *v1alpha1.NodeImageList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1alpha1.NodeImageList) []*v1alpha1.NodeImage { return gentype.ToPointerSlice(list.Items) },
|
||||
func(list *v1alpha1.NodeImageList, items []*v1alpha1.NodeImage) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1alpha1.NodeImage), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of NodeImages that match those selectors.
|
||||
func (c *FakeNodeImages) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodeImageList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootListAction(nodeimagesResource, nodeimagesKind, opts), &v1alpha1.NodeImageList{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.NodeImageList{ListMeta: obj.(*v1alpha1.NodeImageList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.NodeImageList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested nodeImages.
|
||||
func (c *FakeNodeImages) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewRootWatchAction(nodeimagesResource, opts))
|
||||
}
|
||||
|
||||
// Create takes the representation of a nodeImage and creates it. Returns the server's representation of the nodeImage, and an error, if there is any.
|
||||
func (c *FakeNodeImages) Create(ctx context.Context, nodeImage *v1alpha1.NodeImage, opts v1.CreateOptions) (result *v1alpha1.NodeImage, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootCreateAction(nodeimagesResource, nodeImage), &v1alpha1.NodeImage{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.NodeImage), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a nodeImage and updates it. Returns the server's representation of the nodeImage, and an error, if there is any.
|
||||
func (c *FakeNodeImages) Update(ctx context.Context, nodeImage *v1alpha1.NodeImage, opts v1.UpdateOptions) (result *v1alpha1.NodeImage, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootUpdateAction(nodeimagesResource, nodeImage), &v1alpha1.NodeImage{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.NodeImage), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeNodeImages) UpdateStatus(ctx context.Context, nodeImage *v1alpha1.NodeImage, opts v1.UpdateOptions) (*v1alpha1.NodeImage, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootUpdateSubresourceAction(nodeimagesResource, "status", nodeImage), &v1alpha1.NodeImage{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.NodeImage), err
|
||||
}
|
||||
|
||||
// Delete takes name of the nodeImage and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeNodeImages) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewRootDeleteActionWithOptions(nodeimagesResource, name, opts), &v1alpha1.NodeImage{})
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeNodeImages) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewRootDeleteCollectionAction(nodeimagesResource, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.NodeImageList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched nodeImage.
|
||||
func (c *FakeNodeImages) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodeImage, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootPatchSubresourceAction(nodeimagesResource, name, pt, data, subresources...), &v1alpha1.NodeImage{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.NodeImage), err
|
||||
}
|
||||
|
|
|
@ -18,114 +18,34 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
appsv1alpha1 "github.com/openkruise/kruise/pkg/client/clientset/versioned/typed/apps/v1alpha1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeNodePodProbes implements NodePodProbeInterface
|
||||
type FakeNodePodProbes struct {
|
||||
// fakeNodePodProbes implements NodePodProbeInterface
|
||||
type fakeNodePodProbes struct {
|
||||
*gentype.FakeClientWithList[*v1alpha1.NodePodProbe, *v1alpha1.NodePodProbeList]
|
||||
Fake *FakeAppsV1alpha1
|
||||
}
|
||||
|
||||
var nodepodprobesResource = v1alpha1.SchemeGroupVersion.WithResource("nodepodprobes")
|
||||
|
||||
var nodepodprobesKind = v1alpha1.SchemeGroupVersion.WithKind("NodePodProbe")
|
||||
|
||||
// Get takes name of the nodePodProbe, and returns the corresponding nodePodProbe object, and an error if there is any.
|
||||
func (c *FakeNodePodProbes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodePodProbe, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootGetAction(nodepodprobesResource, name), &v1alpha1.NodePodProbe{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
func newFakeNodePodProbes(fake *FakeAppsV1alpha1) appsv1alpha1.NodePodProbeInterface {
|
||||
return &fakeNodePodProbes{
|
||||
gentype.NewFakeClientWithList[*v1alpha1.NodePodProbe, *v1alpha1.NodePodProbeList](
|
||||
fake.Fake,
|
||||
"",
|
||||
v1alpha1.SchemeGroupVersion.WithResource("nodepodprobes"),
|
||||
v1alpha1.SchemeGroupVersion.WithKind("NodePodProbe"),
|
||||
func() *v1alpha1.NodePodProbe { return &v1alpha1.NodePodProbe{} },
|
||||
func() *v1alpha1.NodePodProbeList { return &v1alpha1.NodePodProbeList{} },
|
||||
func(dst, src *v1alpha1.NodePodProbeList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1alpha1.NodePodProbeList) []*v1alpha1.NodePodProbe {
|
||||
return gentype.ToPointerSlice(list.Items)
|
||||
},
|
||||
func(list *v1alpha1.NodePodProbeList, items []*v1alpha1.NodePodProbe) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1alpha1.NodePodProbe), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of NodePodProbes that match those selectors.
|
||||
func (c *FakeNodePodProbes) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodePodProbeList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootListAction(nodepodprobesResource, nodepodprobesKind, opts), &v1alpha1.NodePodProbeList{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.NodePodProbeList{ListMeta: obj.(*v1alpha1.NodePodProbeList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.NodePodProbeList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested nodePodProbes.
|
||||
func (c *FakeNodePodProbes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewRootWatchAction(nodepodprobesResource, opts))
|
||||
}
|
||||
|
||||
// Create takes the representation of a nodePodProbe and creates it. Returns the server's representation of the nodePodProbe, and an error, if there is any.
|
||||
func (c *FakeNodePodProbes) Create(ctx context.Context, nodePodProbe *v1alpha1.NodePodProbe, opts v1.CreateOptions) (result *v1alpha1.NodePodProbe, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootCreateAction(nodepodprobesResource, nodePodProbe), &v1alpha1.NodePodProbe{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.NodePodProbe), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a nodePodProbe and updates it. Returns the server's representation of the nodePodProbe, and an error, if there is any.
|
||||
func (c *FakeNodePodProbes) Update(ctx context.Context, nodePodProbe *v1alpha1.NodePodProbe, opts v1.UpdateOptions) (result *v1alpha1.NodePodProbe, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootUpdateAction(nodepodprobesResource, nodePodProbe), &v1alpha1.NodePodProbe{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.NodePodProbe), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeNodePodProbes) UpdateStatus(ctx context.Context, nodePodProbe *v1alpha1.NodePodProbe, opts v1.UpdateOptions) (*v1alpha1.NodePodProbe, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootUpdateSubresourceAction(nodepodprobesResource, "status", nodePodProbe), &v1alpha1.NodePodProbe{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.NodePodProbe), err
|
||||
}
|
||||
|
||||
// Delete takes name of the nodePodProbe and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeNodePodProbes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewRootDeleteActionWithOptions(nodepodprobesResource, name, opts), &v1alpha1.NodePodProbe{})
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeNodePodProbes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewRootDeleteCollectionAction(nodepodprobesResource, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.NodePodProbeList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched nodePodProbe.
|
||||
func (c *FakeNodePodProbes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodePodProbe, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootPatchSubresourceAction(nodepodprobesResource, name, pt, data, subresources...), &v1alpha1.NodePodProbe{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.NodePodProbe), err
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue