mirror of https://github.com/openkruise/kruise.git
Compare commits
9 Commits
Author | SHA1 | Date
---|---|---
| | 535ba1bcec | |
| | 99dfc93117 | |
| | 5762908fee | |
| | c723e957fc | |
| | 991c0f43cf | |
| | 56f407c1b7 | |
| | d4b5d2f132 | |
| | 2fe731cd69 | |
| | 8ab2376196 | |
@@ -13,6 +13,16 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKERHUB_USERNAME }}
          password: ${{ secrets.HUB_KRIUSE }}
      - name: Build the Docker image
        run: IMG=openkruise/kruise-manager:${{ github.ref_name }} & make docker-multiarch
        run: |
          docker buildx create --use --platform=linux/amd64,linux/arm64,linux/ppc64le --name multi-platform-builder
          docker buildx ls
          IMG=openkruise/kruise-manager:${{ github.ref_name }} make docker-multiarch
@@ -1,5 +1,12 @@
# Change Log

## v1.7.1
> Change log since v1.7.0

### Bug fixes
- When update crd webhook caBundle, if caBundle does not change, do not update crd again. ([#1717](https://github.com/openkruise/kruise/pull/1717), [@zmberg](https://github.com/zmberg))
- Remove normal init container in pod's sidecarSet in-place update annotation. ([#1719](https://github.com/openkruise/kruise/pull/1719), [@zmberg](https://github.com/zmberg))

## v1.7.0
> Change log since v1.6.3
go.mod (9 changed lines)
@@ -25,9 +25,9 @@ require (
    golang.org/x/time v0.3.0
    gomodules.xyz/jsonpatch/v2 v2.4.0
    google.golang.org/grpc v1.63.0
    k8s.io/api v0.30.0
    k8s.io/apiextensions-apiserver v0.30.0
    k8s.io/apimachinery v0.30.0
    k8s.io/api v0.28.9
    k8s.io/apiextensions-apiserver v0.28.9
    k8s.io/apimachinery v0.28.9
    k8s.io/apiserver v0.28.9
    k8s.io/client-go v0.28.9
    k8s.io/code-generator v0.28.9

@@ -40,7 +40,7 @@ require (
    k8s.io/kubelet v0.28.9
    k8s.io/kubernetes v1.28.9
    k8s.io/utils v0.0.0-20230726121419-3b25d923346b
    sigs.k8s.io/controller-runtime v0.16.5
    sigs.k8s.io/controller-runtime v0.16.6
)

require (

@@ -48,6 +48,7 @@ require (
    github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
    github.com/coreos/go-semver v0.3.1 // indirect
    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
    github.com/go-logr/zapr v1.2.4 // indirect
    github.com/google/cel-go v0.16.1 // indirect
    github.com/google/gnostic-models v0.6.8 // indirect
    github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
go.sum (18 changed lines)
@@ -20,6 +20,7 @@ github.com/appscode/jsonpatch v1.0.1/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOf
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=

@@ -84,11 +85,13 @@ github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7Y
github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA=
github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU=
github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=

@@ -352,6 +355,7 @@ github.com/xyproto/simpleredis v0.0.0-20200201215242-1ff0da2967b4/go.mod h1:U/ZO
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=

@@ -387,9 +391,13 @@ go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=

@@ -403,8 +411,10 @@ golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI=
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=

@@ -416,6 +426,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=

@@ -429,6 +440,7 @@ golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=

@@ -449,6 +461,7 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=

@@ -458,6 +471,7 @@ golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=

@@ -561,8 +575,8 @@ k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSn
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
sigs.k8s.io/controller-runtime v0.16.5 h1:yr1cEJbX08xsTW6XEIzT13KHHmIyX8Umvme2cULvFZw=
sigs.k8s.io/controller-runtime v0.16.5/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
sigs.k8s.io/controller-runtime v0.16.6 h1:FiXwTuFF5ZJKmozfP2Z0j7dh6kmxP4Ou1KLfxgKKC3I=
sigs.k8s.io/controller-runtime v0.16.6/go.mod h1:+dQzkZxnylD0u49e0a+7AR+vlibEBaThmPca7lTyUsI=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
main.go (9 changed lines)
@@ -33,6 +33,9 @@ import (
    _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
    "k8s.io/component-base/logs"
    logsapi "k8s.io/component-base/logs/api/v1"
    _ "k8s.io/component-base/logs/json/register" // for JSON log format registration
    "k8s.io/klog/v2"
    "k8s.io/klog/v2/klogr"
    "k8s.io/kubernetes/pkg/capabilities"

@@ -125,11 +128,17 @@ func main() {
    flag.DurationVar(&controllerCacheSyncTimeout, "controller-cache-sync-timeout", defaultControllerCacheSyncTimeout, "CacheSyncTimeout refers to the time limit set to wait for syncing caches. Defaults to 2 minutes if not set.")

    utilfeature.DefaultMutableFeatureGate.AddFlag(pflag.CommandLine)
    logOptions := logs.NewOptions()
    logsapi.AddFlags(logOptions, pflag.CommandLine)
    klog.InitFlags(nil)
    pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
    pflag.Parse()
    rand.Seed(time.Now().UnixNano())
    ctrl.SetLogger(klogr.New())
    if err := logsapi.ValidateAndApply(logOptions, nil); err != nil {
        setupLog.Error(err, "logsapi ValidateAndApply failed")
        os.Exit(1)
    }
    features.SetDefaultFeatureGates()
    util.SetControllerCacheSyncTimeout(controllerCacheSyncTimeout)
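The main.go hunk above wires the component-base structured logging options into kruise-manager's flag handling: register the logging flags before `pflag.Parse()`, then validate and apply them after parsing. Below is a minimal, standalone sketch of that ordering, assuming `k8s.io/component-base` v0.28 as pinned in the go.mod diff above; `klog.ErrorS` stands in for the repository's `setupLog` logger, so treat the error handling as illustrative rather than kruise's exact code.

```go
package main

import (
	"flag"
	"os"

	"github.com/spf13/pflag"
	"k8s.io/component-base/logs"
	logsapi "k8s.io/component-base/logs/api/v1"
	_ "k8s.io/component-base/logs/json/register" // registers the JSON log format
	"k8s.io/klog/v2"
	"k8s.io/klog/v2/klogr"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// Register logging flags (e.g. --logging-format) before parsing.
	logOptions := logs.NewOptions()
	logsapi.AddFlags(logOptions, pflag.CommandLine)
	klog.InitFlags(nil)
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.Parse()

	// Set the controller-runtime logger, then validate and apply the
	// logging configuration chosen on the command line.
	ctrl.SetLogger(klogr.New())
	if err := logsapi.ValidateAndApply(logOptions, nil); err != nil {
		klog.ErrorS(err, "logsapi ValidateAndApply failed")
		os.Exit(1)
	}
}
```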
@@ -242,14 +242,7 @@ func UpdatePodSidecarSetHash(pod *corev1.Pod, sidecarSet *appsv1alpha1.SidecarSe
        // compatible done
    }

    sidecarList := sets.NewString()
    for _, sidecar := range sidecarSet.Spec.Containers {
        sidecarList.Insert(sidecar.Name)
    }
    for _, sidecar := range sidecarSet.Spec.InitContainers {
        sidecarList.Insert(sidecar.Name)
    }

    sidecarList := listSidecarNameInSidecarSet(sidecarSet)
    sidecarSetHash[sidecarSet.Name] = SidecarSetUpgradeSpec{
        UpdateTimestamp: metav1.Now(),
        SidecarSetHash: GetSidecarSetRevision(sidecarSet),

@@ -575,3 +568,17 @@ func IsSidecarContainer(container corev1.Container) bool {
    }
    return false
}

// listSidecarNameInSidecarSet list always init containers and sidecar containers
func listSidecarNameInSidecarSet(sidecarSet *appsv1alpha1.SidecarSet) sets.String {
    sidecarList := sets.NewString()
    for _, sidecar := range sidecarSet.Spec.InitContainers {
        if IsSidecarContainer(sidecar.Container) {
            sidecarList.Insert(sidecar.Name)
        }
    }
    for _, sidecar := range sidecarSet.Spec.Containers {
        sidecarList.Insert(sidecar.Name)
    }
    return sidecarList
}
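The new listSidecarNameInSidecarSet helper only records an init container in the in-place update annotation when IsSidecarContainer accepts it, which is what the #1719 changelog entry refers to. The body of kruise's IsSidecarContainer is not shown in this hunk; the sketch below assumes it follows the Kubernetes native-sidecar convention (an init container declaring restartPolicy: Always), an assumption, not the shipped implementation. Both the package and function name here are hypothetical.

```go
package sketch

import corev1 "k8s.io/api/core/v1"

// isNativeSidecar reports whether an init container is a Kubernetes native
// sidecar, i.e. an init container with restartPolicy set to Always
// (the SidecarContainers feature). Assumed to mirror IsSidecarContainer above.
func isNativeSidecar(c corev1.Container) bool {
	return c.RestartPolicy != nil && *c.RestartPolicy == corev1.ContainerRestartPolicyAlways
}
```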
@@ -45,6 +45,7 @@ import (
    imagejobutilfunc "github.com/openkruise/kruise/pkg/util/imagejob/utilfunction"
    "github.com/openkruise/kruise/pkg/util/inplaceupdate"
    "github.com/openkruise/kruise/pkg/util/lifecycle"
    "github.com/openkruise/kruise/pkg/util/specifieddelete"
)

// Realistic value for maximum in-flight requests when processing in parallel mode.

@@ -603,11 +604,17 @@ func (ssc *defaultStatefulSetControl) rollingUpdateStatefulsetPods(
        }
    }

    // handle specified deleted pod under maxUnavailable constrain
    // NOTE: specified deletion is not constraint by partition setting
    specifiedDeletedPods, err := ssc.handleSpecifiedDeletedPods(set, status, currentRevision, updateRevision, replicas, maxUnavailable, unavailablePods)
    if err != nil {
        return status, err
    }

    updateIndexes := sortPodsToUpdate(set.Spec.UpdateStrategy.RollingUpdate, updateRevision.Name, *set.Spec.Replicas, replicas)
    klog.V(3).InfoS("Prepare to update pods indexes for StatefulSet", "statefulSet", klog.KObj(set), "podIndexes", updateIndexes)
    // update pods in sequence
    for _, target := range updateIndexes {

        // the target is already up-to-date, go to next
        if getPodRevision(replicas[target]) == updateRevision.Name {
            continue

@@ -622,22 +629,26 @@ func (ssc *defaultStatefulSetControl) rollingUpdateStatefulsetPods(
        }

        // delete the Pod if it is not already terminating and does not match the update revision.
        if !isTerminating(replicas[target]) {
        if !specifiedDeletedPods.Has(replicas[target].Name) && !isTerminating(replicas[target]) {
            // todo validate in-place for pub
            inplacing, inplaceUpdateErr := ssc.inPlaceUpdatePod(set, replicas[target], updateRevision, revisions)
            if inplaceUpdateErr != nil {
                return status, inplaceUpdateErr
            }
            // if pod is inplacing or actual deleting, decrease revision
            revisionNeedDecrease := inplacing
            if !inplacing {
                klog.V(2).InfoS("StatefulSet terminating Pod for update", "statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[target]))
                if _, err := ssc.deletePod(set, replicas[target]); err != nil {
                if _, actualDeleting, err := ssc.deletePod(set, replicas[target]); err != nil {
                    return status, err
                } else {
                    revisionNeedDecrease = actualDeleting
                }
            }
            // mark target as unavailable because it's updated
            unavailablePods.Insert(replicas[target].Name)

            if getPodRevision(replicas[target]) == currentRevision.Name {
            if revisionNeedDecrease && getPodRevision(replicas[target]) == currentRevision.Name {
                status.CurrentReplicas--
            }
        }

@@ -646,22 +657,63 @@ func (ssc *defaultStatefulSetControl) rollingUpdateStatefulsetPods(
    return status, nil
}

func (ssc *defaultStatefulSetControl) deletePod(set *appsv1beta1.StatefulSet, pod *v1.Pod) (bool, error) {
func (ssc *defaultStatefulSetControl) handleSpecifiedDeletedPods(
    set *appsv1beta1.StatefulSet,
    status *appsv1beta1.StatefulSetStatus,
    currentRevision *apps.ControllerRevision,
    updateRevision *apps.ControllerRevision,
    replicas []*v1.Pod,
    maxUnavailable int,
    unavailablePods sets.String) (sets.String, error) {
    specifiedDeletedPods := sets.NewString()
    for target := len(replicas) - 1; target >= 0; target-- {
        if replicas[target] == nil || !specifieddelete.IsSpecifiedDelete(replicas[target]) {
            continue
        }
        // the unavailable pods count exceed the maxUnavailable and the target is available, so we can't process it,
        // why skip here rather than return?
        // case: pod 0 ready, pod1 unready, pod 2 unready, pod3 ready, pod4 ready
        // when maxUnavailable = 3, pod4 with specified deleted will be deleted but pod3 can't
        // pod 2 and pod 1 can be deleted because they were unavailable
        if len(unavailablePods) >= maxUnavailable && !unavailablePods.Has(replicas[target].Name) {
            klog.V(4).InfoS("StatefulSet was waiting for unavailable Pods to update, blocked pod",
                "statefulSet", klog.KObj(set), "unavailablePods", unavailablePods.List(), "blockedPod", klog.KObj(replicas[target]))
            continue
        }

        specifiedDeletedPods.Insert(replicas[target].Name)
        if _, actualDeleting, err := ssc.deletePod(set, replicas[target]); err != nil {
            return specifiedDeletedPods, err
        } else if actualDeleting {
            // if actual deleted, update revision count in status
            if getPodRevision(replicas[target]) == currentRevision.Name {
                status.CurrentReplicas--
            } else if getPodRevision(replicas[target]) == updateRevision.Name {
                status.UpdatedReplicas--
            }
        }
        // mark target as unavailable because it's deleting or pre-deleting
        unavailablePods.Insert(replicas[target].Name)
    }
    return specifiedDeletedPods, nil
}

func (ssc *defaultStatefulSetControl) deletePod(set *appsv1beta1.StatefulSet, pod *v1.Pod) (modified, actualDeleting bool, err error) {
    if set.Spec.Lifecycle != nil && lifecycle.IsPodHooked(set.Spec.Lifecycle.PreDelete, pod) {
        markPodNotReady := set.Spec.Lifecycle.PreDelete.MarkPodNotReady
        if updated, _, err := ssc.lifecycleControl.UpdatePodLifecycle(pod, appspub.LifecycleStatePreparingDelete, markPodNotReady); err != nil {
            return false, err
            return false, false, err
        } else if updated {
            klog.V(3).InfoS("StatefulSet scaling update pod lifecycle to PreparingDelete", "statefulSet", klog.KObj(set), "pod", klog.KObj(pod))
            return true, nil
            return true, false, nil
        }
        return false, nil
        return false, false, nil
    }
    if err := ssc.podControl.DeleteStatefulPod(set, pod); err != nil {
        ssc.recorder.Eventf(set, v1.EventTypeWarning, "FailedDelete", "failed to delete pod %s: %v", pod.Name, err)
        return false, err
        return false, false, err
    }
    return true, nil
    return true, true, nil
}

func (ssc *defaultStatefulSetControl) refreshPodState(set *appsv1beta1.StatefulSet, pod *v1.Pod, updateRevision string) (bool, time.Duration, error) {

@@ -947,7 +999,7 @@ func (ssc *defaultStatefulSetControl) processCondemned(ctx context.Context, set
    logger.V(2).Info("Pod of StatefulSet is terminating for scale down",
        "statefulSet", klog.KObj(set), "pod", klog.KObj(condemned[i]))

    modified, err := ssc.deletePod(set, condemned[i])
    modified, _, err := ssc.deletePod(set, condemned[i])
    if err != nil || (monotonic && modified) {
        return true, err
    }

@@ -990,7 +1042,7 @@ func (ssc *defaultStatefulSetControl) processReplica(
    // regardless of the exit code.
    if isFailed(replicas[i]) || isSucceeded(replicas[i]) {
        if replicas[i].DeletionTimestamp == nil {
            if _, err := ssc.deletePod(set, replicas[i]); err != nil {
            if _, _, err := ssc.deletePod(set, replicas[i]); err != nil {
                return true, false, err
            }
        }
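The new handleSpecifiedDeletedPods walks pods from the highest ordinal down and, when the unavailable budget is spent, skips a still-available specified-delete pod instead of aborting the whole pass. The following is a self-contained sketch of just that gate, using the worked case from the comment in the hunk above; `canProcessSpecifiedDelete` is a hypothetical helper written for illustration, not kruise API.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// canProcessSpecifiedDelete mirrors the gate used in handleSpecifiedDeletedPods:
// a specified-delete pod may be processed when the unavailable budget still has
// room, or when the pod is already counted as unavailable anyway.
func canProcessSpecifiedDelete(unavailable sets.String, maxUnavailable int, pod string) bool {
	return len(unavailable) < maxUnavailable || unavailable.Has(pod)
}

func main() {
	// Worked case from the comment above: pod-0 ready, pod-1 unready,
	// pod-2 unready, pod-3 ready, pod-4 ready, maxUnavailable = 3.
	unavailable := sets.NewString("pod-1", "pod-2")

	fmt.Println(canProcessSpecifiedDelete(unavailable, 3, "pod-4")) // true: budget still has room
	unavailable.Insert("pod-4")                                     // pod-4 is now being deleted
	fmt.Println(canProcessSpecifiedDelete(unavailable, 3, "pod-3")) // false: pod-3 is blocked
	fmt.Println(canProcessSpecifiedDelete(unavailable, 3, "pod-1")) // true: already unavailable
}
```

The same hunk also changes deletePod to return (modified, actualDeleting, err), so callers can tell a PreDelete lifecycle transition apart from a real deletion and only decrement the revision counters in the latter case.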
@@ -30,6 +30,7 @@ import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    apps "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -50,6 +51,7 @@ import (
    utilpointer "k8s.io/utils/pointer"

    appspub "github.com/openkruise/kruise/apis/apps/pub"
    appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
    appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
    kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
    kruisefake "github.com/openkruise/kruise/pkg/client/clientset/versioned/fake"

@@ -2211,6 +2213,153 @@ func TestStatefulSetControlRollingUpdateBlockByMaxUnavailable(t *testing.T) {
    }
}

func TestStatefulSetControlRollingUpdateWithSpecifiedDelete(t *testing.T) {
    set := burst(newStatefulSet(6))
    var partition int32 = 3
    var maxUnavailable = intstr.FromInt(3)
    set.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{
        Type: apps.RollingUpdateStatefulSetStrategyType,
        RollingUpdate: func() *appsv1beta1.RollingUpdateStatefulSetStrategy {
            return &appsv1beta1.RollingUpdateStatefulSetStrategy{
                Partition: &partition,
                MaxUnavailable: &maxUnavailable,
                PodUpdatePolicy: appsv1beta1.InPlaceIfPossiblePodUpdateStrategyType,
            }
        }(),
    }

    client := fake.NewSimpleClientset()
    kruiseClient := kruisefake.NewSimpleClientset(set)
    spc, _, ssc, stop := setupController(client, kruiseClient)
    defer close(stop)
    if err := scaleUpStatefulSetControl(set, ssc, spc, assertBurstInvariants); err != nil {
        t.Fatal(err)
    }
    set, err := spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
    if err != nil {
        t.Fatal(err)
    }
    selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
    if err != nil {
        t.Fatal(err)
    }

    // set pod 0 to specified delete
    originalPods, err := spc.setPodSpecifiedDelete(set, 0)
    if err != nil {
        t.Fatal(err)
    }
    sort.Sort(ascendingOrdinal(originalPods))

    // start to update
    set.Spec.Template.Spec.Containers[0].Image = "foo"

    // first update pod 5 only because pod 0 is specified deleted
    if err = ssc.UpdateStatefulSet(context.TODO(), set, originalPods); err != nil {
        t.Fatal(err)
    }
    pods, err := spc.podsLister.Pods(set.Namespace).List(selector)
    if err != nil {
        t.Fatal(err)
    }

    // inplace update 5 and create 0
    if err = ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
        t.Fatal(err)
    }
    pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
    if err != nil {
        t.Fatal(err)
    }
    if len(pods) != 6 {
        t.Fatalf("Expected create pods 5, got pods %v", pods)
    }
    sort.Sort(ascendingOrdinal(pods))
    _, exist := pods[0].Labels[appsv1alpha1.SpecifiedDeleteKey]
    assert.True(t, !exist)
    // pod 0 is old image and pod 5/4 is new image
    assert.Equal(t, pods[5].Spec.Containers[0].Image, "foo")
    assert.Equal(t, pods[4].Spec.Containers[0].Image, "foo")
    assert.Equal(t, pods[0].Spec.Containers[0].Image, "nginx")

    // set pod 1/2/5 to specified deleted and pod 0/4/5 to ready
    spc.setPodSpecifiedDelete(set, 0)
    spc.setPodSpecifiedDelete(set, 1)
    spc.setPodSpecifiedDelete(set, 2)
    for i := 0; i < 6; i++ {
        spc.setPodRunning(set, i)
        spc.setPodReady(set, i)
    }
    originalPods, _ = spc.setPodSpecifiedDelete(set, 5)
    sort.Sort(ascendingOrdinal(originalPods))

    // create new pod for 1/2/5, do not update 3
    if err = ssc.UpdateStatefulSet(context.TODO(), set, originalPods); err != nil {
        t.Fatal(err)
    }
    pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
    if err != nil {
        t.Fatal(err)
    }

    // create new pods 5 and inplace update 3
    if err = ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
        t.Fatal(err)
    }
    pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
    if err != nil {
        t.Fatal(err)
    }
    sort.Sort(ascendingOrdinal(pods))
    if len(pods) != 6 {
        t.Fatalf("Expected create pods 5, got pods %v", pods)
    }

    _, exist = pods[5].Labels[appsv1alpha1.SpecifiedDeleteKey]
    assert.True(t, !exist)
    _, exist = pods[2].Labels[appsv1alpha1.SpecifiedDeleteKey]
    assert.True(t, !exist)
    _, exist = pods[1].Labels[appsv1alpha1.SpecifiedDeleteKey]
    assert.True(t, !exist)
    // pod 0 still undeleted
    _, exist = pods[0].Labels[appsv1alpha1.SpecifiedDeleteKey]
    assert.True(t, exist)
    assert.Equal(t, pods[5].Spec.Containers[0].Image, "foo")
    assert.Equal(t, pods[3].Spec.Containers[0].Image, "nginx")
    assert.Equal(t, pods[2].Spec.Containers[0].Image, "nginx")
    assert.Equal(t, pods[1].Spec.Containers[0].Image, "nginx")

    // set pod 3 to specified deleted and all pod to ready => pod3 will be deleted and updated
    for i := 0; i < 6; i++ {
        spc.setPodRunning(set, i)
        spc.setPodReady(set, i)
    }
    originalPods, _ = spc.setPodSpecifiedDelete(set, 3)
    sort.Sort(ascendingOrdinal(originalPods))
    // create new pod for 3, do not inplace-update 3
    if err = ssc.UpdateStatefulSet(context.TODO(), set, originalPods); err != nil {
        t.Fatal(err)
    }
    pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
    if err != nil {
        t.Fatal(err)
    }

    // create new pods 5 and inplace update 3
    if err = ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
        t.Fatal(err)
    }
    pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
    if err != nil {
        t.Fatal(err)
    }
    sort.Sort(ascendingOrdinal(pods))
    if len(pods) != 6 {
        t.Fatalf("Expected create pods 5, got pods %v", pods)
    }
    assert.Equal(t, pods[3].Spec.Containers[0].Image, "foo")
}

func TestStatefulSetControlInPlaceUpdate(t *testing.T) {
    set := burst(newStatefulSet(3))
    var partition int32 = 1

@@ -3107,6 +3256,21 @@ func (om *fakeObjectManager) setPodTerminated(set *appsv1beta1.StatefulSet, ordi
    return om.podsLister.Pods(set.Namespace).List(selector)
}

func (om *fakeObjectManager) setPodSpecifiedDelete(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) {
    pod := newStatefulSetPod(set, ordinal)
    if pod.Labels == nil {
        pod.Labels = make(map[string]string)
    }
    pod.Labels[appsv1alpha1.SpecifiedDeleteKey] = "true"
    fakeResourceVersion(pod)
    om.podsIndexer.Update(pod)
    selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
    if err != nil {
        return nil, err
    }
    return om.podsLister.Pods(set.Namespace).List(selector)
}

var _ StatefulPodControlObjectManager = &fakeObjectManager{}

type fakeStatefulSetStatusUpdater struct {
@@ -373,7 +373,10 @@ func buildSidecars(isUpdated bool, pod *corev1.Pod, oldPod *corev1.Pod, matchedS
    if !isUpdated {
        for i := range sidecarSet.Spec.InitContainers {
            initContainer := &sidecarSet.Spec.InitContainers[i]
            sidecarList.Insert(initContainer.Name)
            // only insert k8s native sidecar container for in-place update
            if sidecarcontrol.IsSidecarContainer(initContainer.Container) {
                sidecarList.Insert(initContainer.Name)
            }
            // volumeMounts that injected into sidecar container
            // when volumeMounts SubPathExpr contains expansions, then need copy container EnvVars(injectEnvs)
            injectedMounts, injectedEnvs := sidecarcontrol.GetInjectedVolumeMountsAndEnvs(control, initContainer, pod)

@@ -421,7 +424,7 @@ func buildSidecars(isUpdated bool, pod *corev1.Pod, oldPod *corev1.Pod, matchedS
    transferEnvs := sidecarcontrol.GetSidecarTransferEnvs(sidecarContainer, pod)
    // append volumeMounts SubPathExpr environments
    transferEnvs = util.MergeEnvVar(transferEnvs, injectedEnvs)
    klog.InfoS("try to inject Container sidecar %v@%v/%v, with injected envs: %v, volumeMounts: %v",
    klog.InfoS("try to inject Container sidecar",
        "containerName", sidecarContainer.Name, "namespace", pod.Namespace, "podName", pod.Name, "envs", transferEnvs, "volumeMounts", injectedMounts)
    //when update pod object
    if isUpdated {
@@ -148,12 +148,14 @@ func Ensure(kubeClient clientset.Interface, handlers map[string]types.HandlerGet
        if _, err := kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Update(context.TODO(), mutatingConfig, metav1.UpdateOptions{}); err != nil {
            return fmt.Errorf("failed to update %s: %v", mutatingWebhookConfigurationName, err)
        }
        klog.InfoS("Update caBundle success", "MutatingWebhookConfigurations", klog.KObj(mutatingConfig))
    }

    if !reflect.DeepEqual(validatingConfig, oldValidatingConfig) {
        if _, err := kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Update(context.TODO(), validatingConfig, metav1.UpdateOptions{}); err != nil {
            return fmt.Errorf("failed to update %s: %v", validatingWebhookConfigurationName, err)
        }
        klog.InfoS("Update caBundle success", "ValidatingWebhookConfigurations", klog.KObj(validatingConfig))
    }

    return nil
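The hunk above keeps the existing reflect.DeepEqual guard and only adds a success log, so an Update call (and the log line) happens solely when the desired webhook configuration actually differs from the cached one. Below is a hedged, standalone sketch of that guard pattern for a MutatingWebhookConfiguration; the function, package name, and the way the caBundle is obtained are placeholders for illustration, not kruise's Ensure implementation.

```go
package sketch

import (
	"context"
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"
)

// ensureMutatingCABundle patches the caBundle into every webhook of the named
// configuration, but only issues an Update when something actually changed.
func ensureMutatingCABundle(ctx context.Context, kubeClient kubernetes.Interface, name string, caBundle []byte) error {
	oldConfig, err := kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	newConfig := oldConfig.DeepCopy()
	for i := range newConfig.Webhooks {
		newConfig.Webhooks[i].ClientConfig.CABundle = caBundle
	}
	if reflect.DeepEqual(newConfig, oldConfig) {
		return nil // nothing to do, skip the no-op Update
	}
	if _, err := kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Update(ctx, newConfig, metav1.UpdateOptions{}); err != nil {
		return err
	}
	klog.InfoS("Update caBundle success", "MutatingWebhookConfiguration", name)
	return nil
}
```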
@@ -20,9 +20,8 @@ import (
    "bytes"
    "context"
    "fmt"
    "reflect"

    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "reflect"

    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"

@@ -31,6 +30,8 @@ import (
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/klog/v2"

    "github.com/openkruise/kruise/apis"
    "github.com/openkruise/kruise/pkg/features"

@@ -71,7 +72,7 @@ func Ensure(client apiextensionsclientset.Interface, lister apiextensionslisters
        }
        return nil
    }
    webhookConfig := apiextensionsv1.WebhookClientConfig{
    webhookConfig := &apiextensionsv1.WebhookClientConfig{
        CABundle: caBundle,
    }
    path := "/convert"

@@ -105,6 +106,7 @@ func Ensure(client apiextensionsclientset.Interface, lister apiextensionslisters
        if _, err := client.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), newCRD, metav1.UpdateOptions{}); err != nil {
            return fmt.Errorf("failed to update CRD %s: %v", newCRD.Name, err)
        }
        klog.InfoS("Update caBundle success", "CustomResourceDefinitions", klog.KObj(newCRD))
    }
}
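The #1717 changelog entry says the CRD is left untouched when its conversion webhook already carries the desired caBundle. That early-return is not visible in the hunks above (only the bytes import and the pointer-typed WebhookClientConfig are), so the following is an assumption-laden sketch of how such a check can look; crdHasDesiredCABundle is a hypothetical helper written for illustration, not the function kruise ships.

```go
package sketch // hypothetical package, for illustration only

import (
	"bytes"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

// crdHasDesiredCABundle reports whether the CRD's conversion webhook already
// serves the given caBundle, in which case the Update call can be skipped.
func crdHasDesiredCABundle(crd *apiextensionsv1.CustomResourceDefinition, caBundle []byte) bool {
	conv := crd.Spec.Conversion
	if conv == nil || conv.Webhook == nil || conv.Webhook.ClientConfig == nil {
		return false
	}
	return bytes.Equal(conv.Webhook.ClientConfig.CABundle, caBundle)
}
```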
@@ -853,10 +853,12 @@ var _ = SIGDescribe("SidecarSet", func() {
            gomega.Expect(err).NotTo(gomega.HaveOccurred())
            for _, pod := range pods {
                origin := sets.String{}
                for _, sidecar := range sidecarSetIn.Spec.Containers {
                    origin.Insert(sidecar.Name)
                }
                for _, sidecar := range sidecarSetIn.Spec.InitContainers {
                    if sidecarcontrol.IsSidecarContainer(sidecar.Container) {
                        origin.Insert(sidecar.Name)
                    }
                }
                for _, sidecar := range sidecarSetIn.Spec.Containers {
                    origin.Insert(sidecar.Name)
                }
                // SidecarSetHashAnnotation = "kruise.io/sidecarset-hash"

@@ -902,10 +904,12 @@ var _ = SIGDescribe("SidecarSet", func() {
            gomega.Expect(err).NotTo(gomega.HaveOccurred())
            for _, pod := range pods {
                origin := sets.String{}
                for _, sidecar := range sidecarSetIn.Spec.Containers {
                    origin.Insert(sidecar.Name)
                }
                for _, sidecar := range sidecarSetIn.Spec.InitContainers {
                    if sidecarcontrol.IsSidecarContainer(sidecar.Container) {
                        origin.Insert(sidecar.Name)
                    }
                }
                for _, sidecar := range sidecarSetIn.Spec.Containers {
                    origin.Insert(sidecar.Name)
                }
                // SidecarSetHashAnnotation = "kruise.io/sidecarset-hash"

@@ -950,6 +954,11 @@ var _ = SIGDescribe("SidecarSet", func() {
            gomega.Expect(err).NotTo(gomega.HaveOccurred())
            for _, pod := range pods {
                origin := sets.String{}
                for _, sidecar := range sidecarSetIn.Spec.InitContainers {
                    if sidecarcontrol.IsSidecarContainer(sidecar.Container) {
                        origin.Insert(sidecar.Name)
                    }
                }
                for _, sidecar := range sidecarSetIn.Spec.Containers {
                    origin.Insert(sidecar.Name)
                }
@@ -1113,6 +1113,16 @@ var _ = SIGDescribe("StatefulSet", func() {
            gomega.Expect(pods.Items[i].Labels["test-update"]).To(gomega.Equal("yes"))
        }
    })

    /*
      Testname: StatefulSet, Specified delete
      Description: Specified delete pod MUST under maxUnavailable constrain.
    */
    framework.ConformanceIt("should perform rolling updates with specified-deleted", func() {
        ginkgo.By("Creating a new StatefulSet")
        ss = framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
        testWithSpecifiedDeleted(c, kc, ns, ss)
    })
})

//ginkgo.Describe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() {

@@ -2220,3 +2230,101 @@ func updateStatefulSetWithRetries(ctx context.Context, kc kruiseclientset.Interf
    }
    return statefulSet, pollErr
}

// This function is used by two tests to test StatefulSet rollbacks: one using
// PVCs and one using no storage.
func testWithSpecifiedDeleted(c clientset.Interface, kc kruiseclientset.Interface, ns string, ss *appsv1beta1.StatefulSet,
    fns ...func(update *appsv1beta1.StatefulSet)) {
    sst := framework.NewStatefulSetTester(c, kc)
    *(ss.Spec.Replicas) = 4
    ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
    sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
    ss = sst.WaitForStatus(ss)
    currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
    gomega.Expect(currentRevision).To(gomega.Equal(updateRevision),
        fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
            ss.Namespace, ss.Name, updateRevision, currentRevision))
    pods := sst.GetPodList(ss)
    for i := range pods.Items {
        gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
            fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
                pods.Items[i].Namespace,
                pods.Items[i].Name,
                pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
                currentRevision))
    }
    specifiedDeletePod := func(idx int) {
        sst.SortStatefulPods(pods)
        oldUid := pods.Items[idx].UID
        err = setPodSpecifiedDelete(c, ns, pods.Items[idx].Name)
        gomega.Expect(err).NotTo(gomega.HaveOccurred())

        ss = sst.WaitForStatus(ss)
        name := pods.Items[idx].Name
        // wait be deleted
        sst.WaitForState(ss, func(set2 *appsv1beta1.StatefulSet, pods2 *v1.PodList) (bool, error) {
            ss = set2
            pods = pods2
            for i := range pods.Items {
                if pods.Items[i].Name == name {
                    return pods.Items[i].UID != oldUid, nil
                }
            }
            return false, nil
        })
        sst.WaitForPodReady(ss, pods.Items[idx].Name)
        pods = sst.GetPodList(ss)
        sst.SortStatefulPods(pods)
    }
    specifiedDeletePod(1)
    newImage := NewNginxImage
    oldImage := ss.Spec.Template.Spec.Containers[0].Image

    ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
    gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
    var partition int32 = 2
    ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
        update.Spec.Template.Spec.Containers[0].Image = newImage
        if update.Spec.UpdateStrategy.RollingUpdate == nil {
            update.Spec.UpdateStrategy.RollingUpdate = &appsv1beta1.RollingUpdateStatefulSetStrategy{}
        }
        update.Spec.UpdateStrategy.RollingUpdate = &appsv1beta1.RollingUpdateStatefulSetStrategy{
            Partition: &partition,
        }
        for _, fn := range fns {
            fn(update)
        }
    })
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
    specifiedDeletePod(2)

    ginkgo.By("Creating a new revision")
    ss = sst.WaitForStatus(ss)
    currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
    gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
        "Current revision should not equal update revision during rolling update")
    specifiedDeletePod(1)
    for i := range pods.Items {
        if i >= int(partition) {
            gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
                fmt.Sprintf("Pod %s/%s revision %s is not equal to updated revision %s",
                    pods.Items[i].Namespace,
                    pods.Items[i].Name,
                    pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
                    updateRevision))
        } else {
            gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
                fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
                    pods.Items[i].Namespace,
                    pods.Items[i].Name,
                    pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
                    currentRevision))
        }
    }
}

func setPodSpecifiedDelete(c clientset.Interface, ns, name string) error {
    _, err := c.CoreV1().Pods(ns).Patch(context.TODO(), name, types.StrategicMergePatchType, []byte(`{"metadata":{"labels":{"apps.kruise.io/specified-delete":"true"}}}`), metav1.PatchOptions{})
    return err
}
@@ -23,7 +23,6 @@ import (
    "strings"
    "time"

    kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
    v1 "k8s.io/api/core/v1"
    apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
    apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -37,6 +36,8 @@ import (
    scaleclient "k8s.io/client-go/scale"
    "k8s.io/kubernetes/pkg/api/legacyscheme"

    kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
)